Logging: Drop Settings from some logging ctors #33332

Merged 1 commit on Sep 2, 2018.

Changes from all commits:

@@ -49,16 +49,17 @@ public class Loggers {
Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Setting.Property.Dynamic,
Setting.Property.NodeScope));

- public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
- return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
+ public static Logger getLogger(Class<?> clazz, ShardId shardId, String... prefixes) {
+ return getLogger(clazz, Settings.EMPTY,

Member Author: I'll make follow-up changes that remove the need for Settings.EMPTY eventually.

Member Author: They are just needed to make it compile and aren't used.

+ shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}

/**
- * Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings, ShardId, String...)} but String loggerName instead of
+ * Just like {@link #getLogger(Class, ShardId, String...)} but String loggerName instead of
* Class.
*/
- public static Logger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) {
- return getLogger(loggerName, settings,
+ public static Logger getLogger(String loggerName, ShardId shardId, String... prefixes) {
+ return getLogger(loggerName, Settings.EMPTY,
asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}

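For orientation, here is a minimal sketch of what a call site looks like after this change; the ShardComponentExample class below is hypothetical and not part of the PR. The trimmed overloads still delegate internally with Settings.EMPTY, which the author plans to remove in a follow-up.

    // Hypothetical caller showing the new shard-scoped overload in use.
    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.Loggers;
    import org.elasticsearch.index.shard.ShardId;

    public class ShardComponentExample {
        private final Logger logger;

        public ShardComponentExample(ShardId shardId) {
            // Before this PR: Loggers.getLogger(getClass(), settings, shardId)
            // After: the Settings argument is gone from the caller-facing signature.
            this.logger = Loggers.getLogger(getClass(), shardId);
            this.logger.debug("created shard-scoped logger for [{}]", shardId);
        }
    }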

@@ -71,7 +71,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
this.config = indexSettings.getMergeSchedulerConfig();
this.shardId = shardId;
this.indexSettings = indexSettings.getSettings();
- this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId);
+ this.logger = Loggers.getLogger(getClass(), shardId);
refreshConfig();
}


@@ -134,7 +134,7 @@ protected Engine(EngineConfig engineConfig) {
this.allocationId = engineConfig.getAllocationId();
this.store = engineConfig.getStore();
this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name
- engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId());
+ engineConfig.getShardId());
this.eventListener = engineConfig.getEventListener();
}


@@ -34,7 +34,7 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent
protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettings) {
this.shardId = shardId;
this.indexSettings = indexSettings;
- this.logger = Loggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId);
+ this.logger = Loggers.getLogger(getClass(), shardId);
this.deprecationLogger = new DeprecationLogger(logger);
}


@@ -165,7 +165,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService dire
final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING);
logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval);
ByteSizeCachingDirectory sizeCachingDir = new ByteSizeCachingDirectory(dir, refreshInterval);
- this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", settings, shardId));
+ this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId));
this.shardLock = shardLock;
this.onClose = onClose;


@@ -173,10 +173,9 @@ private RecoverySourceHandler createRecoverySourceHandler(StartRecoveryRequest r
final RemoteRecoveryTargetHandler recoveryTarget =
new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), transportService,
request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime));
- handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt(), settings);
+ handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt());
return handler;
}
}
}
}

@@ -40,7 +40,6 @@
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
- import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.concurrent.FutureUtils;

@@ -116,13 +115,12 @@ protected void onCancel(String reason, @Nullable Exception suppressedException)

public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recoveryTarget,
final StartRecoveryRequest request,
- final int fileChunkSizeInBytes,
- final Settings nodeSettings) {
+ final int fileChunkSizeInBytes) {
this.shard = shard;
this.recoveryTarget = recoveryTarget;
this.request = request;
this.shardId = this.request.shardId().id();
- this.logger = Loggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName());
+ this.logger = Loggers.getLogger(getClass(), request.shardId(), "recover to " + request.targetNode().getName());
this.chunkSizeInBytes = fileChunkSizeInBytes;
this.response = new RecoveryResponse();
}
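In the recovery path the same overload carries an extra prefix naming the target node. A small sketch, assuming only the request's shard id and target node name are needed (RecoveryLoggingSketch and its helper method are hypothetical, not part of this PR):

    // Hypothetical helper mirroring how the handler now builds its prefixed logger,
    // with no node Settings threaded through by the caller.
    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.Loggers;
    import org.elasticsearch.index.shard.ShardId;

    class RecoveryLoggingSketch {
        static Logger recoveryLogger(Class<?> owner, ShardId shardId, String targetNodeName) {
            return Loggers.getLogger(owner, shardId, "recover to " + targetNodeName);
        }
    }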

@@ -117,7 +117,7 @@ public RecoveryTarget(final IndexShard indexShard,
this.cancellableThreads = new CancellableThreads();
this.recoveryId = idGenerator.incrementAndGet();
this.listener = listener;
- this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
+ this.logger = Loggers.getLogger(getClass(), indexShard.shardId());
this.indexShard = indexShard;
this.sourceNode = sourceNode;
this.shardId = indexShard.shardId();

@@ -108,7 +108,7 @@ public void testSendFiles() throws Throwable {
final StartRecoveryRequest request = getStartRecoveryRequest();
Store store = newStore(createTempDir());
RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request,
- recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY);
+ recoverySettings.getChunkSize().bytesAsInt());
Directory dir = store.directory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
int numDocs = randomIntBetween(10, 100);

@@ -174,7 +174,7 @@ public void testSendSnapshotSendsOps() throws IOException {
when(shard.state()).thenReturn(IndexShardState.STARTED);
final RecoveryTargetHandler recoveryTarget = mock(RecoveryTargetHandler.class);
final RecoverySourceHandler handler =
- new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes, Settings.EMPTY);
+ new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes);
final List<Translog.Operation> operations = new ArrayList<>();
final int initialNumberOfDocs = randomIntBetween(16, 64);
for (int i = 0; i < initialNumberOfDocs; i++) {

@@ -281,7 +281,7 @@ public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {
Store store = newStore(tempDir, false);
AtomicBoolean failedEngine = new AtomicBoolean(false);
RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request,
- recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) {
+ recoverySettings.getChunkSize().bytesAsInt()) {
@Override
protected void failEngine(IOException cause) {
assertFalse(failedEngine.get());

@@ -340,7 +340,7 @@ public void testHandleExceptionOnSendFiles() throws Throwable {
Store store = newStore(tempDir, false);
AtomicBoolean failedEngine = new AtomicBoolean(false);
RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request,
- recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) {
+ recoverySettings.getChunkSize().bytesAsInt()) {
@Override
protected void failEngine(IOException cause) {
assertFalse(failedEngine.get());

@@ -405,11 +405,10 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE
final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean();
final AtomicBoolean phase2Called = new AtomicBoolean();
final RecoverySourceHandler handler = new RecoverySourceHandler(
- shard,
- mock(RecoveryTargetHandler.class),
- request,
- recoverySettings.getChunkSize().bytesAsInt(),
- Settings.EMPTY) {
+ shard,
+ mock(RecoveryTargetHandler.class),
+ request,
+ recoverySettings.getChunkSize().bytesAsInt()) {

@Override
public void phase1(final IndexCommit snapshot, final Supplier<Integer> translogOps) {

@@ -70,7 +70,6 @@
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.RecoveryTarget;
import org.elasticsearch.indices.recovery.StartRecoveryRequest;
- import org.elasticsearch.node.Node;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.snapshots.Snapshot;

@@ -572,11 +571,10 @@ protected final void recoverUnstartedReplica(final IndexShard replica,
final StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), targetAllocationId,
pNode, rNode, snapshot, replica.routingEntry().primary(), 0, startingSeqNo);
final RecoverySourceHandler recovery = new RecoverySourceHandler(
- primary,
- recoveryTarget,
- request,
- (int) ByteSizeUnit.MB.toBytes(1),
- Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), pNode.getName()).build());
+ primary,
+ recoveryTarget,
+ request,
+ (int) ByteSizeUnit.MB.toBytes(1));
primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable, Collections.emptySet());
recovery.recoverToTarget();

@@ -101,7 +101,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha
if (indexShard != null) {
Boolean remove = shardSet.remove(indexShard);
if (remove == Boolean.TRUE) {
- Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
+ Logger logger = Loggers.getLogger(getClass(), indexShard.shardId());
MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId());
}
}