Logging: Drop Settings from some logging ctors (#33332)
Drops `Settings` from some logging ctors now that they are no longer
needed. This should allow us to stop passing `Settings` around to quite
as many places.
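
To make the call-site change concrete, here is a minimal sketch of the new usage from a caller's point of view. The `ExampleShardComponent` class is hypothetical and exists only for illustration; the `Loggers.getLogger(Class, ShardId, String...)` overload is the one changed by this commit.

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.common.logging.Loggers;
    import org.elasticsearch.index.shard.ShardId;

    // Hypothetical shard-scoped component, shown only to illustrate the call shape after this commit.
    class ExampleShardComponent {
        private final Logger logger;

        ExampleShardComponent(ShardId shardId) {
            // Before: callers also had to thread node/index Settings through, e.g.
            //   this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), shardId);
            // After: the Settings argument is gone, so only the ShardId is needed.
            this.logger = Loggers.getLogger(getClass(), shardId);
        }
    }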
nik9000 authored Sep 2, 2018
1 parent ea4eef8 commit f8b7a4d
Showing 11 changed files with 27 additions and 32 deletions.
@@ -49,16 +49,17 @@ public class Loggers {
         Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Setting.Property.Dynamic,
             Setting.Property.NodeScope));
 
-    public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
-        return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
+    public static Logger getLogger(Class<?> clazz, ShardId shardId, String... prefixes) {
+        return getLogger(clazz, Settings.EMPTY,
+            shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
     }
 
     /**
-     * Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings, ShardId, String...)} but String loggerName instead of
+     * Just like {@link #getLogger(Class, ShardId, String...)} but String loggerName instead of
      * Class.
      */
-    public static Logger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) {
-        return getLogger(loggerName, settings,
+    public static Logger getLogger(String loggerName, ShardId shardId, String... prefixes) {
+        return getLogger(loggerName, Settings.EMPTY,
             asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
     }

@@ -71,7 +71,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
         this.config = indexSettings.getMergeSchedulerConfig();
         this.shardId = shardId;
         this.indexSettings = indexSettings.getSettings();
-        this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId);
+        this.logger = Loggers.getLogger(getClass(), shardId);
         refreshConfig();
     }

@@ -134,7 +134,7 @@ protected Engine(EngineConfig engineConfig) {
         this.allocationId = engineConfig.getAllocationId();
         this.store = engineConfig.getStore();
         this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name
-            engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId());
+            engineConfig.getShardId());
         this.eventListener = engineConfig.getEventListener();
     }

@@ -34,7 +34,7 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent
     protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettings) {
         this.shardId = shardId;
         this.indexSettings = indexSettings;
-        this.logger = Loggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId);
+        this.logger = Loggers.getLogger(getClass(), shardId);
         this.deprecationLogger = new DeprecationLogger(logger);
     }

@@ -165,7 +165,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService dire
         final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING);
         logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval);
         ByteSizeCachingDirectory sizeCachingDir = new ByteSizeCachingDirectory(dir, refreshInterval);
-        this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", settings, shardId));
+        this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId));
         this.shardLock = shardLock;
         this.onClose = onClose;

@@ -173,10 +173,9 @@ private RecoverySourceHandler createRecoverySourceHandler(StartRecoveryRequest r
                 final RemoteRecoveryTargetHandler recoveryTarget =
                     new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), transportService,
                         request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime));
-                handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt(), settings);
+                handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt());
                 return handler;
             }
         }
     }
 }

@@ -40,7 +40,6 @@
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.CancellableThreads;
 import org.elasticsearch.common.util.concurrent.FutureUtils;
@@ -116,13 +115,12 @@ protected void onCancel(String reason, @Nullable Exception suppressedException)
 
     public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recoveryTarget,
                                  final StartRecoveryRequest request,
-                                 final int fileChunkSizeInBytes,
-                                 final Settings nodeSettings) {
+                                 final int fileChunkSizeInBytes) {
         this.shard = shard;
         this.recoveryTarget = recoveryTarget;
         this.request = request;
         this.shardId = this.request.shardId().id();
-        this.logger = Loggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName());
+        this.logger = Loggers.getLogger(getClass(), request.shardId(), "recover to " + request.targetNode().getName());
         this.chunkSizeInBytes = fileChunkSizeInBytes;
         this.response = new RecoveryResponse();
     }
@@ -117,7 +117,7 @@ public RecoveryTarget(final IndexShard indexShard,
         this.cancellableThreads = new CancellableThreads();
         this.recoveryId = idGenerator.incrementAndGet();
         this.listener = listener;
-        this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
+        this.logger = Loggers.getLogger(getClass(), indexShard.shardId());
         this.indexShard = indexShard;
         this.sourceNode = sourceNode;
         this.shardId = indexShard.shardId();
@@ -108,7 +108,7 @@ public void testSendFiles() throws Throwable {
         final StartRecoveryRequest request = getStartRecoveryRequest();
         Store store = newStore(createTempDir());
         RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request,
-            recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY);
+            recoverySettings.getChunkSize().bytesAsInt());
         Directory dir = store.directory();
         RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
         int numDocs = randomIntBetween(10, 100);
@@ -174,7 +174,7 @@ public void testSendSnapshotSendsOps() throws IOException {
         when(shard.state()).thenReturn(IndexShardState.STARTED);
         final RecoveryTargetHandler recoveryTarget = mock(RecoveryTargetHandler.class);
         final RecoverySourceHandler handler =
-            new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes, Settings.EMPTY);
+            new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes);
         final List<Translog.Operation> operations = new ArrayList<>();
         final int initialNumberOfDocs = randomIntBetween(16, 64);
         for (int i = 0; i < initialNumberOfDocs; i++) {
@@ -281,7 +281,7 @@ public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {
         Store store = newStore(tempDir, false);
         AtomicBoolean failedEngine = new AtomicBoolean(false);
         RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request,
-            recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) {
+            recoverySettings.getChunkSize().bytesAsInt()) {
             @Override
             protected void failEngine(IOException cause) {
                 assertFalse(failedEngine.get());
@@ -340,7 +340,7 @@ public void testHandleExceptionOnSendFiles() throws Throwable {
         Store store = newStore(tempDir, false);
         AtomicBoolean failedEngine = new AtomicBoolean(false);
         RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request,
-            recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) {
+            recoverySettings.getChunkSize().bytesAsInt()) {
             @Override
             protected void failEngine(IOException cause) {
                 assertFalse(failedEngine.get());
@@ -405,11 +405,10 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE
         final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean();
         final AtomicBoolean phase2Called = new AtomicBoolean();
         final RecoverySourceHandler handler = new RecoverySourceHandler(
-                shard,
-                mock(RecoveryTargetHandler.class),
-                request,
-                recoverySettings.getChunkSize().bytesAsInt(),
-                Settings.EMPTY) {
+            shard,
+            mock(RecoveryTargetHandler.class),
+            request,
+            recoverySettings.getChunkSize().bytesAsInt()) {
 
             @Override
             public void phase1(final IndexCommit snapshot, final Supplier<Integer> translogOps) {
@@ -70,7 +70,6 @@
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.indices.recovery.RecoveryTarget;
 import org.elasticsearch.indices.recovery.StartRecoveryRequest;
-import org.elasticsearch.node.Node;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.snapshots.Snapshot;
@@ -572,11 +571,10 @@ protected final void recoverUnstartedReplica(final IndexShard replica,
         final StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), targetAllocationId,
             pNode, rNode, snapshot, replica.routingEntry().primary(), 0, startingSeqNo);
         final RecoverySourceHandler recovery = new RecoverySourceHandler(
-                primary,
-                recoveryTarget,
-                request,
-                (int) ByteSizeUnit.MB.toBytes(1),
-                Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), pNode.getName()).build());
+            primary,
+            recoveryTarget,
+            request,
+            (int) ByteSizeUnit.MB.toBytes(1));
         primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
             currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable, Collections.emptySet());
         recovery.recoverToTarget();
@@ -101,7 +101,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha
         if (indexShard != null) {
             Boolean remove = shardSet.remove(indexShard);
             if (remove == Boolean.TRUE) {
-                Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
+                Logger logger = Loggers.getLogger(getClass(), indexShard.shardId());
                 MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId());
             }
         }