Commit

Merge branch 'opensearch-project:main' into IdentityCore
stephen-crawford committed Apr 27, 2023
2 parents 8bf0e2c + d984f50 commit 39f855d
Showing 96 changed files with 541 additions and 1,717 deletions.
8 changes: 8 additions & 0 deletions .github/dependabot.yml
@@ -207,6 +207,14 @@ updates:
labels:
- "dependabot"
- "dependencies"
- directory: /distribution/archives/darwin-arm64-tar/
open-pull-requests-limit: 1
package-ecosystem: gradle
schedule:
interval: weekly
labels:
- "dependabot"
- "dependencies"
- directory: /distribution/archives/integ-test-zip/
open-pull-requests-limit: 1
package-ecosystem: gradle
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -89,6 +89,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump `jackson` from 2.14.2 to 2.15.0 ([#7286](https://github.com/opensearch-project/OpenSearch/pull/7286))

### Changed
- Enable `./gradlew build` on MacOS by disabling bwc tests ([#7303](https://github.com/opensearch-project/OpenSearch/pull/7303))

### Deprecated

10 changes: 9 additions & 1 deletion build.gradle
@@ -225,8 +225,16 @@ tasks.register("verifyVersions") {
*/

boolean bwc_tests_enabled = true

/* place an issue link here when committing bwc changes */
final String bwc_tests_disabled_issue = ""
String bwc_tests_disabled_issue = ""

/* there's no existing MacOS release, therefore disable bwc tests */
if (Os.isFamily(Os.FAMILY_MAC)) {
bwc_tests_enabled = false
bwc_tests_disabled_issue = "https://github.com/opensearch-project/OpenSearch/issues/4173"
}

if (bwc_tests_enabled == false) {
if (bwc_tests_disabled_issue.isEmpty()) {
throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false")
1 change: 1 addition & 0 deletions buildSrc/build.gradle
@@ -187,6 +187,7 @@ if (project != rootProject) {
dependencies {
reaper project('reaper')
distribution project(':distribution:archives:darwin-tar')
distribution project(':distribution:archives:darwin-arm64-tar')
distribution project(':distribution:archives:linux-arm64-tar')
distribution project(':distribution:archives:linux-tar')
distribution project(':distribution:archives:windows-zip')
Original file line number Diff line number Diff line change
@@ -36,14 +36,15 @@
import org.opensearch.cluster.metadata.MappingMetadata;
import org.opensearch.common.Nullable;
import org.opensearch.core.ParseField;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.settings.Settings;
import org.opensearch.core.xcontent.ConstructingObjectParser;
import org.opensearch.core.xcontent.XContentParser;
import org.opensearch.index.mapper.MapperService;

import java.io.IOException;
import java.util.AbstractMap;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -59,9 +60,7 @@ public class IndexTemplateMetadata {
true,
(a, name) -> {
List<Map.Entry<String, AliasMetadata>> alias = (List<Map.Entry<String, AliasMetadata>>) a[5];
ImmutableOpenMap<String, AliasMetadata> aliasMap = new ImmutableOpenMap.Builder<String, AliasMetadata>().putAll(
alias.stream().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))
).build();
final Map<String, AliasMetadata> aliasMap = alias.stream().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
return new IndexTemplateMetadata(
name,
(Integer) a[0],
@@ -128,7 +127,7 @@ public class IndexTemplateMetadata {

private final MappingMetadata mappings;

private final ImmutableOpenMap<String, AliasMetadata> aliases;
private final Map<String, AliasMetadata> aliases;

public IndexTemplateMetadata(
String name,
@@ -137,7 +136,7 @@ public IndexTemplateMetadata(
List<String> patterns,
Settings settings,
MappingMetadata mappings,
ImmutableOpenMap<String, AliasMetadata> aliases
final Map<String, AliasMetadata> aliases
) {
if (patterns == null || patterns.isEmpty()) {
throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns);
@@ -148,7 +147,7 @@
this.patterns = patterns;
this.settings = settings;
this.mappings = mappings;
this.aliases = aliases;
this.aliases = Collections.unmodifiableMap(aliases);
}

public String name() {
@@ -176,7 +175,7 @@ public MappingMetadata mappings() {
return this.mappings;
}

public ImmutableOpenMap<String, AliasMetadata> aliases() {
public Map<String, AliasMetadata> aliases() {
return this.aliases;
}

@@ -217,12 +216,12 @@ public static class Builder {

private MappingMetadata mappings;

private final ImmutableOpenMap.Builder<String, AliasMetadata> aliases;
private final Map<String, AliasMetadata> aliases;

public Builder(String name) {
this.name = name;
mappings = null;
aliases = ImmutableOpenMap.builder();
aliases = new HashMap<>();
}

public Builder(IndexTemplateMetadata indexTemplateMetadata) {
@@ -233,7 +232,7 @@ public Builder(IndexTemplateMetadata indexTemplateMetadata) {
settings(indexTemplateMetadata.settings());

mappings = indexTemplateMetadata.mappings();
aliases = ImmutableOpenMap.builder(indexTemplateMetadata.aliases());
aliases = new HashMap<>(indexTemplateMetadata.aliases());
}

public Builder order(int order) {
@@ -277,7 +276,7 @@ public Builder putAlias(AliasMetadata.Builder aliasMetadata) {
}

public IndexTemplateMetadata build() {
return new IndexTemplateMetadata(name, order, version, indexPatterns, settings, mappings, aliases.build());
return new IndexTemplateMetadata(name, order, version, indexPatterns, settings, mappings, aliases);
}

public static IndexTemplateMetadata fromXContent(XContentParser parser, String templateName) throws IOException {
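For context on the refactor above: `IndexTemplateMetadata#aliases()` now returns a plain, unmodifiable `java.util.Map` instead of `ImmutableOpenMap`, so callers move from the `keysIt()`/`valuesIt()` iterators to the standard Map and Stream APIs. A minimal sketch of a hypothetical caller (not part of this commit; the helper class and method names are illustrative):

```java
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.opensearch.cluster.metadata.AliasMetadata;

// Hypothetical helper, for illustration only: collects alias names from the
// Map<String, AliasMetadata> that aliases() now returns.
final class AliasMapUsage {
    static List<String> sortedAliasNames(Map<String, AliasMetadata> aliases) {
        // Previously this required ImmutableOpenMap's valuesIt(); a plain Map
        // can be streamed directly.
        return aliases.values().stream()
            .map(AliasMetadata::alias)
            .sorted()
            .collect(Collectors.toList());
    }
}
```

Because the constructor now wraps the map with `Collections.unmodifiableMap`, callers should treat the returned map as read-only.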
Original file line number Diff line number Diff line change
@@ -32,7 +32,6 @@
package org.opensearch.client;

import org.opensearch.common.bytes.BytesReference;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.xcontent.LoggingDeprecationHandler;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.ToXContent;
@@ -43,10 +42,6 @@
import org.opensearch.test.OpenSearchTestCase;

import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

/**
* Base class for HLRC response parsing tests.
@@ -103,17 +98,4 @@ public final void testFromXContent() throws IOException {
protected ToXContent.Params getParams() {
return ToXContent.EMPTY_PARAMS;
}

protected static <T> void assertMapEquals(ImmutableOpenMap<String, T> expected, Map<String, T> actual) {
Set<String> expectedKeys = new HashSet<>();
Iterator<String> keysIt = expected.keysIt();
while (keysIt.hasNext()) {
expectedKeys.add(keysIt.next());
}

assertEquals(expectedKeys, actual.keySet());
for (String key : expectedKeys) {
assertEquals(expected.get(key), actual.get(key));
}
}
}
Original file line number Diff line number Diff line change
@@ -37,7 +37,6 @@
import org.opensearch.client.GetAliasesResponseTests;
import org.opensearch.cluster.metadata.AliasMetadata;
import org.opensearch.cluster.metadata.MappingMetadata;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.settings.IndexScopedSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.core.xcontent.XContentParser;
@@ -64,9 +63,9 @@ protected org.opensearch.action.admin.indices.get.GetIndexResponse createServerT
String[] indices = generateRandomStringArray(5, 5, false, false);
final Map<String, MappingMetadata> mappings = new HashMap<>();
final Map<String, List<AliasMetadata>> aliases = new HashMap<>();
ImmutableOpenMap.Builder<String, Settings> settings = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, Settings> defaultSettings = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, String> dataStreams = ImmutableOpenMap.builder();
final Map<String, Settings> settings = new HashMap<>();
final Map<String, Settings> defaultSettings = new HashMap<>();
final Map<String, String> dataStreams = new HashMap<>();
IndexScopedSettings indexScopedSettings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS;
boolean includeDefaults = randomBoolean();
for (String index : indices) {
@@ -96,9 +95,9 @@ protected org.opensearch.action.admin.indices.get.GetIndexResponse createServerT
indices,
mappings,
aliases,
settings.build(),
defaultSettings.build(),
dataStreams.build()
settings,
defaultSettings,
dataStreams
);
}

@@ -114,8 +113,8 @@ protected void assertInstances(
) {
assertArrayEquals(serverTestInstance.getIndices(), clientInstance.getIndices());
assertEquals(serverTestInstance.getMappings(), clientInstance.getMappings());
assertMapEquals(serverTestInstance.getSettings(), clientInstance.getSettings());
assertMapEquals(serverTestInstance.defaultSettings(), clientInstance.getDefaultSettings());
assertEquals(serverTestInstance.getSettings(), clientInstance.getSettings());
assertEquals(serverTestInstance.defaultSettings(), clientInstance.getDefaultSettings());
assertEquals(serverTestInstance.getAliases(), clientInstance.getAliases());
}

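The test refactor above replaces `ImmutableOpenMap.Builder` with plain `HashMap`s, which also lets the custom `assertMapEquals` helper (removed earlier in this commit) collapse into `assertEquals`, since `java.util.Map` defines value-based equality. A rough sketch of the populate-and-compare pattern under that assumption (class and method names here are illustrative, not from the commit):

```java
import java.util.HashMap;
import java.util.Map;

import org.opensearch.common.settings.Settings;

// Illustrative only: building per-index settings with a plain HashMap and
// relying on Map#equals for comparison, as the updated tests now do.
final class SettingsMapSketch {
    static Map<String, Settings> emptySettingsFor(String... indices) {
        Map<String, Settings> settings = new HashMap<>();
        for (String index : indices) {
            settings.put(index, Settings.EMPTY); // stand-in for randomly generated settings
        }
        return settings;
    }

    public static void main(String[] args) {
        Map<String, Settings> a = emptySettingsFor("index-1", "index-2");
        Map<String, Settings> b = emptySettingsFor("index-1", "index-2");
        assert a.equals(b); // Map equality replaces the removed assertMapEquals helper
    }
}
```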
Original file line number Diff line number Diff line change
@@ -137,10 +137,10 @@ public void testParsingFromOpenSearchResponse() throws IOException {
assertThat(result.mappings().sourceAsMap(), equalTo(expectedMapping.get("_doc")));

assertThat(result.aliases().size(), equalTo(esIMD.aliases().size()));
List<AliasMetadata> expectedAliases = Arrays.stream(esIMD.aliases().values().toArray(AliasMetadata.class))
List<AliasMetadata> expectedAliases = Arrays.stream(esIMD.aliases().values().toArray(new AliasMetadata[0]))
.sorted(Comparator.comparing(AliasMetadata::alias))
.collect(Collectors.toList());
List<AliasMetadata> actualAliases = Arrays.stream(result.aliases().values().toArray(AliasMetadata.class))
List<AliasMetadata> actualAliases = Arrays.stream(result.aliases().values().toArray(new AliasMetadata[0]))
.sorted(Comparator.comparing(AliasMetadata::alias))
.collect(Collectors.toList());
for (int j = 0; j < result.aliases().size(); j++) {
@@ -216,7 +216,7 @@ static void toXContent(GetIndexTemplatesResponse response, XContentBuilder build

serverTemplateBuilder.patterns(clientITMD.patterns());

Iterator<AliasMetadata> aliases = clientITMD.aliases().valuesIt();
Iterator<AliasMetadata> aliases = clientITMD.aliases().values().iterator();
aliases.forEachRemaining((a) -> serverTemplateBuilder.putAlias(a));

serverTemplateBuilder.settings(clientITMD.settings());
3 changes: 2 additions & 1 deletion distribution/build.gradle
@@ -344,7 +344,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
modulesFiles = { platform ->
copySpec {
eachFile {
if (it.relativePath.segments[-2] == 'bin' || (platform == 'darwin-x64' && it.relativePath.segments[-2] == 'MacOS')) {
if (it.relativePath.segments[-2] == 'bin' || ((platform == 'darwin-x64' || platform == 'darwin-arm64') && it.relativePath.segments[-2] == 'MacOS')) {
// bin files, wherever they are within modules (eg platform specific) should be executable
// and MacOS is an alternative to bin on macOS
it.mode = 0755
@@ -622,6 +622,7 @@ subprojects {
}

['archives:darwin-tar',
'archives:darwin-arm64-tar',
'archives:integ-test-zip',
'archives:linux-arm64-tar',
'archives:linux-tar',
1 change: 1 addition & 0 deletions distribution/docker/build.gradle
@@ -33,6 +33,7 @@ configurations {
}

dependencies {
arm64DockerSource project(path: ":distribution:archives:darwin-arm64-tar", configuration:"default")
arm64DockerSource project(path: ":distribution:archives:linux-arm64-tar", configuration:"default")
s390xDockerSource project(path: ":distribution:archives:linux-s390x-tar", configuration:"default")
ppc64leDockerSource project(path: ":distribution:archives:linux-ppc64le-tar", configuration:"default")
Original file line number Diff line number Diff line change
@@ -64,7 +64,6 @@
import org.opensearch.cluster.routing.UnassignedInfo;
import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.opensearch.common.Priority;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.ByteSizeValue;
import org.opensearch.common.unit.TimeValue;
@@ -113,15 +112,9 @@ public void testCreateShrinkIndexToN() {
.setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON)
.get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
String mergeNode = discoveryNodes[0].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
@@ -212,15 +205,9 @@ public void testShrinkIndexPrimaryTerm() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get();

final ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertThat(dataNodes.size(), greaterThanOrEqualTo(2));
final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
final String mergeNode = discoveryNodes[0].getName();
// This needs more than the default timeout if a large number of shards were created.
ensureGreen(TimeValue.timeValueSeconds(120));
@@ -298,15 +285,9 @@ public void testCreateShrinkIndex() {
for (int i = 0; i < docs; i++) {
client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
@@ -426,15 +407,9 @@ public void testCreateShrinkIndexFails() throws Exception {
for (int i = 0; i < 20; i++) {
client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
String spareNode = discoveryNodes[0].getName();
String mergeNode = discoveryNodes[1].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
@@ -534,15 +509,9 @@ public void testCreateShrinkWithIndexSort() throws Exception {
.setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON)
.get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
String mergeNode = discoveryNodes[0].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
@@ -614,14 +583,8 @@ public void testShrinkCommitsMergeOnIdle() throws Exception {
client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
}
client().admin().indices().prepareFlush("source").get();
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
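Throughout the test above, `getDataNodes()` now returns a `java.util.Map`, so converting its values to an array uses `Collection#toArray(T[])` with a zero-length array rather than `ImmutableOpenMap`'s class-token overload. A minimal sketch of that pattern (the helper name is illustrative; the `DiscoveryNode` import path is assumed from the surrounding code):

```java
import java.util.Map;

import org.opensearch.cluster.node.DiscoveryNode;

// Illustrative only: with a plain Map, the standard Collection#toArray(T[])
// overload is used, typically passed a zero-length array.
final class DataNodeArraySketch {
    static DiscoveryNode[] asArray(Map<String, DiscoveryNode> dataNodes) {
        return dataNodes.values().toArray(new DiscoveryNode[0]);
    }
}
```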
