diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 6b86da2c91261..cfaadc5ed1e5e 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -23,4 +23,6 @@ BWC_VERSION:
- "2.9.0"
- "2.9.1"
- "2.10.0"
+ - "2.10.1"
- "2.11.0"
+ - "2.12.0"
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 69616e533d1ed..c47b9e0b69256 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -19,6 +19,7 @@ Resolves #[Issue number to be closed when this PR is merged]
- [ ] New functionality has javadoc added
- [ ] Commits are signed per the DCO using --signoff
- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog))
+- [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose)
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin).
diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml
new file mode 100644
index 0000000000000..6a66ac5fb5609
--- /dev/null
+++ b/.github/workflows/assemble.yml
@@ -0,0 +1,26 @@
+name: Gradle Assemble
+on: [pull_request]
+
+jobs:
+ assemble:
+ if: github.repository == 'opensearch-project/OpenSearch'
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-latest, windows-latest, macos-latest]
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v3
+ with:
+ java-version: 11
+ distribution: temurin
+ - name: Setup docker (missing on MacOS)
+ if: runner.os == 'macos'
+ run: |
+ brew install docker
+ colima start
+ sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock
+ - name: Run Gradle (assemble)
+ run: |
+ ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE
diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml
index c2a2cedaaefb4..76981276fe085 100644
--- a/.github/workflows/lucene-snapshots.yml
+++ b/.github/workflows/lucene-snapshots.yml
@@ -38,7 +38,7 @@ jobs:
- name: Set hash
working-directory: ./lucene
run: |
- echo "::set-output name=REVISION::$(git rev-parse --short HEAD)"
+ echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
id: version
- name: Initialize gradle settings
diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml
index f4622859916c7..b04f404b11c55 100644
--- a/.github/workflows/precommit.yml
+++ b/.github/workflows/precommit.yml
@@ -1,4 +1,4 @@
-name: Gradle Precommit and Assemble
+name: Gradle Precommit
on: [pull_request]
jobs:
@@ -19,12 +19,3 @@ jobs:
- name: Run Gradle (precommit)
run: |
./gradlew javadoc precommit --parallel
- - name: Setup docker (missing on MacOS)
- if: runner.os == 'macos'
- run: |
- brew install docker
- colima start
- sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock
- - name: Run Gradle (assemble)
- run: |
- ./gradlew assemble --parallel
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index df785bcc70014..a20c671c137b2 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -59,7 +59,7 @@ jobs:
sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java
- name: Create Pull Request
- uses: peter-evans/create-pull-request@v3
+ uses: peter-evans/create-pull-request@v5
with:
token: ${{ steps.github_app_token.outputs.token }}
base: ${{ env.BASE }}
@@ -86,7 +86,7 @@ jobs:
sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java
- name: Create Pull Request
- uses: peter-evans/create-pull-request@v3
+ uses: peter-evans/create-pull-request@v5
with:
token: ${{ steps.github_app_token.outputs.token }}
base: ${{ env.BASE_X }}
@@ -113,7 +113,7 @@ jobs:
sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java
- name: Create Pull Request
- uses: peter-evans/create-pull-request@v3
+ uses: peter-evans/create-pull-request@v5
with:
token: ${{ steps.github_app_token.outputs.token }}
base: main
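
For illustration, the sed expression above keeps the matched constant line (\0) and appends the next unreleased version beneath it, reusing the captured Lucene suffix (\2). With assumed version numbers and Lucene constant (not taken from this diff), the edit to Version.java looks like:

    // before
    public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0);
    // after: matched line kept (\0), next unreleased constant inserted below it
    public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0);
    public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0);
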
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3d1ca935193c3..a9e5bb3982708 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679))
- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. ([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110))
- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618))
+- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992))
### Dependencies
- Bump `log4j-core` from 2.18.0 to 2.19.0
@@ -32,7 +33,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump `com.google.code.gson:gson` from 2.10 to 2.10.1
- Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1
- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12
-- Bump `org.apache.commons:commons-compress` from 1.22 to 1.23.0
- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0
- Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0
- Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291))
@@ -53,6 +53,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773))
- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855))
+- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/))
+- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558))
+
### Deprecated
@@ -81,52 +84,18 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
## [Unreleased 2.x]
### Added
-- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386))
-- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681))
-- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694))
-- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666))
-- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131))
-- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189))
-- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562))
### Dependencies
-- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575))
-- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968))
-- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950))
-- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972))
-- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971))
-- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022))
-- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022))
-- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098))
-- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125))
-- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752))
-- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126))
-- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.4 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206))
-- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208))
-- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209))
-- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))`
-- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))`
-- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))`
### Changed
-- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415))
-- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916))
-- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840))
-- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036))
-- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042))
-- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122))
### Deprecated
### Removed
### Fixed
-- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725))
-- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045))
-- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082))
-- Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089))
### Security
[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD
-[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x
+[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index d7bdd09ea882e..6d3e0f018657e 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -103,7 +103,7 @@ dependencies {
api localGroovy()
api 'commons-codec:commons-codec:1.16.0'
- api 'org.apache.commons:commons-compress:1.23.0'
+ api 'org.apache.commons:commons-compress:1.24.0'
api 'org.apache.ant:ant:1.10.14'
api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0'
api 'com.netflix.nebula:nebula-publishing-plugin:20.3.0'
@@ -114,7 +114,7 @@ dependencies {
api 'com.github.johnrengelman:shadow:8.1.1'
api 'org.jdom:jdom2:2.0.6.1'
api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}"
- api 'de.thetaphi:forbiddenapis:3.5.1'
+ api 'de.thetaphi:forbiddenapis:3.6'
api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.12'
api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}"
api 'org.apache.maven:maven-model:3.9.4'
diff --git a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java
index c5b4de157c75c..662510fbbf61c 100644
--- a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java
+++ b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java
@@ -45,17 +45,16 @@
/**
* A standalone process that will reap external services after a build dies.
- *
+ *
* <h2>Input</h2>
* Since how to reap a given service is platform and service dependent, this tool
* operates on system commands to execute. It takes a single argument, a directory
* that will contain files with reaping commands. Each line in each file will be
* executed with {@link Runtime#exec(String)}.
- *
+ *
* The main method will wait indefinitely on the parent process (Gradle) by
* reading from stdin. When Gradle shuts down, whether normally or abruptly, the
* pipe will be broken and read will return.
- *
+ *
* The reaper will then iterate over the files in the configured directory,
* and execute the given commands. If any commands fail, a failure message is
* written to stderr. Otherwise, the input file will be deleted. If no inputs
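
A minimal sketch of the lifecycle this javadoc describes, assuming the command directory is the single program argument and omitting the failure and cleanup handling:

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    class ReaperSketch {
        public static void main(String[] args) throws IOException {
            Path commandDir = Paths.get(args[0]);
            System.in.read(); // blocks until the parent (Gradle) exits and the pipe breaks
            try (DirectoryStream<Path> files = Files.newDirectoryStream(commandDir)) {
                for (Path file : files) {
                    for (String command : Files.readAllLines(file)) {
                        Runtime.getRuntime().exec(command); // one reaping command per line
                    }
                }
            }
        }
    }
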
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java
index cddd03ccc2019..4d45640b75e3d 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java
@@ -52,15 +52,15 @@
/**
* A container for opensearch supported version information used in BWC testing.
- *
+ *
* Parse the Java source file containing the versions declarations and use the known rules to figure out which are all
* the version the current one is wire and index compatible with.
* On top of this, figure out which of these are unreleased and provide the branch they can be built from.
- *
+ *
* Note that in this context, currentVersion is the unreleased version this build operates on.
* At any point in time there will surely be four such unreleased versions being worked on,
* thus currentVersion will be one of these.
- *
+ *
* Considering:
*
*     M, M > 0
@@ -84,7 +84,7 @@
* Each build is only concerned with versions before it, as those are the ones that need to be tested
* for backwards compatibility. We never look forward, and don't add forward facing version number to branches of previous
* version.
- *
+ *
* Each branch has a current version, and expected compatible versions are parsed from the server code's Version class.
* We can reliably figure out which the unreleased versions are due to the convention of always adding the next unreleased
* version number to server in all branches when a version is released.
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java
index 5ae7ad1595e2f..5259700b3a63d 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java
@@ -38,7 +38,7 @@
/**
* Writes data passed to this stream as log messages.
- *
+ *
* The stream will be flushed whenever a newline is detected.
* Allows setting an optional prefix before each line of output.
*/
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java
index 159270d28e3d6..c6e49dc44d6bd 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java
@@ -76,7 +76,7 @@ public InternalBwcGitPlugin(ProviderFactory providerFactory, ExecOperations exec
public void apply(Project project) {
this.project = project;
this.gitExtension = project.getExtensions().create("bwcGitConfig", BwcGitExtension.class);
- Provider<String> remote = providerFactory.systemProperty("bwc.remote").forUseAtConfigurationTime().orElse("opensearch-project");
+ Provider<String> remote = providerFactory.systemProperty("bwc.remote").orElse("opensearch-project");
TaskContainer tasks = project.getTasks();
TaskProvider<LoggedExec> createCloneTaskProvider = tasks.register("createClone", LoggedExec.class, createClone -> {
@@ -105,7 +105,6 @@ public void apply(Project project) {
String remoteRepo = remote.get();
// for testing only we can override the base remote url
String remoteRepoUrl = providerFactory.systemProperty("testRemoteRepo")
- .forUseAtConfigurationTime()
.getOrElse("https://github.com/" + remoteRepo + "/OpenSearch.git");
addRemote.setCommandLine(asList("git", "remote", "add", remoteRepo, remoteRepoUrl));
});
@@ -113,7 +112,6 @@ public void apply(Project project) {
TaskProvider<LoggedExec> fetchLatestTaskProvider = tasks.register("fetchLatest", LoggedExec.class, fetchLatest -> {
Provider
*/
class S3Repository extends MeteredBlobStoreRepository {
@@ -182,6 +190,13 @@ class S3Repository extends MeteredBlobStoreRepository {
new ByteSizeValue(5, ByteSizeUnit.TB)
);
+ /**
+ * Maximum number of deletes in a DeleteObjectsRequest.
+ *
+ * @see S3 Documentation.
+ */
+ static final Setting<Integer> BULK_DELETE_SIZE = Setting.intSetting("bulk_delete_size", 1000, 1, 1000);
+
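
The 1 to 1,000 bound mirrors the S3 DeleteObjects limit of 1,000 keys per request. As a rough sketch (helper name and types are illustrative, not the plugin's actual code), deleting N keys while honoring this setting splits the work into ceil(N / bulk_delete_size) requests:

    import java.util.ArrayList;
    import java.util.List;

    class BulkDeleteSketch {
        // Partition keys into batches of at most bulkDeleteSize; one
        // DeleteObjectsRequest would be issued per batch.
        static List<List<String>> partition(List<String> keys, int bulkDeleteSize) {
            List<List<String>> batches = new ArrayList<>();
            for (int i = 0; i < keys.size(); i += bulkDeleteSize) {
                batches.add(keys.subList(i, Math.min(i + bulkDeleteSize, keys.size())));
            }
            return batches;
        }
    }
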
/**
* Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy,
* standard_ia, onezone_ia and intelligent_tiering. Defaults to standard.
@@ -203,31 +218,29 @@ class S3Repository extends MeteredBlobStoreRepository {
private final S3Service service;
- private final String bucket;
-
- private final ByteSizeValue bufferSize;
+ private volatile String bucket;
- private final ByteSizeValue chunkSize;
+ private volatile ByteSizeValue bufferSize;
- private final BlobPath basePath;
+ private volatile ByteSizeValue chunkSize;
- private final boolean serverSideEncryption;
+ private volatile BlobPath basePath;
- private final String storageClass;
+ private volatile boolean serverSideEncryption;
- private final String cannedACL;
-
- private final RepositoryMetadata repositoryMetadata;
+ private volatile String storageClass;
+ private volatile String cannedACL;
private final AsyncTransferManager asyncUploadUtils;
private final S3AsyncService s3AsyncService;
private final boolean multipartUploadEnabled;
private final AsyncExecutorContainer priorityExecutorBuilder;
private final AsyncExecutorContainer normalExecutorBuilder;
+ private final Path pluginConfigPath;
- /**
- * Constructs an s3 backed repository
- */
+ private volatile int bulkDeletesSize;
+
+ // Used by test classes
S3Repository(
final RepositoryMetadata metadata,
final NamedXContentRegistry namedXContentRegistry,
@@ -240,77 +253,48 @@ class S3Repository extends MeteredBlobStoreRepository {
final S3AsyncService s3AsyncService,
final boolean multipartUploadEnabled
) {
- super(
+ this(
metadata,
- COMPRESS_SETTING.get(metadata.settings()),
namedXContentRegistry,
+ service,
clusterService,
recoverySettings,
- buildLocation(metadata)
+ asyncUploadUtils,
+ priorityExecutorBuilder,
+ normalExecutorBuilder,
+ s3AsyncService,
+ multipartUploadEnabled,
+ Path.of("")
);
+ }
+
+ /**
+ * Constructs an s3 backed repository
+ */
+ S3Repository(
+ final RepositoryMetadata metadata,
+ final NamedXContentRegistry namedXContentRegistry,
+ final S3Service service,
+ final ClusterService clusterService,
+ final RecoverySettings recoverySettings,
+ final AsyncTransferManager asyncUploadUtils,
+ final AsyncExecutorContainer priorityExecutorBuilder,
+ final AsyncExecutorContainer normalExecutorBuilder,
+ final S3AsyncService s3AsyncService,
+ final boolean multipartUploadEnabled,
+ Path pluginConfigPath
+ ) {
+ super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata));
this.service = service;
this.s3AsyncService = s3AsyncService;
this.multipartUploadEnabled = multipartUploadEnabled;
-
- this.repositoryMetadata = metadata;
+ this.pluginConfigPath = pluginConfigPath;
this.asyncUploadUtils = asyncUploadUtils;
this.priorityExecutorBuilder = priorityExecutorBuilder;
this.normalExecutorBuilder = normalExecutorBuilder;
- // Parse and validate the user's S3 Storage Class setting
- this.bucket = BUCKET_SETTING.get(metadata.settings());
- if (bucket == null) {
- throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository");
- }
-
- this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
- this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
-
- // We make sure that chunkSize is bigger or equal than/to bufferSize
- if (this.chunkSize.getBytes() < bufferSize.getBytes()) {
- throw new RepositoryException(
- metadata.name(),
- CHUNK_SIZE_SETTING.getKey()
- + " ("
- + this.chunkSize
- + ") can't be lower than "
- + BUFFER_SIZE_SETTING.getKey()
- + " ("
- + bufferSize
- + ")."
- );
- }
-
- final String basePath = BASE_PATH_SETTING.get(metadata.settings());
- if (Strings.hasLength(basePath)) {
- this.basePath = new BlobPath().add(basePath);
- } else {
- this.basePath = BlobPath.cleanPath();
- }
-
- this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
-
- this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
- this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings());
-
- if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
- // provided repository settings
- deprecationLogger.deprecate(
- "s3_repository_secret_settings",
- "Using s3 access/secret key from repository settings. Instead "
- + "store these in named clients and the opensearch keystore for secure settings."
- );
- }
-
- logger.debug(
- "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]",
- bucket,
- chunkSize,
- serverSideEncryption,
- bufferSize,
- cannedACL,
- storageClass
- );
+ validateRepositoryMetadata(metadata);
+ readRepositoryMetadata();
}
private static Map<String, String> buildLocation(RepositoryMetadata metadata) {
@@ -365,14 +349,15 @@ protected S3BlobStore createBlobStore() {
bufferSize,
cannedACL,
storageClass,
- repositoryMetadata,
+ bulkDeletesSize,
+ metadata,
asyncUploadUtils,
priorityExecutorBuilder,
normalExecutorBuilder
);
}
- // only use for testing
+ // only use for testing (S3RepositoryTests)
@Override
protected BlobStore getBlobStore() {
return super.getBlobStore();
@@ -383,11 +368,142 @@ public BlobPath basePath() {
return basePath;
}
+ @Override
+ public boolean isReloadable() {
+ return true;
+ }
+
+ @Override
+ public void reload(RepositoryMetadata newRepositoryMetadata) {
+ if (isReloadable() == false) {
+ return;
+ }
+
+ // Reload configs for S3Repository
+ super.reload(newRepositoryMetadata);
+ readRepositoryMetadata();
+
+ // Reload configs for S3RepositoryPlugin
+ service.settings(metadata);
+ s3AsyncService.settings(metadata);
+
+ // Reload configs for S3BlobStore
+ BlobStore blobStore = getBlobStore();
+ blobStore.reload(metadata);
+ }
+
+ /**
+ * Reloads the values derived from the Repository Metadata
+ */
+ private void readRepositoryMetadata() {
+ this.bucket = BUCKET_SETTING.get(metadata.settings());
+ this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
+ this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
+ final String basePath = BASE_PATH_SETTING.get(metadata.settings());
+ if (Strings.hasLength(basePath)) {
+ this.basePath = new BlobPath().add(basePath);
+ } else {
+ this.basePath = BlobPath.cleanPath();
+ }
+
+ this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
+ this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
+ this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings());
+ this.bulkDeletesSize = BULK_DELETE_SIZE.get(metadata.settings());
+ if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
+ // provided repository settings
+ deprecationLogger.deprecate(
+ "s3_repository_secret_settings",
+ "Using s3 access/secret key from repository settings. Instead "
+ + "store these in named clients and the opensearch keystore for secure settings."
+ );
+ }
+
+ logger.debug(
+ "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]",
+ bucket,
+ chunkSize,
+ serverSideEncryption,
+ bufferSize,
+ cannedACL,
+ storageClass
+ );
+ }
+
+ @Override
+ public void validateMetadata(RepositoryMetadata newRepositoryMetadata) {
+ super.validateMetadata(newRepositoryMetadata);
+ validateRepositoryMetadata(newRepositoryMetadata);
+ }
+
+ private void validateRepositoryMetadata(RepositoryMetadata newRepositoryMetadata) {
+ Settings settings = newRepositoryMetadata.settings();
+ if (BUCKET_SETTING.get(settings) == null) {
+ throw new RepositoryException(newRepositoryMetadata.name(), "No bucket defined for s3 repository");
+ }
+
+ // We make sure that chunkSize is bigger or equal than/to bufferSize
+ if (CHUNK_SIZE_SETTING.get(settings).getBytes() < BUFFER_SIZE_SETTING.get(settings).getBytes()) {
+ throw new RepositoryException(
+ newRepositoryMetadata.name(),
+ CHUNK_SIZE_SETTING.getKey()
+ + " ("
+ + CHUNK_SIZE_SETTING.get(settings)
+ + ") can't be lower than "
+ + BUFFER_SIZE_SETTING.getKey()
+ + " ("
+ + BUFFER_SIZE_SETTING.get(settings)
+ + ")."
+ );
+ }
+
+ validateStorageClass(STORAGE_CLASS_SETTING.get(settings));
+ validateCannedACL(CANNED_ACL_SETTING.get(settings));
+ }
+
+ private static void validateStorageClass(String storageClassStringValue) {
+ if ((storageClassStringValue == null) || storageClassStringValue.equals("")) {
+ return;
+ }
+
+ final StorageClass storageClass = StorageClass.fromValue(storageClassStringValue.toUpperCase(Locale.ENGLISH));
+ if (storageClass.equals(StorageClass.GLACIER)) {
+ throw new BlobStoreException("Glacier storage class is not supported");
+ }
+
+ if (storageClass == StorageClass.UNKNOWN_TO_SDK_VERSION) {
+ throw new BlobStoreException("`" + storageClassStringValue + "` is not a valid S3 Storage Class.");
+ }
+ }
+
+ private static void validateCannedACL(String cannedACLStringValue) {
+ if ((cannedACLStringValue == null) || cannedACLStringValue.equals("")) {
+ return;
+ }
+
+ for (final ObjectCannedACL cur : ObjectCannedACL.values()) {
+ if (cur.toString().equalsIgnoreCase(cannedACLStringValue)) {
+ return;
+ }
+ }
+
+ throw new BlobStoreException("cannedACL is not valid: [" + cannedACLStringValue + "]");
+ }
+
@Override
protected ByteSizeValue chunkSize() {
return chunkSize;
}
+ @Override
+ public List<Setting<?>> getRestrictedSystemRepositorySettings() {
+ List<Setting<?>> restrictedSettings = new ArrayList<>();
+ restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings());
+ restrictedSettings.add(BUCKET_SETTING);
+ restrictedSettings.add(BASE_PATH_SETTING);
+ return restrictedSettings;
+ }
+
@Override
protected void doClose() {
final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null);
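
Taken together, the reload path above is why the settings-derived fields earlier in this file changed from final to volatile: reload() re-derives them in place while other threads may be reading. A minimal sketch of the pattern, with hypothetical names:

    import java.util.Map;

    class ReloadableRepositorySketch {
        private volatile String bucket; // volatile so readers see the re-derived value

        void reload(Map<String, String> newSettings) {
            this.bucket = newSettings.get("bucket"); // every derived field is re-read
        }
    }
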
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java
index 6ef60474afe8c..a80ee0ca35fae 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java
@@ -182,7 +182,8 @@ protected S3Repository createRepository(
priorityExecutorBuilder,
normalExecutorBuilder,
s3AsyncService,
- S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings())
+ S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()),
+ configPath
);
}
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java
index 3a35f6135f28b..d7e47e0ab1bcc 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java
@@ -54,7 +54,7 @@
* Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where
* the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing
* the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself.
- *
+ *
* See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue
*/
class S3RetryingInputStream extends InputStream {
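
A hedged sketch of the resume step this wrapper performs, assuming bucket, key, and a bytesReadSoFar counter track the download state (the real class's fields are not shown in this hunk):

    import software.amazon.awssdk.services.s3.model.GetObjectRequest;

    class ResumeSketch {
        // After a mid-stream failure, reopen the object at the first undelivered
        // byte using an open-ended HTTP range header.
        static GetObjectRequest resumeRequest(String bucket, String key, long bytesReadSoFar) {
            return GetObjectRequest.builder()
                .bucket(bucket)
                .key(key)
                .range("bytes=" + bytesReadSoFar + "-")
                .build();
        }
    }
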
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
index b13672b4179f8..b1b3e19eac275 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
@@ -90,6 +90,7 @@
import java.security.SecureRandom;
import java.time.Duration;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import static java.util.Collections.emptyMap;
@@ -100,7 +101,7 @@ class S3Service implements Closeable {
private static final String DEFAULT_S3_ENDPOINT = "s3.amazonaws.com";
- private volatile Map<String, AmazonS3Reference> clientsCache = emptyMap();
+ private volatile Map<String, AmazonS3Reference> clientsCache = new ConcurrentHashMap<>();
/**
* Client settings calculated from static configuration and settings in the keystore.
@@ -111,7 +112,7 @@ class S3Service implements Closeable {
* Client settings derived from those in {@link #staticClientSettings} by combining them with settings
* in the {@link RepositoryMetadata}.
*/
- private volatile Map<Settings, S3ClientSettings> derivedClientSettings = emptyMap();
+ private volatile Map<Settings, S3ClientSettings> derivedClientSettings = new ConcurrentHashMap<>();
S3Service(final Path configPath) {
staticClientSettings = MapBuilder.<String, S3ClientSettings>newMapBuilder()
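
Replacing the immutable emptyMap() defaults with ConcurrentHashMap lets these caches be updated in place when repository settings are reloaded. A sketch of the lookup this enables (the factory method here is hypothetical):

    // With a ConcurrentHashMap, a client is built at most once per name without
    // swapping out the whole map on each settings change.
    AmazonS3Reference client = clientsCache.computeIfAbsent(clientName, name -> buildClient(name));
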
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java
index 8c8524212e08e..6eb8faa746d34 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java
@@ -64,6 +64,7 @@
import org.mockito.invocation.InvocationOnMock;
+import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
@@ -387,6 +388,7 @@ private S3BlobStore createBlobStore() {
S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY),
S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY),
S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY),
+ BULK_DELETE_SIZE.get(Settings.EMPTY),
repositoryMetadata,
new AsyncTransferManager(
S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(),
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java
index ecad68474b601..a2214f5218991 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java
@@ -95,6 +95,7 @@
import static org.opensearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SETTING;
import static org.opensearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING;
import static org.opensearch.repositories.s3.S3ClientSettings.REGION;
+import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
@@ -215,6 +216,7 @@ protected AsyncMultiStreamBlobContainer createBlobContainer(
bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize,
S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY),
S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY),
+ BULK_DELETE_SIZE.get(Settings.EMPTY),
repositoryMetadata,
new AsyncTransferManager(
S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(),
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
index a87c060dcc60a..2701cae6a733b 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
@@ -81,7 +81,6 @@
import org.opensearch.common.io.InputStreamContainer;
import org.opensearch.core.action.ActionListener;
import org.opensearch.core.common.unit.ByteSizeUnit;
-import org.opensearch.repositories.s3.async.AsyncTransferManager;
import org.opensearch.test.OpenSearchTestCase;
import java.io.ByteArrayInputStream;
@@ -100,7 +99,6 @@
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
@@ -278,10 +276,12 @@ public void testDelete() throws IOException {
final String bucketName = randomAlphaOfLengthBetween(1, 10);
final BlobPath blobPath = new BlobPath();
+ int bulkDeleteSize = 5;
final S3BlobStore blobStore = mock(S3BlobStore.class);
when(blobStore.bucket()).thenReturn(bucketName);
when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+ when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize);
final S3Client client = mock(S3Client.class);
doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference();
@@ -299,8 +299,11 @@ public void testDelete() throws IOException {
when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable);
final List<String> keysDeleted = new ArrayList<>();
+ AtomicInteger deleteCount = new AtomicInteger();
doAnswer(invocation -> {
DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0);
+ deleteCount.getAndIncrement();
+ logger.info("Object sizes are{}", deleteObjectsRequest.delete().objects().size());
keysDeleted.addAll(deleteObjectsRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList()));
return DeleteObjectsResponse.builder().build();
}).when(client).deleteObjects(any(DeleteObjectsRequest.class));
@@ -313,6 +316,8 @@ public void testDelete() throws IOException {
// keysDeleted will have blobPath also
assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1);
assertTrue(keysDeleted.contains(blobPath.buildAsString()));
+ // deleteObjects should be called once per batch of at most bulkDeleteSize keys
+ assertEquals((int) Math.ceil(((double) keysDeleted.size() + 1) / bulkDeleteSize), deleteCount.get());
keysDeleted.remove(blobPath.buildAsString());
assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted));
}
@@ -919,7 +924,7 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberO
testListBlobsByPrefixInLexicographicOrder(12, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC);
}
- public void testReadBlobAsync() throws Exception {
+ public void testReadBlobAsyncMultiPart() throws Exception {
final String bucketName = randomAlphaOfLengthBetween(1, 10);
final String blobName = randomAlphaOfLengthBetween(1, 10);
final String checksum = randomAlphaOfLength(10);
@@ -932,11 +937,7 @@ public void testReadBlobAsync() throws Exception {
final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null)
);
- final AsyncTransferManager asyncTransferManager = new AsyncTransferManager(
- 10000L,
- mock(ExecutorService.class),
- mock(ExecutorService.class)
- );
+
final S3BlobStore blobStore = mock(S3BlobStore.class);
final BlobPath blobPath = new BlobPath();
@@ -944,7 +945,6 @@ public void testReadBlobAsync() throws Exception {
when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
when(blobStore.serverSideEncryption()).thenReturn(false);
when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
- when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager);
CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
getObjectAttributesResponseCompletableFuture.complete(
@@ -976,7 +976,7 @@ public void testReadBlobAsync() throws Exception {
assertEquals(objectSize, readContext.getBlobSize());
for (int partNumber = 1; partNumber < objectPartCount; partNumber++) {
- InputStreamContainer inputStreamContainer = readContext.getPartStreams().get(partNumber);
+ InputStreamContainer inputStreamContainer = readContext.getPartStreams().get(partNumber).get().join();
final int offset = partNumber * partSize;
assertEquals(partSize, inputStreamContainer.getContentLength());
assertEquals(offset, inputStreamContainer.getOffset());
@@ -984,6 +984,60 @@ public void testReadBlobAsync() throws Exception {
}
}
+ public void testReadBlobAsyncSinglePart() throws Exception {
+ final String bucketName = randomAlphaOfLengthBetween(1, 10);
+ final String blobName = randomAlphaOfLengthBetween(1, 10);
+ final String checksum = randomAlphaOfLength(10);
+
+ final int objectSize = 100;
+
+ final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+ final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
+ AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null)
+ );
+ final S3BlobStore blobStore = mock(S3BlobStore.class);
+ final BlobPath blobPath = new BlobPath();
+
+ when(blobStore.bucket()).thenReturn(bucketName);
+ when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+ when(blobStore.serverSideEncryption()).thenReturn(false);
+ when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
+
+ CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+ getObjectAttributesResponseCompletableFuture.complete(
+ GetObjectAttributesResponse.builder()
+ .checksum(Checksum.builder().checksumCRC32(checksum).build())
+ .objectSize((long) objectSize)
+ .build()
+ );
+ when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn(
+ getObjectAttributesResponseCompletableFuture
+ );
+
+ mockObjectResponse(s3AsyncClient, bucketName, blobName, objectSize);
+
+ CountDownLatch countDownLatch = new CountDownLatch(1);
+ CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>();
+ LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch);
+
+ final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+ blobContainer.readBlobAsync(blobName, listener);
+ countDownLatch.await();
+
+ assertEquals(1, readContextActionListener.getResponseCount());
+ assertEquals(0, readContextActionListener.getFailureCount());
+ ReadContext readContext = readContextActionListener.getResponse();
+ assertEquals(1, readContext.getNumberOfParts());
+ assertEquals(checksum, readContext.getBlobChecksum());
+ assertEquals(objectSize, readContext.getBlobSize());
+
+ InputStreamContainer inputStreamContainer = readContext.getPartStreams().stream().findFirst().get().get().join();
+ assertEquals(objectSize, inputStreamContainer.getContentLength());
+ assertEquals(0, inputStreamContainer.getOffset());
+ assertEquals(objectSize, inputStreamContainer.getInputStream().readAllBytes().length);
+
+ }
+
public void testReadBlobAsyncFailure() throws Exception {
final String bucketName = randomAlphaOfLengthBetween(1, 10);
final String blobName = randomAlphaOfLengthBetween(1, 10);
@@ -996,11 +1050,7 @@ public void testReadBlobAsyncFailure() throws Exception {
final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null)
);
- final AsyncTransferManager asyncTransferManager = new AsyncTransferManager(
- 10000L,
- mock(ExecutorService.class),
- mock(ExecutorService.class)
- );
+
final S3BlobStore blobStore = mock(S3BlobStore.class);
final BlobPath blobPath = new BlobPath();
@@ -1008,7 +1058,6 @@ public void testReadBlobAsyncFailure() throws Exception {
when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
when(blobStore.serverSideEncryption()).thenReturn(false);
when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
- when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager);
CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
getObjectAttributesResponseCompletableFuture.complete(
@@ -1032,6 +1081,51 @@ public void testReadBlobAsyncFailure() throws Exception {
assertEquals(1, readContextActionListener.getFailureCount());
}
+ public void testReadBlobAsyncOnCompleteFailureMissingData() throws Exception {
+ final String bucketName = randomAlphaOfLengthBetween(1, 10);
+ final String blobName = randomAlphaOfLengthBetween(1, 10);
+ final String checksum = randomAlphaOfLength(10);
+
+ final long objectSize = 100L;
+ final int objectPartCount = 10;
+
+ final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+ final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
+ AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null)
+ );
+
+ final S3BlobStore blobStore = mock(S3BlobStore.class);
+ final BlobPath blobPath = new BlobPath();
+
+ when(blobStore.bucket()).thenReturn(bucketName);
+ when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+ when(blobStore.serverSideEncryption()).thenReturn(false);
+ when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
+
+ CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+ getObjectAttributesResponseCompletableFuture.complete(
+ GetObjectAttributesResponse.builder()
+ .checksum(Checksum.builder().build())
+ .objectSize(null)
+ .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build())
+ .build()
+ );
+ when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn(
+ getObjectAttributesResponseCompletableFuture
+ );
+
+ CountDownLatch countDownLatch = new CountDownLatch(1);
+ CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>();
+ LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch);
+
+ final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+ blobContainer.readBlobAsync(blobName, listener);
+ countDownLatch.await();
+
+ assertEquals(0, readContextActionListener.getResponseCount());
+ assertEquals(1, readContextActionListener.getFailureCount());
+ }
+
public void testGetBlobMetadata() throws Exception {
final String checksum = randomAlphaOfLengthBetween(1, 10);
final long objectSize = 100L;
@@ -1071,7 +1165,7 @@ public void testGetBlobPartInputStream() throws Exception {
final String blobName = randomAlphaOfLengthBetween(1, 10);
final String bucketName = randomAlphaOfLengthBetween(1, 10);
final long contentLength = 10L;
- final String contentRange = "bytes 0-10/100";
+ final String contentRange = "bytes 10-20/100";
final InputStream inputStream = ResponseInputStream.nullInputStream();
final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
@@ -1095,9 +1189,17 @@ public void testGetBlobPartInputStream() throws Exception {
)
).thenReturn(getObjectPartResponse);
+ // Header based offset in case of a multi part object request
InputStreamContainer inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, 0)
.get();
+ assertEquals(10, inputStreamContainer.getOffset());
+ assertEquals(contentLength, inputStreamContainer.getContentLength());
+ assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available());
+
+ // 0 offset in case of a single part object request
+ inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, null).get();
+
assertEquals(0, inputStreamContainer.getOffset());
assertEquals(contentLength, inputStreamContainer.getContentLength());
assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available());
@@ -1108,28 +1210,65 @@ public void testTransformResponseToInputStreamContainer() throws Exception {
final long contentLength = 10L;
final InputStream inputStream = ResponseInputStream.nullInputStream();
- final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
-
GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).build();
+ // Exception when content range absent for multipart object
ResponseInputStream<GetObjectResponse> responseInputStreamNoRange = new ResponseInputStream<>(getObjectResponse, inputStream);
- assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange));
+ assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange, true));
+
+ // No exception when content range absent for single part object
+ ResponseInputStream<GetObjectResponse> responseInputStreamNoRangeSinglePart = new ResponseInputStream<>(
+ getObjectResponse,
+ inputStream
+ );
+ InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(
+ responseInputStreamNoRangeSinglePart,
+ false
+ );
+ assertEquals(contentLength, inputStreamContainer.getContentLength());
+ assertEquals(0, inputStreamContainer.getOffset());
+ // Exception when length is absent
getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).build();
ResponseInputStream<GetObjectResponse> responseInputStreamNoContentLength = new ResponseInputStream<>(
getObjectResponse,
inputStream
);
- assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength));
+ assertThrows(
+ SdkException.class,
+ () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength, true)
+ );
+ // No exception when range and length both are present
getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).contentLength(contentLength).build();
ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
- InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream);
+ inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream, true);
assertEquals(contentLength, inputStreamContainer.getContentLength());
assertEquals(0, inputStreamContainer.getOffset());
assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available());
}
+ private void mockObjectResponse(S3AsyncClient s3AsyncClient, String bucketName, String blobName, int objectSize) {
+
+ final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(objectSize));
+
+ GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength((long) objectSize).build();
+
+ CompletableFuture<ResponseInputStream<GetObjectResponse>> getObjectPartResponse = new CompletableFuture<>();
+ ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
+ getObjectPartResponse.complete(responseInputStream);
+
+ GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(blobName).build();
+
+ when(
+ s3AsyncClient.getObject(
+ eq(getObjectRequest),
+ ArgumentMatchers.<AsyncResponseTransformer<GetObjectResponse, ResponseInputStream<GetObjectResponse>>>any()
+ )
+ ).thenReturn(getObjectPartResponse);
+
+ }
+
private void mockObjectPartResponse(
S3AsyncClient s3AsyncClient,
String bucketName,
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java
index 533c3aa17009d..e65ca69a5047b 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java
@@ -36,17 +36,20 @@
import org.opensearch.cluster.metadata.RepositoryMetadata;
import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;
import org.opensearch.core.common.unit.ByteSizeUnit;
import org.opensearch.core.common.unit.ByteSizeValue;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.indices.recovery.RecoverySettings;
import org.opensearch.repositories.RepositoryException;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
import org.opensearch.repositories.blobstore.BlobStoreTestUtil;
import org.opensearch.test.OpenSearchTestCase;
import org.hamcrest.Matchers;
import java.nio.file.Path;
+import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.containsString;
@@ -122,7 +125,8 @@ public void testBasePathSetting() {
}
public void testDefaultBufferSize() {
- final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY);
+ Settings settings = Settings.builder().build();
+ final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", settings);
try (S3Repository s3repo = createS3Repo(metadata)) {
assertThat(s3repo.getBlobStore(), is(nullValue()));
s3repo.start();
@@ -133,6 +137,26 @@ public void testDefaultBufferSize() {
}
}
+ public void testIsReloadable() {
+ final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY);
+ try (S3Repository s3repo = createS3Repo(metadata)) {
+ assertTrue(s3repo.isReloadable());
+ }
+ }
+
+ public void testRestrictedSettingsDefault() {
+ final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY);
+ try (S3Repository s3repo = createS3Repo(metadata)) {
+ List<Setting<?>> restrictedSettings = s3repo.getRestrictedSystemRepositorySettings();
+ assertThat(restrictedSettings.size(), is(5));
+ assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING));
+ assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING));
+ assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY));
+ assertTrue(restrictedSettings.contains(S3Repository.BUCKET_SETTING));
+ assertTrue(restrictedSettings.contains(S3Repository.BASE_PATH_SETTING));
+ }
+ }
+
private S3Repository createS3Repo(RepositoryMetadata metadata) {
return new S3Repository(
metadata,
diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java
index 2cac58262c75a..e1655cc5e0784 100644
--- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java
+++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java
@@ -47,9 +47,9 @@ protected Directory getDirectory(Path file) throws IOException {
@Override
public void testCreateOutputForExistingFile() throws IOException {
- /**
- * This test is disabled because {@link SmbDirectoryWrapper} opens existing file
- * with an explicit StandardOpenOption.TRUNCATE_EXISTING option.
+ /*
+ This test is disabled because {@link SmbDirectoryWrapper} opens existing file
+ with an explicit StandardOpenOption.TRUNCATE_EXISTING option.
*/
}
}
diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java
index 7390759029dfc..6f821147c3079 100644
--- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java
+++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java
@@ -26,9 +26,9 @@ protected Directory getDirectory(Path file) throws IOException {
@Override
public void testCreateOutputForExistingFile() throws IOException {
- /**
- * This test is disabled because {@link SmbDirectoryWrapper} opens existing file
- * with an explicit StandardOpenOption.TRUNCATE_EXISTING option.
+ /*
+ This test is disabled because {@link SmbDirectoryWrapper} opens existing file
+ with an explicit StandardOpenOption.TRUNCATE_EXISTING option.
*/
}
}
diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle
index 45c9f522c09d8..04fff20947b4f 100644
--- a/plugins/telemetry-otel/build.gradle
+++ b/plugins/telemetry-otel/build.gradle
@@ -37,6 +37,7 @@ dependencies {
runtimeOnly "com.squareup.okhttp3:okhttp:4.11.0"
runtimeOnly "com.squareup.okio:okio-jvm:3.5.0"
runtimeOnly "io.opentelemetry:opentelemetry-exporter-sender-okhttp:${versions.opentelemetry}"
+ api "io.opentelemetry:opentelemetry-extension-incubator:${versions.opentelemetry}-alpha"
testImplementation "io.opentelemetry:opentelemetry-sdk-testing:${versions.opentelemetry}"
}
@@ -80,29 +81,12 @@ thirdPartyAudit {
'io.opentelemetry.api.events.EventEmitter',
'io.opentelemetry.api.events.EventEmitterBuilder',
'io.opentelemetry.api.events.EventEmitterProvider',
- 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleHistogramBuilder',
- 'io.opentelemetry.extension.incubator.metrics.ExtendedLongHistogramBuilder',
'io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties',
'io.opentelemetry.sdk.autoconfigure.spi.logs.ConfigurableLogRecordExporterProvider',
'io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider',
'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider',
- 'io.opentelemetry.extension.incubator.metrics.DoubleCounterAdviceConfigurer',
- 'io.opentelemetry.extension.incubator.metrics.DoubleGauge',
- 'io.opentelemetry.extension.incubator.metrics.DoubleGaugeAdviceConfigurer',
- 'io.opentelemetry.extension.incubator.metrics.DoubleHistogramAdviceConfigurer',
- 'io.opentelemetry.extension.incubator.metrics.DoubleUpDownCounterAdviceConfigurer',
- 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleCounterBuilder',
- 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleGaugeBuilder',
- 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleUpDownCounterBuilder',
- 'io.opentelemetry.extension.incubator.metrics.ExtendedLongCounterBuilder',
- 'io.opentelemetry.extension.incubator.metrics.ExtendedLongGaugeBuilder',
- 'io.opentelemetry.extension.incubator.metrics.ExtendedLongUpDownCounterBuilder',
- 'io.opentelemetry.extension.incubator.metrics.LongCounterAdviceConfigurer',
- 'io.opentelemetry.extension.incubator.metrics.LongGauge',
- 'io.opentelemetry.extension.incubator.metrics.LongGaugeAdviceConfigurer',
- 'io.opentelemetry.extension.incubator.metrics.LongHistogramAdviceConfigurer',
- 'io.opentelemetry.extension.incubator.metrics.LongUpDownCounterAdviceConfigurer',
- 'kotlin.io.path.PathsKt'
+ 'kotlin.io.path.PathsKt',
+ 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider'
)
}
diff --git a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties
index 544f42bd5513b..8dec1119eec66 100644
--- a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties
+++ b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties
@@ -25,3 +25,23 @@ logger.exporter.name = io.opentelemetry.exporter.logging.LoggingSpanExporter
logger.exporter.level = INFO
logger.exporter.appenderRef.tracing.ref = tracing
logger.exporter.additivity = false
+
+
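+# Dedicated rolling log file for OTel metrics, written via the metrics exporter logger below.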
+appender.metrics.type = RollingFile
+appender.metrics.name = metrics
+appender.metrics.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_metrics.log
+appender.metrics.filePermissions = rw-r-----
+appender.metrics.layout.type = PatternLayout
+appender.metrics.layout.pattern = %m%n
+appender.metrics.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_metrics-%i.log.gz
+appender.metrics.policies.type = Policies
+appender.metrics.policies.size.type = SizeBasedTriggeringPolicy
+appender.metrics.policies.size.size = 1GB
+appender.metrics.strategy.type = DefaultRolloverStrategy
+appender.metrics.strategy.max = 4
+
+
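+# Route LoggingMetricExporter output to the metrics appender only; additivity is disabled below.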
+logger.metrics_exporter.name = io.opentelemetry.exporter.logging.LoggingMetricExporter
+logger.metrics_exporter.level = INFO
+logger.metrics_exporter.appenderRef.metrics.ref = metrics
+logger.metrics_exporter.additivity = false
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1
new file mode 100644
index 0000000000000..bde43937e82e4
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.30.1-alpha.jar.sha1
@@ -0,0 +1 @@
+bfcea9bd71f97dd4e8a4f92c15ba5659fb07ff05
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelAttributesConverter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java
similarity index 71%
rename from plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelAttributesConverter.java
rename to plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java
index 4d0966e6b5185..98d265e92ba3c 100644
--- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelAttributesConverter.java
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java
@@ -6,7 +6,9 @@
* compatible open source license.
*/
-package org.opensearch.telemetry.tracing;
+package org.opensearch.telemetry;
+
+import org.opensearch.telemetry.metrics.tags.Tags;
import java.util.Locale;
@@ -16,7 +18,7 @@
/**
* Converts {@link org.opensearch.telemetry.tracing.attributes.Attributes} to OTel {@link Attributes}
*/
-final class OTelAttributesConverter {
+public final class OTelAttributesConverter {
/**
* Constructor.
@@ -28,7 +30,7 @@ private OTelAttributesConverter() {}
* @param attributes attributes
* @return otel attributes.
*/
- static Attributes convert(org.opensearch.telemetry.tracing.attributes.Attributes attributes) {
+ public static Attributes convert(org.opensearch.telemetry.tracing.attributes.Attributes attributes) {
AttributesBuilder attributesBuilder = Attributes.builder();
if (attributes != null) {
attributes.getAttributesMap().forEach((x, y) -> addSpanAttribute(x, y, attributesBuilder));
@@ -49,4 +51,17 @@ private static void addSpanAttribute(String key, Object value, AttributesBuilder
throw new IllegalArgumentException(String.format(Locale.ROOT, "Span attribute value %s type not supported", value));
}
}
+
+ /**
+ * Attribute converter.
+ * @param tags attributes
+ * @return otel attributes.
+ */
+ public static Attributes convert(Tags tags) {
+ AttributesBuilder attributesBuilder = Attributes.builder();
+ if (tags != null) {
+ tags.getTagsMap().forEach((x, y) -> addSpanAttribute(x, y, attributesBuilder));
+ }
+ return attributesBuilder.build();
+ }
}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java
index 1af88196e3727..b57876c9310f3 100644
--- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java
@@ -12,7 +12,7 @@
import org.opensearch.common.settings.Settings;
import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.TelemetryPlugin;
-import org.opensearch.telemetry.metrics.MetricsTelemetry;
+import org.opensearch.telemetry.metrics.OTelMetricsTelemetry;
import org.opensearch.telemetry.tracing.OTelResourceProvider;
import org.opensearch.telemetry.tracing.OTelTelemetry;
import org.opensearch.telemetry.tracing.OTelTracingTelemetry;
@@ -21,11 +21,18 @@
import java.util.List;
import java.util.Optional;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+
/**
* Telemetry plugin based on Otel
*/
public class OTelTelemetryPlugin extends Plugin implements TelemetryPlugin {
+ /**
+ * Instrumentation scope name.
+ */
+ public static final String INSTRUMENTATION_SCOPE_NAME = "org.opensearch.telemetry";
+
static final String OTEL_TRACER_NAME = "otel";
private final Settings settings;
@@ -44,7 +51,8 @@ public List<Setting<?>> getSettings() {
OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING,
OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING,
OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING,
- OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING
+ OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING,
+ OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING
);
}
@@ -59,8 +67,11 @@ public String getName() {
}
private Telemetry telemetry(TelemetrySettings telemetrySettings) {
- return new OTelTelemetry(new OTelTracingTelemetry(OTelResourceProvider.get(telemetrySettings, settings)), new MetricsTelemetry() {
- });
+ final OpenTelemetrySdk openTelemetry = OTelResourceProvider.get(telemetrySettings, settings);
+ return new OTelTelemetry(
+ new OTelTracingTelemetry<>(openTelemetry, openTelemetry.getSdkTracerProvider()),
+ new OTelMetricsTelemetry<>(openTelemetry.getSdkMeterProvider())
+ );
}
}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java
index 59c87cca22986..8e23f724b4570 100644
--- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java
@@ -11,13 +11,16 @@
import org.opensearch.SpecialPermission;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory;
import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
+import io.opentelemetry.exporter.logging.LoggingMetricExporter;
import io.opentelemetry.exporter.logging.LoggingSpanExporter;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
import io.opentelemetry.sdk.trace.export.SpanExporter;
/**
@@ -83,4 +86,28 @@ private OTelTelemetrySettings() {}
Setting.Property.NodeScope,
Setting.Property.Final
);
+
+ /**
+ * Metrics Exporter type setting.
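+     * Defaults to {@link LoggingMetricExporter}; any exporter class exposing a static create or getDefault
+     * factory method can be plugged in, e.g. io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter.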
+ */
+ @SuppressWarnings("unchecked")
+    public static final Setting<Class<MetricExporter>> OTEL_METRICS_EXPORTER_CLASS_SETTING = new Setting<>(
+ "telemetry.otel.metrics.exporter.class",
+ LoggingMetricExporter.class.getName(),
+ className -> {
+ // Check we ourselves are not being called by unprivileged code.
+ SpecialPermission.check();
+
+ try {
+                return AccessController.doPrivileged((PrivilegedExceptionAction<Class<MetricExporter>>) () -> {
+ final ClassLoader loader = OTelMetricsExporterFactory.class.getClassLoader();
+                    return (Class<MetricExporter>) loader.loadClass(className);
+ });
+ } catch (PrivilegedActionException ex) {
+                throw new IllegalStateException("Unable to load metric exporter class: " + className, ex.getCause());
+ }
+ },
+ Setting.Property.NodeScope,
+ Setting.Property.Final
+ );
}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java
new file mode 100644
index 0000000000000..b72f63e027243
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics;
+
+import org.opensearch.telemetry.OTelAttributesConverter;
+import org.opensearch.telemetry.metrics.tags.Tags;
+
+import io.opentelemetry.api.metrics.DoubleCounter;
+
+/**
+ * OTel Counter
+ */
+class OTelCounter implements Counter {
+
+ private final DoubleCounter otelDoubleCounter;
+
+ /**
+ * Constructor
+ * @param otelDoubleCounter delegate counter.
+ */
+ public OTelCounter(DoubleCounter otelDoubleCounter) {
+ this.otelDoubleCounter = otelDoubleCounter;
+ }
+
+ @Override
+ public void add(double value) {
+ otelDoubleCounter.add(value);
+ }
+
+ @Override
+ public void add(double value, Tags tags) {
+ otelDoubleCounter.add(value, OTelAttributesConverter.convert(tags));
+ }
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java
new file mode 100644
index 0000000000000..8598e5976d20d
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics;
+
+import org.opensearch.telemetry.OTelTelemetryPlugin;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+import io.opentelemetry.api.metrics.DoubleCounter;
+import io.opentelemetry.api.metrics.DoubleUpDownCounter;
+import io.opentelemetry.api.metrics.Meter;
+import io.opentelemetry.api.metrics.MeterProvider;
+
+/**
+ * OTel implementation for {@link MetricsTelemetry}
+ */
+public class OTelMetricsTelemetry<T extends MeterProvider & Closeable> implements MetricsTelemetry {
+ private final Meter otelMeter;
+ private final T meterProvider;
+
+ /**
+ * Creates OTel based {@link MetricsTelemetry}.
+ * @param meterProvider {@link MeterProvider} instance
+ */
+ public OTelMetricsTelemetry(T meterProvider) {
+ this.meterProvider = meterProvider;
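+        // Instruments created through this telemetry all share the plugin's instrumentation scope.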
+ this.otelMeter = meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME);
+ }
+
+ @Override
+ public Counter createCounter(String name, String description, String unit) {
+ DoubleCounter doubleCounter = AccessController.doPrivileged(
+            (PrivilegedAction<DoubleCounter>) () -> otelMeter.counterBuilder(name)
+ .setUnit(unit)
+ .setDescription(description)
+ .ofDoubles()
+ .build()
+ );
+ return new OTelCounter(doubleCounter);
+ }
+
+ @Override
+ public Counter createUpDownCounter(String name, String description, String unit) {
+ DoubleUpDownCounter doubleUpDownCounter = AccessController.doPrivileged(
+            (PrivilegedAction<DoubleUpDownCounter>) () -> otelMeter.upDownCounterBuilder(name)
+ .setUnit(unit)
+ .setDescription(description)
+ .ofDoubles()
+ .build()
+ );
+ return new OTelUpDownCounter(doubleUpDownCounter);
+ }
+
+ @Override
+ public void close() throws IOException {
+ meterProvider.close();
+ }
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java
new file mode 100644
index 0000000000000..2f40881996f7e
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics;
+
+import org.opensearch.telemetry.OTelAttributesConverter;
+import org.opensearch.telemetry.metrics.tags.Tags;
+
+import io.opentelemetry.api.metrics.DoubleUpDownCounter;
+
+/**
+ * OTel UpDownCounter
+ */
+public class OTelUpDownCounter implements Counter {
+
+ private final DoubleUpDownCounter doubleUpDownCounter;
+
+ /**
+ * Constructor
+ * @param doubleUpDownCounter delegate counter.
+ */
+ public OTelUpDownCounter(DoubleUpDownCounter doubleUpDownCounter) {
+ this.doubleUpDownCounter = doubleUpDownCounter;
+ }
+
+ @Override
+ public void add(double value) {
+ doubleUpDownCounter.add(value);
+ }
+
+ @Override
+ public void add(double value, Tags tags) {
+ doubleUpDownCounter.add(value, OTelAttributesConverter.convert(tags));
+ }
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java
new file mode 100644
index 0000000000000..ef5a31e4003ca
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java
@@ -0,0 +1,90 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics.exporter;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.SpecialPermission;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.telemetry.OTelTelemetrySettings;
+
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+import java.lang.reflect.Method;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+
+/**
+ * Factory class to create the {@link MetricExporter} instance.
+ */
+public class OTelMetricsExporterFactory {
+
+ private static final Logger logger = LogManager.getLogger(OTelMetricsExporterFactory.class);
+
+ /**
+ * Base constructor.
+ */
+ private OTelMetricsExporterFactory() {
+
+ }
+
+ /**
+     * Creates the {@link MetricExporter} instance based on the OTEL_METRICS_EXPORTER_CLASS_SETTING value.
+     * As of now, it expects the MetricExporter implementation to expose a static create factory method to
+     * instantiate the MetricExporter.
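+     * If the class instead provides a static getDefault() method (as the OTLP exporters do), that method
+     * is used to obtain the instance.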
+ * @param settings settings.
+ * @return MetricExporter instance.
+ */
+ public static MetricExporter create(Settings settings) {
+        Class<MetricExporter> metricExporterProviderClass = OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.get(settings);
+        MetricExporter metricExporter = instantiateExporter(metricExporterProviderClass);
+        logger.info("Successfully instantiated the MetricExporter class {}", metricExporterProviderClass);
+ return metricExporter;
+ }
+
+    private static MetricExporter instantiateExporter(Class<MetricExporter> exporterProviderClass) {
+ try {
+ // Check we ourselves are not being called by unprivileged code.
+ SpecialPermission.check();
+            return AccessController.doPrivileged((PrivilegedExceptionAction<MetricExporter>) () -> {
+ String methodName = "create";
+ String getDefaultMethod = "getDefault";
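+                // Prefer a static getDefault() factory when the exporter provides one; otherwise use create().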
+ for (Method m : exporterProviderClass.getMethods()) {
+ if (m.getName().equals(getDefaultMethod)) {
+ methodName = getDefaultMethod;
+ break;
+ }
+ }
+ try {
+ return (MetricExporter) MethodHandles.publicLookup()
+ .findStatic(exporterProviderClass, methodName, MethodType.methodType(exporterProviderClass))
+ .asType(MethodType.methodType(MetricExporter.class))
+ .invokeExact();
+ } catch (Throwable e) {
+ if (e.getCause() instanceof NoSuchMethodException) {
+                        throw new IllegalStateException("No create factory method exists in [" + exporterProviderClass.getName() + "]");
+ } else {
+ throw new IllegalStateException(
+ "MetricExporter instantiation failed for class [" + exporterProviderClass.getName() + "]",
+ e.getCause()
+ );
+ }
+ }
+ });
+ } catch (PrivilegedActionException ex) {
+ throw new IllegalStateException(
+ "MetricExporter instantiation failed for class [" + exporterProviderClass.getName() + "]",
+ ex.getCause()
+ );
+ }
+ }
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java
new file mode 100644
index 0000000000000..b48ec3e2336c4
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * This package contains classes needed for exporting metrics.
+ */
+package org.opensearch.telemetry.metrics.exporter;
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java
new file mode 100644
index 0000000000000..803c159eb201a
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * This package contains classes needed for recording metrics.
+ */
+package org.opensearch.telemetry.metrics;
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java
index fe05cc8bb7a41..a6a1f12aab8a9 100644
--- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java
@@ -10,6 +10,7 @@
import org.opensearch.common.settings.Settings;
import org.opensearch.telemetry.TelemetrySettings;
+import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory;
import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory;
import org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler;
import org.opensearch.telemetry.tracing.sampler.RequestSampler;
@@ -18,11 +19,12 @@
import java.security.PrivilegedAction;
import java.util.concurrent.TimeUnit;
-import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator;
import io.opentelemetry.context.propagation.ContextPropagators;
import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
import io.opentelemetry.sdk.resources.Resource;
import io.opentelemetry.sdk.trace.SdkTracerProvider;
import io.opentelemetry.sdk.trace.export.BatchSpanProcessor;
@@ -44,11 +46,11 @@ private OTelResourceProvider() {}
* Creates OpenTelemetry instance with default configuration
* @param telemetrySettings telemetry settings
* @param settings cluster settings
- * @return OpenTelemetry instance
+ * @return OpenTelemetrySdk instance
*/
- public static OpenTelemetry get(TelemetrySettings telemetrySettings, Settings settings) {
+ public static OpenTelemetrySdk get(TelemetrySettings telemetrySettings, Settings settings) {
return AccessController.doPrivileged(
-            (PrivilegedAction<OpenTelemetry>) () -> get(
+            (PrivilegedAction<OpenTelemetrySdk>) () -> get(
settings,
OTelSpanExporterFactory.create(settings),
ContextPropagators.create(W3CTraceContextPropagator.getInstance()),
@@ -63,17 +65,46 @@ public static OpenTelemetry get(TelemetrySettings telemetrySettings, Settings se
* @param spanExporter span exporter instance
* @param contextPropagators context propagator instance
* @param sampler sampler instance
- * @return Opentelemetry instance
+ * @return OpenTelemetrySdk instance
*/
- public static OpenTelemetry get(Settings settings, SpanExporter spanExporter, ContextPropagators contextPropagators, Sampler sampler) {
+ public static OpenTelemetrySdk get(
+ Settings settings,
+ SpanExporter spanExporter,
+ ContextPropagators contextPropagators,
+ Sampler sampler
+ ) {
Resource resource = Resource.create(Attributes.of(ResourceAttributes.SERVICE_NAME, "OpenSearch"));
- SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder()
+ SdkTracerProvider sdkTracerProvider = createSdkTracerProvider(settings, spanExporter, sampler, resource);
+ SdkMeterProvider sdkMeterProvider = createSdkMetricProvider(settings, resource);
+ return OpenTelemetrySdk.builder()
+ .setTracerProvider(sdkTracerProvider)
+ .setMeterProvider(sdkMeterProvider)
+ .setPropagators(contextPropagators)
+ .buildAndRegisterGlobal();
+ }
+
+ private static SdkMeterProvider createSdkMetricProvider(Settings settings, Resource resource) {
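+        // A PeriodicMetricReader pushes collected metrics to the configured exporter at each publish interval.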
+ return SdkMeterProvider.builder()
+ .setResource(resource)
+ .registerMetricReader(
+ PeriodicMetricReader.builder(OTelMetricsExporterFactory.create(settings))
+ .setInterval(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.get(settings).getSeconds(), TimeUnit.SECONDS)
+ .build()
+ )
+ .build();
+ }
+
+ private static SdkTracerProvider createSdkTracerProvider(
+ Settings settings,
+ SpanExporter spanExporter,
+ Sampler sampler,
+ Resource resource
+ ) {
+ return SdkTracerProvider.builder()
.addSpanProcessor(spanProcessor(settings, spanExporter))
.setResource(resource)
.setSampler(sampler)
.build();
-
- return OpenTelemetrySdk.builder().setTracerProvider(sdkTracerProvider).setPropagators(contextPropagators).buildAndRegisterGlobal();
}
private static BatchSpanProcessor spanProcessor(Settings settings, SpanExporter spanExporter) {
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java
index 8ad03d807d9da..fc917968579e1 100644
--- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java
@@ -57,7 +57,9 @@ public void addAttribute(String key, Boolean value) {
@Override
public void setError(Exception exception) {
- delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage());
+ if (exception != null) {
+ delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage());
+ }
}
@Override
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java
index 53066ad4ad444..f88afe623fd56 100644
--- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java
@@ -8,41 +8,38 @@
package org.opensearch.telemetry.tracing;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
+import org.opensearch.telemetry.OTelAttributesConverter;
+import org.opensearch.telemetry.OTelTelemetryPlugin;
import java.io.Closeable;
import java.io.IOException;
import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.trace.TracerProvider;
import io.opentelemetry.context.Context;
/**
* OTel based Telemetry provider
*/
-public class OTelTracingTelemetry implements TracingTelemetry {
-
- private static final Logger logger = LogManager.getLogger(OTelTracingTelemetry.class);
+public class OTelTracingTelemetry<T extends TracerProvider & Closeable> implements TracingTelemetry {
private final OpenTelemetry openTelemetry;
+ private final T tracerProvider;
private final io.opentelemetry.api.trace.Tracer otelTracer;
/**
- * Creates OTel based Telemetry
+ * Creates OTel based {@link TracingTelemetry}
* @param openTelemetry OpenTelemetry instance
+ * @param tracerProvider {@link TracerProvider} instance.
*/
- public OTelTracingTelemetry(OpenTelemetry openTelemetry) {
+ public OTelTracingTelemetry(OpenTelemetry openTelemetry, T tracerProvider) {
this.openTelemetry = openTelemetry;
- this.otelTracer = openTelemetry.getTracer("os-tracer");
-
+ this.tracerProvider = tracerProvider;
+ this.otelTracer = tracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME);
}
@Override
- public void close() {
- try {
- ((Closeable) openTelemetry).close();
- } catch (IOException e) {
- logger.warn("Error while closing Opentelemetry", e);
- }
+ public void close() throws IOException {
+ tracerProvider.close();
}
@Override
diff --git a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
index 726db3d3f4700..9d529ed5a2a56 100644
--- a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
+++ b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
@@ -11,6 +11,7 @@ grant {
permission java.lang.RuntimePermission "accessDeclaredMembers";
permission java.net.NetPermission "getProxySelector";
permission java.net.SocketPermission "*", "connect,resolve";
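+  // Needed by the OpenTelemetry SDK, which reads and writes system properties.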
+ permission java.util.PropertyPermission "*", "read,write";
};
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java
index 8c2b5d14733e2..2fcf89947e537 100644
--- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java
@@ -12,12 +12,15 @@
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.telemetry.metrics.MetricsTelemetry;
+import org.opensearch.telemetry.metrics.OTelMetricsTelemetry;
import org.opensearch.telemetry.tracing.OTelTracingTelemetry;
import org.opensearch.telemetry.tracing.TracingTelemetry;
import org.opensearch.test.OpenSearchTestCase;
import org.junit.After;
import org.junit.Before;
+import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
@@ -25,6 +28,7 @@
import java.util.Set;
import static org.opensearch.telemetry.OTelTelemetryPlugin.OTEL_TRACER_NAME;
+import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING;
import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING;
import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING;
import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING;
@@ -34,41 +38,47 @@
public class OTelTelemetryPluginTests extends OpenSearchTestCase {
- private OTelTelemetryPlugin oTelTracerModulePlugin;
+ private OTelTelemetryPlugin oTelTelemetryPlugin;
    private Optional<Telemetry> telemetry;
private TracingTelemetry tracingTelemetry;
+ private MetricsTelemetry metricsTelemetry;
+
@Before
public void setup() {
// TRACER_EXPORTER_DELAY_SETTING should always be less than 10 seconds because
// io.opentelemetry.sdk.OpenTelemetrySdk.close waits only for 10 seconds for shutdown to complete.
Settings settings = Settings.builder().put(TRACER_EXPORTER_DELAY_SETTING.getKey(), "1s").build();
- oTelTracerModulePlugin = new OTelTelemetryPlugin(settings);
- telemetry = oTelTracerModulePlugin.getTelemetry(
+ oTelTelemetryPlugin = new OTelTelemetryPlugin(settings);
+ telemetry = oTelTelemetryPlugin.getTelemetry(
new TelemetrySettings(Settings.EMPTY, new ClusterSettings(settings, Set.of(TRACER_ENABLED_SETTING, TRACER_SAMPLER_PROBABILITY)))
);
tracingTelemetry = telemetry.get().getTracingTelemetry();
+ metricsTelemetry = telemetry.get().getMetricsTelemetry();
}
public void testGetTelemetry() {
        Set<Setting<?>> allTracerSettings = new HashSet<>();
ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add));
- assertEquals(OTEL_TRACER_NAME, oTelTracerModulePlugin.getName());
+ assertEquals(OTEL_TRACER_NAME, oTelTelemetryPlugin.getName());
assertTrue(tracingTelemetry instanceof OTelTracingTelemetry);
+ assertTrue(metricsTelemetry instanceof OTelMetricsTelemetry);
assertEquals(
Arrays.asList(
TRACER_EXPORTER_BATCH_SIZE_SETTING,
TRACER_EXPORTER_DELAY_SETTING,
TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING,
- OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING
+ OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING,
+ OTEL_METRICS_EXPORTER_CLASS_SETTING
),
- oTelTracerModulePlugin.getSettings()
+ oTelTelemetryPlugin.getSettings()
);
}
@After
- public void cleanup() {
+ public void cleanup() throws IOException {
tracingTelemetry.close();
+ metricsTelemetry.close();
}
}
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java
new file mode 100644
index 0000000000000..233c93e6b9a36
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java
@@ -0,0 +1,107 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics;
+
+import org.opensearch.telemetry.OTelAttributesConverter;
+import org.opensearch.telemetry.OTelTelemetryPlugin;
+import org.opensearch.telemetry.metrics.tags.Tags;
+import org.opensearch.test.OpenSearchTestCase;
+
+import io.opentelemetry.api.metrics.DoubleCounter;
+import io.opentelemetry.api.metrics.DoubleCounterBuilder;
+import io.opentelemetry.api.metrics.DoubleUpDownCounter;
+import io.opentelemetry.api.metrics.DoubleUpDownCounterBuilder;
+import io.opentelemetry.api.metrics.LongCounterBuilder;
+import io.opentelemetry.api.metrics.LongUpDownCounterBuilder;
+import io.opentelemetry.api.metrics.Meter;
+import io.opentelemetry.api.metrics.MeterProvider;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class OTelMetricsTelemetryTests extends OpenSearchTestCase {
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public void testCounter() {
+ String counterName = "test-counter";
+ String description = "test";
+ String unit = "1";
+ Meter mockMeter = mock(Meter.class);
+ DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class);
+ LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class);
+ DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class);
+ MeterProvider meterProvider = mock(MeterProvider.class);
+ when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter);
+ MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(meterProvider);
+ when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder);
+ when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder);
+ when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder);
+ when(mockOTelLongCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleCounterBuilder);
+ when(mockOTelDoubleCounterBuilder.build()).thenReturn(mockOTelDoubleCounter);
+
+ Counter counter = metricsTelemetry.createCounter(counterName, description, unit);
+ counter.add(1.0);
+ verify(mockOTelDoubleCounter).add(1.0);
+ Tags tags = Tags.create().addTag("test", "test");
+ counter.add(2.0, tags);
+ verify(mockOTelDoubleCounter).add(2.0, OTelAttributesConverter.convert(tags));
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public void testCounterNegativeValue() {
+ String counterName = "test-counter";
+ String description = "test";
+ String unit = "1";
+ Meter mockMeter = mock(Meter.class);
+ DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class);
+ LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class);
+ DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class);
+
+ MeterProvider meterProvider = mock(MeterProvider.class);
+ when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter);
+ MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(meterProvider);
+ when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder);
+ when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder);
+ when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder);
+ when(mockOTelLongCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleCounterBuilder);
+ when(mockOTelDoubleCounterBuilder.build()).thenReturn(mockOTelDoubleCounter);
+
+ Counter counter = metricsTelemetry.createCounter(counterName, description, unit);
+ counter.add(-1.0);
+ verify(mockOTelDoubleCounter).add(-1.0);
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public void testUpDownCounter() {
+ String counterName = "test-counter";
+ String description = "test";
+ String unit = "1";
+ Meter mockMeter = mock(Meter.class);
+ DoubleUpDownCounter mockOTelUpDownDoubleCounter = mock(DoubleUpDownCounter.class);
+ LongUpDownCounterBuilder mockOTelLongUpDownCounterBuilder = mock(LongUpDownCounterBuilder.class);
+ DoubleUpDownCounterBuilder mockOTelDoubleUpDownCounterBuilder = mock(DoubleUpDownCounterBuilder.class);
+
+ MeterProvider meterProvider = mock(MeterProvider.class);
+ when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter);
+ MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(meterProvider);
+ when(mockMeter.upDownCounterBuilder(counterName)).thenReturn(mockOTelLongUpDownCounterBuilder);
+ when(mockOTelLongUpDownCounterBuilder.setDescription(description)).thenReturn(mockOTelLongUpDownCounterBuilder);
+ when(mockOTelLongUpDownCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongUpDownCounterBuilder);
+ when(mockOTelLongUpDownCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleUpDownCounterBuilder);
+ when(mockOTelDoubleUpDownCounterBuilder.build()).thenReturn(mockOTelUpDownDoubleCounter);
+
+ Counter counter = metricsTelemetry.createUpDownCounter(counterName, description, unit);
+ counter.add(1.0);
+ verify(mockOTelUpDownDoubleCounter).add(1.0);
+ Tags tags = Tags.create().addTag("test", "test");
+ counter.add(-2.0, tags);
+ verify(mockOTelUpDownDoubleCounter).add((-2.0), OTelAttributesConverter.convert(tags));
+ }
+}
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java
new file mode 100644
index 0000000000000..65c52911dbef9
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics.exporter;
+
+import java.util.Collection;
+
+import io.opentelemetry.sdk.common.CompletableResultCode;
+import io.opentelemetry.sdk.metrics.InstrumentType;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+
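+/**
+ * No-op {@link MetricExporter} that deliberately lacks a create/getDefault factory method; the factory
+ * tests use it to exercise the error handling path.
+ */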
+public class DummyMetricExporter implements MetricExporter {
+ @Override
+ public CompletableResultCode export(Collection metrics) {
+ return null;
+ }
+
+ @Override
+ public CompletableResultCode flush() {
+ return null;
+ }
+
+ @Override
+ public CompletableResultCode shutdown() {
+ return null;
+ }
+
+ @Override
+ public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) {
+ return null;
+ }
+}
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java
new file mode 100644
index 0000000000000..e68da030bfb52
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java
@@ -0,0 +1,78 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics.exporter;
+
+import org.opensearch.common.settings.Settings;
+import org.opensearch.telemetry.OTelTelemetrySettings;
+import org.opensearch.test.OpenSearchTestCase;
+
+import io.opentelemetry.exporter.logging.LoggingMetricExporter;
+import io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+
+public class OTelMetricsExporterFactoryTests extends OpenSearchTestCase {
+
+ public void testMetricsExporterDefault() {
+ Settings settings = Settings.builder().build();
+ MetricExporter metricExporter = OTelMetricsExporterFactory.create(settings);
+ assertTrue(metricExporter instanceof LoggingMetricExporter);
+ }
+
+ public void testMetricsExporterLogging() {
+ Settings settings = Settings.builder()
+ .put(
+ OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(),
+ "io.opentelemetry.exporter.logging.LoggingMetricExporter"
+ )
+ .build();
+ MetricExporter metricExporter = OTelMetricsExporterFactory.create(settings);
+ assertTrue(metricExporter instanceof LoggingMetricExporter);
+ }
+
+ public void testMetricExporterInvalid() {
+ Settings settings = Settings.builder().put(OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), "abc").build();
+ assertThrows(IllegalArgumentException.class, () -> OTelMetricsExporterFactory.create(settings));
+ }
+
+ public void testMetricExporterNoCreateFactoryMethod() {
+ Settings settings = Settings.builder()
+ .put(
+ OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(),
+ "org.opensearch.telemetry.metrics.exporter.DummyMetricExporter"
+ )
+ .build();
+ IllegalStateException exception = assertThrows(IllegalStateException.class, () -> OTelMetricsExporterFactory.create(settings));
+ assertEquals(
+ "MetricExporter instantiation failed for class [org.opensearch.telemetry.metrics.exporter.DummyMetricExporter]",
+ exception.getMessage()
+ );
+ }
+
+ public void testMetricExporterNonMetricExporterClass() {
+ Settings settings = Settings.builder()
+ .put(OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), "java.lang.String")
+ .build();
+ IllegalStateException exception = assertThrows(IllegalStateException.class, () -> OTelMetricsExporterFactory.create(settings));
+ assertEquals("MetricExporter instantiation failed for class [java.lang.String]", exception.getMessage());
+ assertTrue(exception.getCause() instanceof NoSuchMethodError);
+
+ }
+
+ public void testMetricExporterGetDefaultMethod() {
+ Settings settings = Settings.builder()
+ .put(
+ OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(),
+ "io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter"
+ )
+ .build();
+
+ assertTrue(OTelMetricsExporterFactory.create(settings) instanceof OtlpGrpcMetricExporter);
+ }
+
+}
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java
index d992daec1b7bb..ee67384d01759 100644
--- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java
@@ -8,6 +8,8 @@
package org.opensearch.telemetry.tracing;
+import org.opensearch.telemetry.OTelAttributesConverter;
+import org.opensearch.telemetry.metrics.tags.Tags;
import org.opensearch.telemetry.tracing.attributes.Attributes;
import org.opensearch.test.OpenSearchTestCase;
@@ -19,13 +21,13 @@
public class OTelAttributesConverterTests extends OpenSearchTestCase {
public void testConverterNullAttributes() {
- io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(null);
+ io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert((Attributes) null);
assertEquals(0, otelAttributes.size());
}
public void testConverterEmptyAttributes() {
Attributes attributes = Attributes.EMPTY;
- io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(null);
+ io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(attributes);
assertEquals(0, otelAttributes.size());
}
@@ -47,4 +49,12 @@ public void testConverterMultipleAttributes() {
assertEquals(4, otelAttributes.size());
otelAttributes.asMap().forEach((x, y) -> assertEquals(attributeMap.get(x.getKey()), y));
}
+
+ public void testConverterMultipleTags() {
+        Tags tags = Tags.create().addTag("key1", 1L).addTag("key2", 1.0).addTag("key3", true).addTag("key4", "value4");
+        Map<String, ?> tagsMap = tags.getTagsMap();
+ io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(tags);
+ assertEquals(4, otelAttributes.size());
+ otelAttributes.asMap().forEach((x, y) -> assertEquals(tagsMap.get(x.getKey()), y));
+ }
}
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java
index 505756318ff62..1a508ed252493 100644
--- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java
@@ -8,16 +8,15 @@
package org.opensearch.telemetry.tracing;
+import org.opensearch.telemetry.OTelTelemetryPlugin;
import org.opensearch.telemetry.tracing.attributes.Attributes;
import org.opensearch.test.OpenSearchTestCase;
-import java.util.Collections;
-import java.util.Map;
-
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.api.trace.SpanBuilder;
import io.opentelemetry.api.trace.Tracer;
+import io.opentelemetry.api.trace.TracerProvider;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
@@ -26,29 +25,31 @@
import static org.mockito.Mockito.when;
public class OTelTracingTelemetryTests extends OpenSearchTestCase {
-
+ @SuppressWarnings({ "rawtypes", "unchecked" })
public void testCreateSpanWithoutParent() {
OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class);
Tracer mockTracer = mock(Tracer.class);
- when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer);
+ TracerProvider mockTracerProvider = mock(TracerProvider.class);
+ when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer);
SpanBuilder mockSpanBuilder = mock(SpanBuilder.class);
when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder);
when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder);
when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class));
when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder);
- Map attributeMap = Collections.singletonMap("name", "value");
Attributes attributes = Attributes.create().addAttribute("name", "value");
- TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry);
+ TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider);
Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), null);
verify(mockSpanBuilder, never()).setParent(any());
verify(mockSpanBuilder).setAllAttributes(createAttribute(attributes));
assertNull(span.getParentSpan());
}
+ @SuppressWarnings({ "rawtypes", "unchecked" })
public void testCreateSpanWithParent() {
OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class);
Tracer mockTracer = mock(Tracer.class);
- when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer);
+ TracerProvider mockTracerProvider = mock(TracerProvider.class);
+ when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer);
SpanBuilder mockSpanBuilder = mock(SpanBuilder.class);
when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder);
when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder);
@@ -58,7 +59,7 @@ public void testCreateSpanWithParent() {
Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null);
- TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry);
+ TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider);
Attributes attributes = Attributes.create().addAttribute("name", 1l);
Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan);
@@ -69,10 +70,12 @@ public void testCreateSpanWithParent() {
assertEquals("parent_span", span.getParentSpan().getSpanName());
}
+ @SuppressWarnings({ "rawtypes", "unchecked" })
public void testCreateSpanWithParentWithMultipleAttributes() {
OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class);
Tracer mockTracer = mock(Tracer.class);
- when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer);
+ TracerProvider mockTracerProvider = mock(TracerProvider.class);
+ when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer);
SpanBuilder mockSpanBuilder = mock(SpanBuilder.class);
when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder);
when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder);
@@ -82,7 +85,7 @@ public void testCreateSpanWithParentWithMultipleAttributes() {
Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null);
- TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry);
+ TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider);
Attributes attributes = Attributes.create()
.addAttribute("key1", 1l)
.addAttribute("key2", 2.0)
@@ -115,12 +118,14 @@ private io.opentelemetry.api.common.Attributes createAttributeLong(Attributes at
return attributesBuilder.build();
}
+ @SuppressWarnings({ "rawtypes", "unchecked" })
public void testGetContextPropagator() {
OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class);
Tracer mockTracer = mock(Tracer.class);
- when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer);
+ TracerProvider mockTracerProvider = mock(TracerProvider.class);
+ when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer);
- TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry);
+ TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry, mockTracerProvider);
assertTrue(tracingTelemetry.getContextPropagator() instanceof OTelTracingContextPropagator);
}
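
The updated tests above stub `TracerProvider.get(...)` instead of `OpenTelemetry#getTracer`, mirroring a constructor that now receives the provider explicitly. A minimal sketch of that pattern follows; the scope-name value and class shape are assumptions, not the real `OTelTracingTelemetry`:

```java
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.api.trace.TracerProvider;

// Sketch only: shows the tracer-resolution pattern the mocks above exercise.
// INSTRUMENTATION_SCOPE_NAME's value here is an assumption, not the real constant.
class ScopedTracerTelemetry {
    static final String INSTRUMENTATION_SCOPE_NAME = "org.opensearch.telemetry";

    private final OpenTelemetry openTelemetry;
    private final Tracer tracer;

    ScopedTracerTelemetry(OpenTelemetry openTelemetry, TracerProvider tracerProvider) {
        this.openTelemetry = openTelemetry;
        // Matches the stub: when(mockTracerProvider.get(INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer)
        this.tracer = tracerProvider.get(INSTRUMENTATION_SCOPE_NAME);
    }

    Tracer tracer() {
        return tracer;
    }
}
```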
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..aaf2e35302d77
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+39b05d2d4027971bf99111a9be1d7035a116bb55
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1
deleted file mode 100644
index 8430355365996..0000000000000
--- a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..a77333ea8ae47
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1
deleted file mode 100644
index 7a36dc1f2724f..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-384ba4d75670befbedb45c4d3b497a93639c206d
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..6f26bf4e6a9b5
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+992623e7d8f2d96e41faf1687bb963f5433e3517
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1
deleted file mode 100644
index 37b78a32f741f..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-af78acec783ffd77c63d8aeecc21041fd39ac54f
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..d2ff72db60d1f
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+847f942381145de23f21c836d05b0677474271d3
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1
deleted file mode 100644
index 1bdfec3aae6ba..0000000000000
--- a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7cceacaf11df8dc63f23d0fb58e9d4640fc88404
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..f12a6046e96d0
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+4c0acdb8bb73647ebb3847ac2d503d53d72c02b4
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1
deleted file mode 100644
index 8b7b50a6fc9c6..0000000000000
--- a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..8e4179ba15942
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+fe62f9ccd41b8660d07639dbbab8ae1edd6f2720
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
deleted file mode 100644
index 032959e98d009..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cec8348108dc76c47cf87c669d514be52c922144
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1
new file mode 100644
index 0000000000000..ab2819da570fd
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1
@@ -0,0 +1 @@
+6620fbfb47667a5eb6050e35c7b4c88000bcd77f
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
deleted file mode 100644
index 107863c1b3c9d..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f37380d23c9bb079bc702910833b2fd532c9abd0
\ No newline at end of file
diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java
index d25ef33c2ce29..5abd6f2710198 100644
--- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java
+++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java
@@ -257,7 +257,7 @@ public FullHttpRequest nettyRequest() {
/**
* A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications,
* and due to the underlying implementation, it performs case-insensitive lookups of keys to values.
- *
+ *
* It is important to note that this implementation does have some downsides in that each invocation of the
* {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a
* view of the underlying values.
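
As a rough illustration of the javadoc above, here is a hedged sketch (not the actual OpenSearch class) of a read-only `Map` view over Netty's `HttpHeaders`: lookups delegate to the case-insensitive headers, while `entrySet()` (and therefore `values()`) copies on every call.

```java
import io.netty.handler.codec.http.HttpHeaders;

import java.util.AbstractMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

// Sketch: read-only Map view over HttpHeaders. Lookups are cheap and
// case-insensitive (delegated to HttpHeaders); entrySet() copies each time.
class HttpHeadersMapSketch extends AbstractMap<String, List<String>> {
    private final HttpHeaders headers;

    HttpHeadersMapSketch(HttpHeaders headers) {
        this.headers = headers;
    }

    @Override
    public List<String> get(Object key) {
        return key instanceof String ? headers.getAll((String) key) : null;
    }

    @Override
    public boolean containsKey(Object key) {
        return key instanceof String && headers.contains((String) key);
    }

    @Override
    public Set<Map.Entry<String, List<String>>> entrySet() {
        // The downside called out above: a fresh copy on every invocation,
        // which AbstractMap's values() inherits as well.
        return headers.names()
            .stream()
            .map(name -> Map.entry(name, headers.getAll(name)))
            .collect(Collectors.toSet());
    }

    @Override
    public List<String> put(String key, List<String> value) {
        throw new UnsupportedOperationException("modifications are not supported");
    }
}
```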
diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java
index dfa72d6d59a0d..55920bab4efd3 100644
--- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java
+++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java
@@ -52,6 +52,7 @@
import org.opensearch.nio.NioSelector;
import org.opensearch.nio.NioSocketChannel;
import org.opensearch.nio.ServerChannelContext;
+import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.threadpool.ThreadPool;
import org.opensearch.transport.TcpTransport;
import org.opensearch.transport.TransportSettings;
@@ -84,9 +85,10 @@ protected NioTransport(
PageCacheRecycler pageCacheRecycler,
NamedWriteableRegistry namedWriteableRegistry,
CircuitBreakerService circuitBreakerService,
- NioGroupFactory groupFactory
+ NioGroupFactory groupFactory,
+ Tracer tracer
) {
- super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService);
+ super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer);
this.pageAllocator = new PageAllocator(pageCacheRecycler);
this.groupFactory = groupFactory;
}
diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java
index ec266d76eff3d..d4be876867651 100644
--- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java
+++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java
@@ -91,7 +91,8 @@ public Map> getTransports(
PageCacheRecycler pageCacheRecycler,
CircuitBreakerService circuitBreakerService,
NamedWriteableRegistry namedWriteableRegistry,
- NetworkService networkService
+ NetworkService networkService,
+ Tracer tracer
) {
return Collections.singletonMap(
NIO_TRANSPORT_NAME,
@@ -103,7 +104,8 @@ public Map> getTransports(
pageCacheRecycler,
namedWriteableRegistry,
circuitBreakerService,
- getNioGroupFactory(settings)
+ getNioGroupFactory(settings),
+ tracer
)
);
}
diff --git a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java
index 24cc38c17a9d1..f5d1c618f5ace 100644
--- a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java
+++ b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java
@@ -44,6 +44,7 @@
import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
import org.opensearch.core.common.transport.TransportAddress;
import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
import org.opensearch.test.transport.MockTransportService;
import org.opensearch.test.transport.StubbableTransport;
import org.opensearch.transport.AbstractSimpleTransportTestCase;
@@ -81,7 +82,8 @@ protected Transport build(Settings settings, final Version version, ClusterSetti
new MockPageCacheRecycler(settings),
namedWriteableRegistry,
new NoneCircuitBreakerService(),
- new NioGroupFactory(settings, logger)
+ new NioGroupFactory(settings, logger),
+ NoopTracer.INSTANCE
) {
@Override
diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java
index aabc3aee8887f..2675e9b62de35 100644
--- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java
+++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java
@@ -78,7 +78,7 @@
* PercolatorFieldMapper#createQueryBuilderField(...) method), using the query builders' writable contract. This test
* does a best-effort verification that we don't break bwc for query builders between the first previous major version and
* the latest current major release.
- *
+ *
* The queries to test are specified in json format, which turns out to work because we tend to break here rarely. If the
* json format of a query being tested here changes, then feel free to change this.
*/
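
For readers unfamiliar with the writable contract the comment refers to, a hedged round-trip sketch follows. The registry wiring and package locations are assumptions based on the 2.x layout visible elsewhere in this diff:

```java
import java.util.List;

import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput;
import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.index.query.TermQueryBuilder;

// Sketch: serialize a query builder via the stream ("writable") contract and
// read it back; the BWC test compares bytes written by an old cluster instead.
public class QueryBuilderRoundTripSketch {
    public static void main(String[] args) throws Exception {
        NamedWriteableRegistry registry = new NamedWriteableRegistry(
            List.of(new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new))
        );
        QueryBuilder original = QueryBuilders.termQuery("field", "value");
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeNamedWriteable(original); // write under the writable contract
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                QueryBuilder roundTripped = in.readNamedWriteable(QueryBuilder.class);
                assert original.equals(roundTripped);
            }
        }
    }
}
```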
diff --git a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java
index c3c332aecfd4c..8ca90791f649e 100644
--- a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java
+++ b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java
@@ -65,7 +65,7 @@
/**
* Create a simple "daemon controller", put it in the right place and check that it runs.
- *
+ *
* Extends LuceneTestCase rather than OpenSearchTestCase as OpenSearchTestCase installs a system call filter, and
* that prevents the Spawner class from doing its job. Also needs to run in a separate JVM to other
* tests that extend OpenSearchTestCase for the same reason.
diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java
index 02a613be320c2..4bb3877fc04a8 100644
--- a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java
+++ b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java
@@ -441,7 +441,7 @@ public static Path createTempDir(String prefix) throws IOException {
/**
* Run the given action with a temporary copy of the config directory.
- *
+ *
* Files under the path passed to the action may be modified as necessary for the
* test to execute, and running OpenSearch with {@link #startOpenSearch()} will
* use the temporary directory.
diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java
index 7904d1a046916..958de24848178 100644
--- a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java
+++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java
@@ -51,7 +51,7 @@
/**
* Asserts that a file at a path matches its status as Directory/File, and its owner. If on a posix system, also matches the permission
* set is what we expect.
- *
+ *
* This class saves information about its failed matches in instance variables and so instances should not be reused
*/
public class FileMatcher extends TypeSafeMatcher {
diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java
index 25cefa948ff10..26af39d66cad3 100644
--- a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java
+++ b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java
@@ -137,7 +137,7 @@ public static Installation ofContainer(Shell sh, Distribution distribution) {
/**
* Returns the user that owns this installation.
- *
+ *
* For packages this is root, and for archives it is the user doing the installation.
*/
public String getOwner() {
diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java
index b80ae422bda9a..e9ebf28042b46 100644
--- a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java
+++ b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java
@@ -194,11 +194,11 @@ private static void verifyInstallation(Installation opensearch, Distribution dis
// we shell out here because java's posix file permission view doesn't support special modes
assertThat(opensearch.config, file(Directory, "root", "opensearch", p750));
- assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750"));
+ assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750"));
final Path jvmOptionsDirectory = opensearch.config.resolve("jvm.options.d");
assertThat(jvmOptionsDirectory, file(Directory, "root", "opensearch", p750));
- assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750"));
+ assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750"));
Stream.of("opensearch.keystore", "opensearch.yml", "jvm.options", "log4j2.properties")
.forEach(configFile -> assertThat(opensearch.config(configFile), file(File, "root", "opensearch", p660)));
diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
index aef363058b394..f963f8d221bb5 100644
--- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
@@ -98,11 +98,11 @@ private void waitForSearchableDocs(String index, int shardCount, int replicaCoun
// Verify segment store
assertBusy(() -> {
- /**
- * Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by
- * line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging
- * to primary while remaining *replicaCount* records belongs to replica copies
- * */
+ /*
+ Use default tabular output and sort the response on shard,segment,primaryOrReplica columns to allow line-by-line
+ parsing, where records related to a segment (e.g. _0) are chunked together with the first record belonging
+ to the primary while the remaining *replicaCount* records belong to replica copies
+ */
Request segrepStatsRequest = new Request("GET", "/_cat/segments/" + index + "?s=shard,segment,primaryOrReplica");
segrepStatsRequest.addParameter("h", "index,shard,primaryOrReplica,segment,docs.count");
Response segrepStatsResponse = client().performRequest(segrepStatsRequest);
@@ -259,7 +259,8 @@ public void testIndexing() throws Exception {
* This test verifies that during rolling upgrades the segment replication does not break when replica shards can
* be running on older codec versions.
*
- * @throws Exception exception
+ * @throws Exception if index creation fails
+ * @throws UnsupportedOperationException if cluster type is unknown
*/
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7679")
public void testIndexingWithSegRep() throws Exception {
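
To make the chunked `_cat/segments` output described in the comment above concrete, here is a hedged, free-standing sketch of the line-by-line parse it enables. Column positions follow the `h=index,shard,primaryOrReplica,segment,docs.count` parameter; the class and method names are illustrative:

```java
import java.util.Arrays;
import java.util.List;

// Sketch: with s=shard,segment,primaryOrReplica each segment's primary row is
// followed immediately by its replicaCount replica rows, so the body can be
// consumed in fixed-size chunks and docs.count compared within each chunk.
final class SegRepCatOutputSketch {
    static void assertReplicasCaughtUp(String catSegmentsBody, int replicaCount) {
        List<String> rows = Arrays.asList(catSegmentsBody.trim().split("\n"));
        for (int i = 0; i < rows.size(); i += 1 + replicaCount) {
            String[] primary = rows.get(i).trim().split("\\s+");
            long primaryDocs = Long.parseLong(primary[4]); // docs.count column
            for (int r = 1; r <= replicaCount; r++) {
                String[] replica = rows.get(i + r).trim().split("\\s+");
                if (Long.parseLong(replica[4]) != primaryDocs) {
                    throw new AssertionError("replica row lags primary for segment " + primary[3]);
                }
            }
        }
    }
}
```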
diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md
new file mode 100644
index 0000000000000..d7e9182f2a656
--- /dev/null
+++ b/release-notes/opensearch.release-notes-2.11.0.md
@@ -0,0 +1,75 @@
+## 2023-10-12 Version 2.11.0 Release Notes
+
+## [2.11]
+
+### Added
+- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386))
+- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681))
+- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694))
+- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666))
+- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131))
+- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189))
+- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562))
+- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204))
+- [Remote Store] Add support to restrict creation & deletion of a system repository and mutation of its immutable settings ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839))
+- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261))
+
+### Dependencies
+- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575))
+- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968))
+- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950))
+- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973](https://github.com/opensearch-project/OpenSearch/pull/9973), [#9972](https://github.com/opensearch-project/OpenSearch/pull/9972))
+- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971))
+- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022))
+- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022))
+- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098))
+- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125))
+- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752))
+- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126))
+- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299))
+- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298))
+- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209))
+- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))
+- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))
+- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))
+- Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276))
+- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295))
+- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302))
+- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303))
+- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301))
+- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210))
+- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297))
+- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506))
+- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508))
+- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294))
+- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564))
+
+### Changed
+- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415))
+- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916))
+- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840))
+- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036))
+- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042))
+- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122))
+- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246))
+- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200))
+- Add instrumentation in Inbound Handler. ([#10143](https://github.com/opensearch-project/OpenSearch/pull/10143))
+- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356))
+- [Remote Store] Add support to reload repository metadata in place ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569))
+- [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241))
+- Update the separator for RemoteStoreLockManager since underscore is allowed in the base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379))
+- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562))
+
+### Removed
+- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447))
+
+### Fixed
+- Fix ignore_missing parameter having no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725))
+- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045))
+- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082))
+- Fix remove ingest processor not handling the ignore_missing parameter correctly ([#10089](https://github.com/opensearch-project/OpenSearch/pull/10089))
+- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101))
+- Fix circular dependency in Settings initialization ([#10194](https://github.com/opensearch-project/OpenSearch/pull/10194))
+- Fix registration and initialization of multiple extensions ([#10256](https://github.com/opensearch-project/OpenSearch/pull/10256))
+- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([#10370](https://github.com/opensearch-project/OpenSearch/pull/10370))
+- Fix some test methods in SimulatePipelineRequestParsingTests never running, and fix a test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
index 3f79227ce64e8..784c7b52b18b4 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
@@ -141,8 +141,8 @@
---
"Metric - indexing doc_status":
- skip:
- version: " - 2.99.99"
- reason: "To be introduced in future release :: TODO: change if/when we backport to 2.x"
+ version: " - 2.10.99"
+ reason: "Doc Status Stats were introduced in v2.11.0"
- do:
nodes.info: {}
- set:
diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index 4ac89f2e792d7..0000000000000
--- a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8c82be3d997d781bb72d6d0eadade064dd2cd6db
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..6ad304fa52c12
--- /dev/null
+++ b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1
@@ -0,0 +1 @@
+36f0363325ca7bf62c180160d1ed5165c7c37795
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index 624b5174a444f..0000000000000
--- a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4c261d17c681c0d91171c67e192abfef59adea2e
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..f104c4207d390
--- /dev/null
+++ b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1
@@ -0,0 +1 @@
+e98fb408028f40170e6d87c16422bfdc0bb2e392
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index 70baf1270cd5d..0000000000000
--- a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d2f7fbc5b2c49ca777a169d579f41082a9a57cc7
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.8.0.jar.sha1 b/server/licenses/lucene-core-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..f9a3e2f3cbee6
--- /dev/null
+++ b/server/licenses/lucene-core-9.8.0.jar.sha1
@@ -0,0 +1 @@
+5e8421c5f8573bcf22e9265fc7e19469545a775a
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index 20ddb9ae3ef27..0000000000000
--- a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8d1cf3d6db43fad6630376ba59451f848f4d387c
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.8.0.jar.sha1 b/server/licenses/lucene-grouping-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..ab132121b2edc
--- /dev/null
+++ b/server/licenses/lucene-grouping-9.8.0.jar.sha1
@@ -0,0 +1 @@
+d39184518351178c404ed9669fc6cb6111f2288d
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index c3ad03ca53b13..0000000000000
--- a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-83ab97638bb5269f950d75bba5675d3cfb63f2fa
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.8.0.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..c7cb678fb7b72
--- /dev/null
+++ b/server/licenses/lucene-highlighter-9.8.0.jar.sha1
@@ -0,0 +1 @@
+1ac38c8278dbd63dfab30744a41dd955a415a31c
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index c2a4c5334b314..0000000000000
--- a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-97c26362151908dc892263edda3872abbacb71a8
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.8.0.jar.sha1 b/server/licenses/lucene-join-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..2b6cb8af4faf6
--- /dev/null
+++ b/server/licenses/lucene-join-9.8.0.jar.sha1
@@ -0,0 +1 @@
+3d64fc57bb6e718d906413a9f73c713e6d4d8bb0
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index 32534d07e47dc..0000000000000
--- a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8337eddc0dddd0d7dd50c5aa0d17e5e31592f9fa
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.8.0.jar.sha1 b/server/licenses/lucene-memory-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..5fdfee401dd0a
--- /dev/null
+++ b/server/licenses/lucene-memory-9.8.0.jar.sha1
@@ -0,0 +1 @@
+5283ac71d6ccecb5e00c7b52df2faec012f2625a
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index 7db245cc521c7..0000000000000
--- a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a2e3fae930295f0e2b401effe04eafc25692a414
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.8.0.jar.sha1 b/server/licenses/lucene-misc-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..cf815cba15862
--- /dev/null
+++ b/server/licenses/lucene-misc-9.8.0.jar.sha1
@@ -0,0 +1 @@
+9a57b049cf51a5e9c9c1909c420f645f1b6f9a54
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index d01a6d733196e..0000000000000
--- a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e88d8a464e6cfa345b946c9c8822ba7ee2a9159f
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.8.0.jar.sha1 b/server/licenses/lucene-queries-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..09f369ef18e12
--- /dev/null
+++ b/server/licenses/lucene-queries-9.8.0.jar.sha1
@@ -0,0 +1 @@
+628db4ef46f1c6a05145bdac1d1bc4ace6341b13
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index c7b9640bad170..0000000000000
--- a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9905790675c01e8dc24f9a5e6b9b28b879c65a52
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.8.0.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..2a42a8956b18b
--- /dev/null
+++ b/server/licenses/lucene-queryparser-9.8.0.jar.sha1
@@ -0,0 +1 @@
+982faf2bfa55542bf57fbadef54c19ac00f57cae
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index c4cd9e47624f8..0000000000000
--- a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d6c8be427ec8ffc7e8233ffbf0d190d95a56cf14
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.8.0.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..64a0b07f72d29
--- /dev/null
+++ b/server/licenses/lucene-sandbox-9.8.0.jar.sha1
@@ -0,0 +1 @@
+06493dbd14d02537716822254866a94458f4d842
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index dfee145d3ea26..0000000000000
--- a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-11716d61288feaa692593bf699affa8de2b564c4
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..d1bcb0581435c
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1
@@ -0,0 +1 @@
+9d9a731822ad6eefa1ba288a0c158d478522f165
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index c7410086ba86c..0000000000000
--- a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3a888e06c0535403b9e58a8dcddeb5e6513a4930
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..d17459cc569a9
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1
@@ -0,0 +1 @@
+ce752a52b2d4eac90633c7df7982e29504f99e76
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1
deleted file mode 100644
index 6d8d4205f4d02..0000000000000
--- a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-52dfc8bf135ed29f5baf0a967c1bb63dedb9a069
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.8.0.jar.sha1 b/server/licenses/lucene-suggest-9.8.0.jar.sha1
new file mode 100644
index 0000000000000..ff47b87672d2c
--- /dev/null
+++ b/server/licenses/lucene-suggest-9.8.0.jar.sha1
@@ -0,0 +1 @@
+f977f96f2093b7fddea6b67caa2e1c5b10edebf6
\ No newline at end of file
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java
index 6343bd127c458..4c9f49df71257 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java
@@ -60,8 +60,8 @@
public class HotThreadsIT extends OpenSearchIntegTestCase {
public void testHotThreadsDontFail() throws ExecutionException, InterruptedException {
- /**
- * This test just checks if nothing crashes or gets stuck etc.
+ /*
+ This test just checks that nothing crashes or gets stuck, etc.
*/
createIndex("test");
final int iters = scaledRandomIntBetween(2, 20);
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java
index 0197ccf059737..44ba585016d8e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java
@@ -112,7 +112,7 @@ protected int numberOfEvents(String actionMasks, Function findEvents(String actionMasks, Function, Boolean> criteria) {
List events = new ArrayList<>();
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java
index c733329a1b5f7..e6fd9139d45f2 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java
@@ -30,7 +30,7 @@
/**
* Integration tests for task management API with Concurrent Segment Search
- *
+ *
* The way the test framework bootstraps the test cluster makes it difficult to parameterize the feature flag.
* Once concurrent search is moved behind a cluster setting we can parameterize these tests behind the setting.
*/
@@ -72,7 +72,7 @@ protected Settings featureFlagSettings() {
/**
* Tests the number of threads that worked on a search task.
- *
+ *
* Currently, we try to control concurrency by creating an index with 7 segments and rely on
* the way concurrent search creates leaf slices from segments. Once more concurrency controls are introduced
* we should improve this test to use those methods.
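
The comment above leans on one segment per flush to bound concurrency. A hedged helper that would achieve that inside an integration test might look like the following; it assumes background merges don't collapse the segments, and the `Client` and index name come from the caller's test context:

```java
import org.opensearch.client.Client;

final class SegmentSeeding {
    // Sketch: one flush per document leaves roughly one segment per iteration
    // (assuming merges don't kick in), which caps the leaf slices that
    // concurrent search can build from the index.
    static void indexIntoSeparateSegments(Client client, String index, int segmentCount) {
        for (int i = 0; i < segmentCount; i++) {
            client.prepareIndex(index).setId(Integer.toString(i)).setSource("field", "value" + i).get();
            client.admin().indices().prepareFlush(index).get();
        }
    }
}
```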
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
index aff7c5d9876ac..36fe3748e9d10 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
@@ -46,7 +46,7 @@
/**
* This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only.
- *
+ *
* The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only".
*/
@ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
index 347011721c728..78fb01b07b6b1 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
@@ -53,7 +53,7 @@
/**
* This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only.
- *
+ *
* The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only".
*/
@ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java
index 737c0acc309fd..cd6cb0ca3b172 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java
@@ -194,7 +194,7 @@ private static void indexDocs(BulkProcessor processor, int numDocs) {
/**
* Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number
* of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load).
- *
+ *
* This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread
* as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code.
*/
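
The correlation trick described above, relying on the bulk listener firing on the thread that last consumed the backoff iterator, can be sketched with a `ThreadLocal`; the names below are illustrative, not the production types:

```java
final class BackoffCorrelationSketch {
    private static final ThreadLocal<Integer> LAST_RETRY_ROUND = new ThreadLocal<>();

    // Called from the backoff policy's iterator just before a retry is sent.
    static void recordRetryRound(int round) {
        LAST_RETRY_ROUND.set(round);
    }

    // Called from the bulk listener, which runs on the same thread as the last
    // iterator call, so the thread-local written above is visible here.
    static void onBulkResponse(boolean hadRejections, int maxRetries) {
        Integer round = LAST_RETRY_ROUND.get();
        if (hadRejections && round != null && round < maxRetries) {
            // Rejections after maxing out retries are fine under heavy load;
            // rejections before that point indicate a retry-policy bug.
            throw new AssertionError("rejected after round " + round + " of " + maxRetries);
        }
    }
}
```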
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java
index c62c61d5919d6..aefabcb9bc14f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java
@@ -69,7 +69,7 @@
/**
* The purpose of this test is to verify that when a processor executes an operation asynchronously that
* the expected result is the same as if the same operation happens synchronously.
- *
+ *
* In this test two test processors are defined that basically do the same operation, but one processor
* executes asynchronously. The result of the operation should be the same and also the order in which the
* bulk responses are returned should be the same as how the corresponding index requests were defined.
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java
index f0a3b5a5901ce..b1934f901ac65 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java
@@ -109,8 +109,8 @@ public List getAggregations() {
@Override
public List getFetchSubPhases(FetchPhaseConstructionContext context) {
- /**
- * Set up a fetch sub phase that throws an exception on indices whose name that start with "boom".
+ /*
+ Set up a fetch sub phase that throws an exception on indices whose names start with "boom".
*/
return Collections.singletonList(fetchContext -> new FetchSubPhaseProcessor() {
@Override
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java
index 4c8bf24b1655a..84648eda3d38c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java
@@ -317,8 +317,8 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception {
);
Settings nonClusterManagerDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0));
Settings nonClusterManagerDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1));
- internalCluster().stopRandomNonClusterManagerNode();
- internalCluster().stopRandomNonClusterManagerNode();
+ internalCluster().stopRandomNodeNotCurrentClusterManager();
+ internalCluster().stopRandomNodeNotCurrentClusterManager();
logger.info("--> verify that there is no cluster-manager anymore on remaining node");
// spin here to wait till the state is set
diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java
index 38b86d307d197..737b272613a44 100644
--- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java
@@ -461,7 +461,7 @@ public boolean validateClusterForming() {
/**
* Tests that indices are properly deleted even if there is a cluster-manager transition in between.
- * Test for https://github.com/elastic/elasticsearch/issues/11665
+ * Test for Elasticsearch issue #11665
*/
public void testIndicesDeleted() throws Exception {
final String idxName = "test";
diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java
index a2864b6dfd1da..70124c8c46700 100644
--- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java
@@ -136,7 +136,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception {
// shutting down the nodes, to avoid the leakage check tripping
// on the states associated with the commit requests we may have dropped
- internalCluster().stopRandomNonClusterManagerNode();
+ internalCluster().stopRandomNodeNotCurrentClusterManager();
}
public void testClusterFormingWithASlowNode() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
index 2bab61f3e1c4c..229cd7bffad2f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
@@ -53,7 +53,7 @@
import org.opensearch.env.NodeEnvironment;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexSettings;
-import org.opensearch.index.MergePolicyConfig;
+import org.opensearch.index.MergePolicyProvider;
import org.opensearch.index.engine.Engine;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.index.shard.ShardPath;
@@ -519,7 +519,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception {
.put("number_of_replicas", 1)
// disable merges to keep segments the same
- .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
+ .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false)
// expire retention leases quickly
.put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java
index bb08b19df765b..c394a1f631690 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java
@@ -650,7 +650,15 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul
}
}
};
- final IndexShard newShard = newIndexShard(indexService, shard, wrapper, getInstanceFromNode(CircuitBreakerService.class), listener);
+ NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
+ final IndexShard newShard = newIndexShard(
+ indexService,
+ shard,
+ wrapper,
+ getInstanceFromNode(CircuitBreakerService.class),
+ env.nodeId(),
+ listener
+ );
shardRef.set(newShard);
recoverShard(newShard);
@@ -674,6 +682,7 @@ public static final IndexShard newIndexShard(
final IndexShard shard,
CheckedFunction wrapper,
final CircuitBreakerService cbs,
+ final String nodeId,
final IndexingOperationListener... listeners
) throws IOException {
ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry());
@@ -702,7 +711,9 @@ public static final IndexShard newIndexShard(
SegmentReplicationCheckpointPublisher.EMPTY,
null,
null,
- () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL
+ () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL,
+ nodeId,
+ null
);
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java
index f8c2acbf99f70..b431079476624 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java
@@ -73,7 +73,7 @@
import org.opensearch.env.TestEnvironment;
import org.opensearch.gateway.GatewayMetaState;
import org.opensearch.index.IndexSettings;
-import org.opensearch.index.MergePolicyConfig;
+import org.opensearch.index.MergePolicyProvider;
import org.opensearch.index.MockEngineFactoryPlugin;
import org.opensearch.index.seqno.SeqNoStats;
import org.opensearch.index.translog.TestTranslog;
@@ -135,7 +135,7 @@ public void testCorruptIndex() throws Exception {
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
- .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
+ .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false)
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1")
.put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true)
.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum")
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
index 7e1d0792e3ddb..8291fef5d177b 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
@@ -72,7 +72,7 @@
import org.opensearch.core.index.shard.ShardId;
import org.opensearch.env.NodeEnvironment;
import org.opensearch.index.IndexSettings;
-import org.opensearch.index.MergePolicyConfig;
+import org.opensearch.index.MergePolicyProvider;
import org.opensearch.index.shard.IndexEventListener;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.shard.IndexShardState;
@@ -167,7 +167,7 @@ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedEx
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1")
- .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
+ .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false)
// no checkindex - we corrupt shards on purpose
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
// no translog based flush - it might change the .liv / segments.N files
@@ -286,7 +286,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted
prepareCreate("test").setSettings(
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0")
- .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
+ .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on
// purpose
// no translog based flush - it might change the .liv / segments.N files
@@ -552,7 +552,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I
prepareCreate("test").setSettings(
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test
- .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
+ .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false)
// no checkindex - we corrupt shards on purpose
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
// no translog based flush - it might change the .liv / segments.N files
@@ -624,7 +624,7 @@ public void testReplicaCorruption() throws Exception {
prepareCreate("test").setSettings(
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1)
- .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
+ .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on
// purpose
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java
index bdefd7a5e199a..f485d4e402b41 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java
@@ -91,7 +91,7 @@ public void testGlobalPrimaryAllocation() throws Exception {
/**
* This test verifies the happy path where primary shard allocation is balanced when multiple indices are created.
- *
+ *
* This test in general passes without primary shard balance as well due to nature of allocation algorithm which
* assigns all primary shards first followed by replica copies.
*/
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java
index 8e68a8bde39d5..1d93eecd6b245 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java
@@ -197,9 +197,10 @@ protected IndexShard getIndexShard(String node, ShardId shardId, String indexNam
protected IndexShard getIndexShard(String node, String indexName) {
final Index index = resolveIndex(indexName);
IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
- IndexService indexService = indicesService.indexServiceSafe(index);
+ IndexService indexService = indicesService.indexService(index);
+ assertNotNull(indexService);
final Optional<Integer> shardId = indexService.shardIds().stream().findFirst();
- return indexService.getShard(shardId.get());
+ return shardId.map(indexService::getShard).orElse(null);
}
protected boolean segmentReplicationWithRemoteEnabled() {
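The rewritten lookup above trades indexServiceSafe, which throws when the index service is absent, for the null-returning indexService plus an explicit assertion, and maps the optional shard id so that a missing shard yields null rather than a NoSuchElementException. A hedged sketch of the caller-side pattern this enables (node and index names are placeholders):

    // Poll until the shard is visible on the node instead of failing the test
    // on the first race between shard allocation and the lookup.
    assertBusy(() -> assertNotNull(getIndexShard(replicaNode, INDEX_NAME)), 30, TimeUnit.SECONDS);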
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
index 33bc5a8f3afe6..81556cc270151 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
@@ -24,6 +24,7 @@
import org.apache.lucene.util.BytesRef;
import org.opensearch.action.admin.indices.alias.Alias;
import org.opensearch.action.admin.indices.flush.FlushRequest;
+import org.opensearch.action.admin.indices.recovery.RecoveryResponse;
import org.opensearch.action.admin.indices.stats.IndicesStatsRequest;
import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
import org.opensearch.action.get.GetResponse;
@@ -58,6 +59,7 @@
import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
import org.opensearch.core.index.shard.ShardId;
import org.opensearch.core.xcontent.XContentBuilder;
@@ -71,6 +73,7 @@
import org.opensearch.index.engine.NRTReplicationReaderManager;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.indices.recovery.FileChunkRequest;
+import org.opensearch.indices.recovery.RecoveryState;
import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
import org.opensearch.indices.replication.common.ReplicationType;
import org.opensearch.node.NodeClosedException;
@@ -82,6 +85,7 @@
import org.opensearch.test.InternalTestCluster;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.transport.MockTransportService;
+import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportService;
import org.junit.Before;
@@ -94,6 +98,7 @@
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import static java.util.Arrays.asList;
@@ -1777,4 +1782,134 @@ public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException {
}
+ public void testSendCorruptBytesToReplica() throws Exception {
+ // this test stubs transport calls specific to node-node replication.
+ assumeFalse(
+ "Skipping the test as its not compatible with segment replication with remote store.",
+ segmentReplicationWithRemoteEnabled()
+ );
+ final String primaryNode = internalCluster().startDataOnlyNode();
+ createIndex(
+ INDEX_NAME,
+ Settings.builder()
+ .put(indexSettings())
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put("index.refresh_interval", -1)
+ .build()
+ );
+ ensureYellow(INDEX_NAME);
+ final String replicaNode = internalCluster().startDataOnlyNode();
+ ensureGreen(INDEX_NAME);
+
+ MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance(
+ TransportService.class,
+ primaryNode
+ ));
+ CountDownLatch latch = new CountDownLatch(1);
+ AtomicBoolean failed = new AtomicBoolean(false);
+ primaryTransportService.addSendBehavior(
+ internalCluster().getInstance(TransportService.class, replicaNode),
+ (connection, requestId, action, request, options) -> {
+ if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK) && failed.getAndSet(true) == false) {
+ FileChunkRequest req = (FileChunkRequest) request;
+ logger.info("SENDING CORRUPT file chunk [{}] lastChunk: {}", req, req.lastChunk());
+ TransportRequest corrupt = new FileChunkRequest(
+ req.recoveryId(),
+ req.requestSeqNo(),
+ req.shardId(),
+ req.metadata(),
+ req.position(),
+ new BytesArray("test"),
+ false,
+ 0,
+ 0L
+ );
+ connection.sendRequest(requestId, action, corrupt, options);
+ latch.countDown();
+ } else {
+ connection.sendRequest(requestId, action, request, options);
+ }
+ }
+ );
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex(INDEX_NAME)
+ .setId(String.valueOf(i))
+ .setSource(jsonBuilder().startObject().field("field", i).endObject())
+ .get();
+ }
+ final long originalRecoveryTime = getRecoveryStopTime(replicaNode);
+ assertNotEquals(originalRecoveryTime, 0);
+ refresh(INDEX_NAME);
+ latch.await();
+ assertTrue(failed.get());
+ waitForNewPeerRecovery(replicaNode, originalRecoveryTime);
+ // reset checkIndex to ensure our original shard doesn't throw
+ resetCheckIndexStatus();
+ waitForSearchableDocs(100, primaryNode, replicaNode);
+ }
+
+ public void testWipeSegmentBetweenSyncs() throws Exception {
+ internalCluster().startClusterManagerOnlyNode();
+ final String primaryNode = internalCluster().startDataOnlyNode();
+ createIndex(
+ INDEX_NAME,
+ Settings.builder()
+ .put(indexSettings())
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put("index.refresh_interval", -1)
+ .build()
+ );
+ ensureYellow(INDEX_NAME);
+ final String replicaNode = internalCluster().startDataOnlyNode();
+ ensureGreen(INDEX_NAME);
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex(INDEX_NAME)
+ .setId(String.valueOf(i))
+ .setSource(jsonBuilder().startObject().field("field", i).endObject())
+ .get();
+ }
+ refresh(INDEX_NAME);
+ ensureGreen(INDEX_NAME);
+ final long originalRecoveryTime = getRecoveryStopTime(replicaNode);
+
+ final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME);
+ waitForSearchableDocs(INDEX_NAME, 10, List.of(replicaNode));
+ indexShard.store().directory().deleteFile("_0.si");
+
+ for (int i = 11; i < 21; i++) {
+ client().prepareIndex(INDEX_NAME)
+ .setId(String.valueOf(i))
+ .setSource(jsonBuilder().startObject().field("field", i).endObject())
+ .get();
+ }
+ refresh(INDEX_NAME);
+ waitForNewPeerRecovery(replicaNode, originalRecoveryTime);
+ resetCheckIndexStatus();
+ waitForSearchableDocs(20, primaryNode, replicaNode);
+ }
+
+ private void waitForNewPeerRecovery(String replicaNode, long originalRecoveryTime) throws Exception {
+ assertBusy(() -> {
+ // assert we have a peer recovery after the original
+ final long time = getRecoveryStopTime(replicaNode);
+ assertNotEquals(time, 0);
+ assertNotEquals(originalRecoveryTime, time);
+
+ }, 1, TimeUnit.MINUTES);
+ }
+
+ private long getRecoveryStopTime(String nodeName) {
+ final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(INDEX_NAME).get();
+ final List<RecoveryState> recoveryStates = recoveryResponse.shardRecoveryStates().get(INDEX_NAME);
+ logger.info("Recovery states {}", recoveryResponse);
+ for (RecoveryState recoveryState : recoveryStates) {
+ if (recoveryState.getTargetNode().getName().equals(nodeName)) {
+ return recoveryState.getTimer().stopTime();
+ }
+ }
+ return 0L;
+ }
}
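The new corruption test hinges on MockTransportService.addSendBehavior, which intercepts every request flowing from one node to another and here swaps the payload of a single FILE_CHUNK request. A stripped-down sketch of that interception seam, assuming the same test-framework wiring; the mutation itself is elided:

    MockTransportService sender = (MockTransportService) internalCluster()
        .getInstance(TransportService.class, primaryNode);
    sender.addSendBehavior(
        internalCluster().getInstance(TransportService.class, replicaNode),
        (connection, requestId, action, request, options) -> {
            // Inspect or rewrite the request here; forwarding it unchanged
            // keeps the transport behavior identical to production.
            connection.sendRequest(requestId, action, request, options);
        }
    );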
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java
index ae88dd76d54e0..547f9e7a8d380 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java
@@ -509,7 +509,7 @@ public Settings onNodeStopped(String nodeName) throws Exception {
}
/**
- * Test for https://github.com/elastic/elasticsearch/issues/47276 which checks that the persisted metadata on a data node does not
+ * Test for Elasticsearch issue #47276 which checks that the persisted metadata on a data node does not
* become inconsistent when using replicated closed indices.
*/
public void testRelocatedClosedIndexIssue() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java
index a0f01acd1f8e9..0967acb37d3e8 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java
@@ -66,8 +66,8 @@
import org.opensearch.index.IndexModule;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexSettings;
-import org.opensearch.index.MergePolicyConfig;
import org.opensearch.index.MergeSchedulerConfig;
+import org.opensearch.index.TieredMergePolicyProvider;
import org.opensearch.index.VersionType;
import org.opensearch.index.cache.query.QueryCacheStats;
import org.opensearch.index.engine.VersionConflictEngineException;
@@ -589,8 +589,8 @@ public void testNonThrottleStats() throws Exception {
prepareCreate("test").setSettings(
settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0")
- .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
- .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1")
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000")
)
@@ -621,8 +621,8 @@ public void testThrottleStats() throws Exception {
prepareCreate("test").setSettings(
settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0")
- .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
- .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1")
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "1")
.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC.name())
diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java
index f636185fd4649..d28df90216beb 100644
--- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java
@@ -198,11 +198,11 @@ public void testNoRebalanceOnRollingRestart() throws Exception {
// see https://github.com/elastic/elasticsearch/issues/14387
internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(3);
- /**
- * We start 3 nodes and a dedicated cluster-manager. Restart on of the data-nodes and ensure that we got no relocations.
- * Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject
- * to relocating to the restarting node since all had 2 shards and now one node has nothing allocated.
- * We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen.
+ /*
+ We start 3 nodes and a dedicated cluster-manager. Restart one of the data-nodes and ensure that we get no relocations.
+ Yet we have 6 shards and 0 replicas, which means that if the restarting node comes back, both other nodes are subject
+ to relocating to the restarting node, since all had 2 shards and now one node has nothing allocated.
+ We have a fix for this that waits until unallocated shards have been allocated, so this shouldn't happen.
*/
prepareCreate("test").setSettings(
Settings.builder()
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java
index bc55f6cc2cbcb..2053800504c89 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java
@@ -33,7 +33,6 @@
import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends AbstractSnapshotIntegTestCase {
@@ -107,11 +106,11 @@ public Settings buildRemoteStoreNodeAttributes(Path repoLocation, double ioFailu
.build();
}
- protected void deleteRepo() {
- logger.info("--> Deleting the repository={}", REPOSITORY_NAME);
- assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME));
- logger.info("--> Deleting the repository={}", TRANSLOG_REPOSITORY_NAME);
- assertAcked(clusterAdmin().prepareDeleteRepository(TRANSLOG_REPOSITORY_NAME));
+ protected void cleanupRepo() {
+ logger.info("--> Cleanup the repository={}", REPOSITORY_NAME);
+ clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).execute().actionGet();
+ logger.info("--> Cleanup the repository={}", TRANSLOG_REPOSITORY_NAME);
+ clusterAdmin().prepareCleanupRepository(TRANSLOG_REPOSITORY_NAME).execute().actionGet();
}
protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) {
@@ -125,6 +124,8 @@ protected String setup(Path repoLocation, double ioFailureRate, String skipExcep
settings.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT);
}
+ disableRepoConsistencyCheck("Remote Store Creates System Repository");
+
internalCluster().startClusterManagerOnlyNode(settings.build());
String dataNodeName = internalCluster().startDataOnlyNode(settings.build());
createIndex(INDEX_NAME);
@@ -159,7 +160,7 @@ private String getLocalSegmentFilename(String remoteFilename) {
return remoteFilename.split(RemoteSegmentStoreDirectory.SEGMENT_NAME_UUID_SEPARATOR)[0];
}
- private IndexResponse indexSingleDoc() {
+ protected IndexResponse indexSingleDoc() {
return client().prepareIndex(INDEX_NAME)
.setId(UUIDs.randomBase64UUID())
.setSource(randomAlphaOfLength(5), randomAlphaOfLength(5))
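The deleteRepo-to-cleanupRepo rename above is more than cosmetic: once the remote store registers its repositories as system repositories, deleting them mid-suite is rejected, so teardown prunes unreferenced blobs instead. The same swap recurs in the teardown hooks of the remote-store suites below. A sketch of the resulting hook, assuming the shared test base class:

    @After
    public void teardown() {
        // Cleanup prunes stale data but keeps the system repository registered;
        // a delete-repository call would no longer be allowed here.
        clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get();
    }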
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java
index 4eb1cc7703735..c957f1b338bfe 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java
@@ -23,7 +23,6 @@
import java.nio.file.Path;
import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteIndexRecoveryIT extends IndexRecoveryIT {
@@ -57,7 +56,7 @@ public Settings indexSettings() {
@After
public void teardown() {
- assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME));
+ clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get();
}
@Override
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java
index 4ebccb9b9e551..865b2d13f189e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java
@@ -57,7 +57,7 @@ public void setup() {
@After
public void teardown() {
- assertAcked(clusterAdmin().prepareDeleteRepository(BASE_REMOTE_REPO));
+ clusterAdmin().prepareCleanupRepository(BASE_REMOTE_REPO).get();
}
@Override
@@ -422,7 +422,7 @@ public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException
assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2);
}
- public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionException, InterruptedException {
+ public void testRestoreShallowSnapshotRepository() throws ExecutionException, InterruptedException {
String indexName1 = "testindex1";
String snapshotRepoName = "test-restore-snapshot-repo";
String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX;
@@ -464,22 +464,7 @@ public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionExce
assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards()));
assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS));
- createRepository(BASE_REMOTE_REPO, "fs", absolutePath2);
-
- RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
- .cluster()
- .prepareRestoreSnapshot(snapshotRepoName, snapshotName1)
- .setWaitForCompletion(true)
- .setIndices(indexName1)
- .setRenamePattern(indexName1)
- .setRenameReplacement(restoredIndexName1)
- .get();
-
- assertTrue(restoreSnapshotResponse.getRestoreInfo().failedShards() > 0);
-
- ensureRed(restoredIndexName1);
-
- client().admin().indices().close(Requests.closeIndexRequest(restoredIndexName1)).get();
+ client().admin().indices().close(Requests.closeIndexRequest(indexName1)).get();
createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath);
RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin()
.cluster()
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java
similarity index 58%
rename from server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java
rename to server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java
index d02c5bf54fbed..98586b60dcc69 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java
@@ -11,13 +11,20 @@
import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats;
import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse;
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.opensearch.action.admin.indices.flush.FlushResponse;
import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.concurrent.AbstractAsyncTask;
+import org.opensearch.common.util.concurrent.UncategorizedExecutionException;
import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.common.unit.ByteSizeUnit;
import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.index.IndexService;
import org.opensearch.index.remote.RemoteSegmentTransferTracker;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.indices.IndicesService;
import org.opensearch.repositories.RepositoriesService;
import org.opensearch.snapshots.mockstore.MockRepository;
import org.opensearch.test.OpenSearchIntegTestCase;
@@ -33,7 +40,7 @@
import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED;
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
-public class RemoteStoreBackpressureIT extends AbstractRemoteStoreMockRepositoryIntegTestCase {
+public class RemoteStoreBackpressureAndResiliencyIT extends AbstractRemoteStoreMockRepositoryIntegTestCase {
public void testWritesRejectedDueToConsecutiveFailureBreach() throws Exception {
// Here the doc size of the request remains same throughout the test. After initial indexing, all remote store interactions
// fail leading to consecutive failure limit getting exceeded and leading to rejections.
@@ -112,7 +119,7 @@ private void validateBackpressure(
stats = stats();
indexDocAndRefresh(initialSource, initialDocsToIndex);
assertEquals(rejectionCount, stats.rejectionCount);
- deleteRepo();
+ cleanupRepo();
}
private RemoteSegmentTransferTracker.Stats stats() {
@@ -156,4 +163,98 @@ private String generateString(int sizeInBytes) {
sb.append("}");
return sb.toString();
}
+
+ /**
+ * Tests the fix for GitHub issue #10398.
+ */
+ public void testAsyncTrimTaskSucceeds() {
+ Path location = randomRepoPath().toAbsolutePath();
+ String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE);
+
+ logger.info("Increasing the frequency of async trim task to ensure it runs in background while indexing");
+ IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next();
+ ((AbstractAsyncTask) indexService.getTrimTranslogTask()).setInterval(TimeValue.timeValueMillis(100));
+
+ logger.info("--> Indexing data");
+ indexData(randomIntBetween(2, 5), true);
+ logger.info("--> Indexing succeeded");
+
+ MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName)
+ .repository(TRANSLOG_REPOSITORY_NAME);
+ logger.info("--> Failing all remote store interaction");
+ translogRepo.setRandomControlIOExceptionRate(1d);
+
+ for (int i = 0; i < randomIntBetween(5, 10); i++) {
+ UncategorizedExecutionException exception = assertThrows(UncategorizedExecutionException.class, this::indexSingleDoc);
+ assertEquals("Failed execution", exception.getMessage());
+ }
+
+ translogRepo.setRandomControlIOExceptionRate(0d);
+ indexSingleDoc();
+ logger.info("Indexed single doc successfully");
+ }
+
+ /**
+ * Tests the fix for GitHub issue #10400.
+ */
+ public void testSkipLoadGlobalCheckpointToReplicationTracker() {
+ Path location = randomRepoPath().toAbsolutePath();
+ String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE);
+
+ logger.info("--> Indexing data");
+ indexData(randomIntBetween(1, 2), true);
+ logger.info("--> Indexing succeeded");
+
+ IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next();
+ IndexShard indexShard = indexService.getShard(0);
+ indexShard.failShard("failing shard", null);
+
+ ensureRed(INDEX_NAME);
+
+ MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName)
+ .repository(TRANSLOG_REPOSITORY_NAME);
+ logger.info("--> Failing all remote store interaction");
+ translogRepo.setRandomControlIOExceptionRate(1d);
+ client().admin().cluster().prepareReroute().setRetryFailed(true).get();
+ // Cluster still stays red as the remote interactions are still failing
+ ensureRed(INDEX_NAME);
+
+ logger.info("Retrying to allocate failed shards");
+ client().admin().cluster().prepareReroute().setRetryFailed(true).get();
+ // Cluster still stays red as the remote interactions are still failing
+ ensureRed(INDEX_NAME);
+
+ logger.info("Stop failing all remote store interactions");
+ translogRepo.setRandomControlIOExceptionRate(0d);
+ client().admin().cluster().prepareReroute().setRetryFailed(true).get();
+ ensureGreen(INDEX_NAME);
+ }
+
+ public void testFlushDuringRemoteUploadFailures() {
+ Path location = randomRepoPath().toAbsolutePath();
+ String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE);
+
+ logger.info("--> Indexing data");
+ indexData(randomIntBetween(1, 2), true);
+ logger.info("--> Indexing succeeded");
+ ensureGreen(INDEX_NAME);
+
+ MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName)
+ .repository(TRANSLOG_REPOSITORY_NAME);
+ logger.info("--> Failing all remote store interaction");
+ translogRepo.setRandomControlIOExceptionRate(1d);
+
+ Exception ex = assertThrows(UncategorizedExecutionException.class, () -> indexSingleDoc());
+ assertEquals("Failed execution", ex.getMessage());
+
+ FlushResponse flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet();
+ assertEquals(1, flushResponse.getFailedShards());
+ ensureGreen(INDEX_NAME);
+
+ logger.info("--> Stop failing all remote store interactions");
+ translogRepo.setRandomControlIOExceptionRate(0d);
+ flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet();
+ assertEquals(1, flushResponse.getSuccessfulShards());
+ assertEquals(0, flushResponse.getFailedShards());
+ }
}
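All three new resiliency tests share a single fault-injection lever: MockRepository's controlled IOException rate. A rate of 1.0 fails every remote-store interaction and 0.0 heals it, which is what lets each test assert the failure path and the subsequent recovery in one run. The toggle, condensed:

    MockRepository translogRepo = (MockRepository) internalCluster()
        .getInstance(RepositoriesService.class, dataNodeName)
        .repository(TRANSLOG_REPOSITORY_NAME);
    translogRepo.setRandomControlIOExceptionRate(1d); // every remote call now fails
    // ... drive indexing/flush/reroute and assert the failure handling ...
    translogRepo.setRandomControlIOExceptionRate(0d); // heal, then assert recovery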
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
index 157f8e41fee24..e2ef5f85abc74 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
@@ -27,6 +27,7 @@
import org.opensearch.index.IndexSettings;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.repositories.RepositoriesService;
import org.opensearch.repositories.blobstore.BlobStoreRepository;
import org.opensearch.repositories.fs.FsRepository;
import org.opensearch.test.OpenSearchIntegTestCase;
@@ -50,7 +51,6 @@
import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase {
protected static final String REPOSITORY_NAME = "test-remote-store-repo";
@@ -271,7 +271,6 @@ public static Settings buildRemoteStoreNodeAttributes(
if (withRateLimiterAttributes) {
settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean())
- .put(segmentRepoSettingsAttributeKeyPrefix + "max_remote_download_bytes_per_sec", "4kb")
.put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES);
}
@@ -314,8 +313,8 @@ public void teardown() {
clusterSettingsSuppliedByTest = false;
assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_NAME);
assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_2_NAME);
- assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME));
- assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_2_NAME));
+ clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get();
+ clusterAdmin().prepareCleanupRepository(REPOSITORY_2_NAME).get();
}
public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) {
@@ -343,11 +342,18 @@ public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) {
.custom(RepositoriesMetadata.TYPE);
RepositoryMetadata actualRepository = repositories.repository(repositoryName);
+ final RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class);
+ final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName);
+
for (String nodeName : internalCluster().getNodeNames()) {
ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName);
DiscoveryNode node = clusterService.localNode();
RepositoryMetadata expectedRepository = buildRepositoryMetadata(node, repositoryName);
- assertTrue(actualRepository.equalsIgnoreGenerations(expectedRepository));
+
+ // Validate that all the restricted settings are intact on all the nodes.
+ repository.getRestrictedSystemRepositorySettings()
+ .stream()
+ .forEach(setting -> assertEquals(setting.get(actualRepository.settings()), setting.get(expectedRepository.settings())));
}
}
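The weakened assertion above is deliberate: now that non-restricted repository settings may be updated in place, full metadata equality no longer holds across nodes, so the check narrows to the restricted system-repository settings. Roughly, per setting (the element type of getRestrictedSystemRepositorySettings is assumed here):

    for (Setting<?> restricted : repository.getRestrictedSystemRepositorySettings()) {
        // Only the settings that users are not allowed to change must agree
        // between the registered metadata and the node-attribute-derived one.
        assertEquals(restricted.get(actualRepository.settings()), restricted.get(expectedRepository.settings()));
    }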
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java
index b97e93f323fb2..acdb21d072320 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java
@@ -28,9 +28,13 @@
public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase {
public void testRemoteRefreshRetryOnFailure() throws Exception {
-
Path location = randomRepoPath().toAbsolutePath();
setup(location, randomDoubleBetween(0.1, 0.15, true), "metadata", 10L);
+ client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setPersistentSettings(Settings.builder().put(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), false))
+ .get();
// Here we are having flush/refresh after each iteration of indexing. However, the refresh will not always succeed
// due to IOExceptions that are thrown while doing uploadBlobs.
@@ -56,7 +60,7 @@ public void testRemoteRefreshRetryOnFailure() throws Exception {
logger.info("Local files = {}, Repo files = {}", sortedFilesInLocal, sortedFilesInRepo);
assertTrue(filesInRepo.containsAll(filesInLocal));
}, 90, TimeUnit.SECONDS);
- deleteRepo();
+ cleanupRepo();
}
public void testRemoteRefreshSegmentPressureSettingChanged() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java
index 4d56a1e94e3fc..ef2dcf3217df6 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java
@@ -8,17 +8,31 @@
package org.opensearch.remotestore;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesAction;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.opensearch.client.Client;
+import org.opensearch.cluster.metadata.RepositoryMetadata;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.plugins.Plugin;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.disruption.NetworkDisruption;
import org.opensearch.test.transport.MockTransportService;
+import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
+import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING;
+
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTestCase {
@@ -94,4 +108,77 @@ public void testMultiNodeClusterRandomNodeRecoverNetworkIsolation() {
internalCluster().clearDisruptionScheme();
}
+
+ public void testMultiNodeClusterRandomNodeRecoverNetworkIsolationPostNonRestrictedSettingsUpdate() {
+ Set<String> nodesInOneSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new));
+ Set<String> nodesInAnotherSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new));
+ ensureStableCluster(6);
+
+ NetworkDisruption networkDisruption = new NetworkDisruption(
+ new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInAnotherSide),
+ NetworkDisruption.DISCONNECT
+ );
+ internalCluster().setDisruptionScheme(networkDisruption);
+
+ networkDisruption.startDisrupting();
+
+ final Client client = client(nodesInOneSide.iterator().next());
+ RepositoryMetadata repositoryMetadata = client.admin()
+ .cluster()
+ .prepareGetRepositories(REPOSITORY_NAME)
+ .get()
+ .repositories()
+ .get(0);
+ Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20));
+ updatedSettings.remove("system_repository");
+
+ client.admin()
+ .cluster()
+ .preparePutRepository(repositoryMetadata.name())
+ .setType(repositoryMetadata.type())
+ .setSettings(updatedSettings)
+ .get();
+
+ ensureStableCluster(3, nodesInOneSide.stream().findAny().get());
+ networkDisruption.stopDisrupting();
+
+ ensureStableCluster(6);
+
+ internalCluster().clearDisruptionScheme();
+ }
+
+ public void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception {
+ internalCluster().startClusterManagerOnlyNode();
+ internalCluster().startNodes(3);
+
+ final Client client = client();
+ RepositoryMetadata repositoryMetadata = client.admin()
+ .cluster()
+ .prepareGetRepositories(REPOSITORY_NAME)
+ .get()
+ .repositories()
+ .get(0);
+ Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20));
+ updatedSettings.remove("system_repository");
+
+ client.admin()
+ .cluster()
+ .preparePutRepository(repositoryMetadata.name())
+ .setType(repositoryMetadata.type())
+ .setSettings(updatedSettings)
+ .get();
+
+ internalCluster().restartRandomDataNode();
+
+ ensureStableCluster(4);
+ }
+
+ public void testSystemRepositorySettingIsHiddenForGetRepositoriesRequest() throws IOException {
+ GetRepositoriesRequest request = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME });
+ GetRepositoriesResponse repositoriesResponse = client().execute(GetRepositoriesAction.INSTANCE, request).actionGet();
+ XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.JSON));
+ XContentBuilder xContentBuilder = repositoriesResponse.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ repositoriesResponse = GetRepositoriesResponse.fromXContent(createParser(xContentBuilder));
+ assertEquals(false, SYSTEM_REPOSITORY_SETTING.get(repositoriesResponse.repositories().get(0).settings()));
+ }
}
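The last test is notable for verifying hiding at the serialization layer rather than in memory: it renders the response to XContent and parses it back before asserting. The round trip, with the media type pinned to JSON for clarity:

    XContentBuilder builder = MediaTypeRegistry.contentBuilder(XContentType.JSON);
    repositoriesResponse.toXContent(builder, ToXContent.EMPTY_PARAMS);
    GetRepositoriesResponse parsed = GetRepositoriesResponse.fromXContent(createParser(builder));
    // system_repository must not leak into what API consumers can read back.
    assertFalse(SYSTEM_REPOSITORY_SETTING.get(parsed.repositories().get(0).settings()));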
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java
index 65335f444a2df..212f797180077 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java
@@ -10,24 +10,39 @@
import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest;
import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse;
+import org.opensearch.action.admin.indices.get.GetIndexRequest;
+import org.opensearch.action.admin.indices.get.GetIndexResponse;
import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.cluster.health.ClusterHealthStatus;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.unit.ByteSizeUnit;
import org.opensearch.repositories.RepositoriesService;
+import org.opensearch.repositories.Repository;
+import org.opensearch.test.CorruptionUtils;
import org.opensearch.test.InternalTestCluster;
import org.opensearch.test.OpenSearchIntegTestCase;
import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.util.HashMap;
+import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.greaterThan;
-@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0)
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteStoreRestoreIT extends BaseRemoteStoreRestoreIT {
/**
@@ -388,14 +403,41 @@ public void testRTSRestoreDataOnlyInTranslog() throws Exception {
public void testRateLimitedRemoteDownloads() throws Exception {
clusterSettingsSuppliedByTest = true;
int shardCount = randomIntBetween(1, 3);
+ Path segmentRepoPath = randomRepoPath();
+ Path tlogRepoPath = randomRepoPath();
prepareCluster(
1,
3,
INDEX_NAME,
0,
shardCount,
- buildRemoteStoreNodeAttributes(REPOSITORY_NAME, randomRepoPath(), REPOSITORY_2_NAME, randomRepoPath(), true)
+ buildRemoteStoreNodeAttributes(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, tlogRepoPath, true)
);
+
+ // validate inplace repository metadata update
+ ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
+ DiscoveryNode node = clusterService.localNode();
+ String settingsAttributeKeyPrefix = String.format(
+ Locale.getDefault(),
+ REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+ REPOSITORY_NAME
+ );
+ Map<String, String> settingsMap = node.getAttributes()
+ .keySet()
+ .stream()
+ .filter(key -> key.startsWith(settingsAttributeKeyPrefix))
+ .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key)));
+ Settings.Builder settings = Settings.builder();
+ settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue()));
+ settings.put("location", segmentRepoPath).put("max_remote_download_bytes_per_sec", 4, ByteSizeUnit.KB);
+
+ assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get());
+
+ for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
+ Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME);
+ assertEquals("4096b", segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec"));
+ }
+
Map<String, Long> indexStats = indexData(5, false, INDEX_NAME);
assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME)));
@@ -414,6 +456,40 @@ public void testRateLimitedRemoteDownloads() throws Exception {
assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards);
assertEquals(0, getNumShards(INDEX_NAME).numReplicas);
verifyRestoredData(indexStats, INDEX_NAME);
+
+ // revert repo metadata to pass asserts on repo metadata vs. node attrs during teardown
+ // https://github.com/opensearch-project/OpenSearch/pull/9569#discussion_r1345668700
+ settings.remove("max_remote_download_bytes_per_sec");
+ assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get());
+ for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
+ Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME);
+ assertNull(segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec"));
+ }
+ }
+
+ public void testRestoreCorruptSegmentShouldFail() throws IOException, ExecutionException, InterruptedException {
+ prepareCluster(1, 3, INDEX_NAME, 0, 1);
+ indexData(randomIntBetween(3, 4), true, INDEX_NAME);
+
+ GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest()).get();
+ String indexUUID = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+
+ logger.info("--> Corrupting segment files in remote segment store");
+ Path path = segmentRepoPath.resolve(indexUUID).resolve("0").resolve("segments").resolve("data");
+ try (Stream<Path> dataPath = Files.list(path)) {
+ CorruptionUtils.corruptFile(random(), dataPath.toArray(Path[]::new));
+ }
+
+ logger.info("--> Stop primary");
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME)));
+
+ logger.info("--> Close and restore the index");
+ client().admin()
+ .cluster()
+ .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).waitForCompletion(true), PlainActionFuture.newFuture());
+
+ logger.info("--> Check for index status, should be red due to corruption");
+ ensureRed(INDEX_NAME);
}
// TODO: Restore flow - index aliases
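The rate-limit test now derives the repository settings from the data node's remote-store attributes before overlaying the throttle, so the PutRepository call is an in-place metadata update rather than a divergent re-registration. The derivation step, condensed from the hunk above:

    String prefix = String.format(
        Locale.getDefault(), REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, REPOSITORY_NAME);
    Settings.Builder repoSettings = Settings.builder();
    node.getAttributes().forEach((key, value) -> {
        if (key.startsWith(prefix)) {
            repoSettings.put(key.replace(prefix, ""), value); // strip the attribute prefix
        }
    });
    repoSettings.put("location", segmentRepoPath).put("max_remote_download_bytes_per_sec", 4, ByteSizeUnit.KB);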
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java
index 45c3ef7f5bae5..23864c35ad154 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java
@@ -17,7 +17,6 @@
import java.nio.file.Path;
import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
/**
* This class runs Segment Replication Integ test suite with remote store enabled.
@@ -50,6 +49,6 @@ public void setup() {
@After
public void teardown() {
- assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME));
+ clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get();
}
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java
index 0da4d81a8871e..6cfc76b7e3223 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java
@@ -17,7 +17,6 @@
import java.nio.file.Path;
import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
/**
* This class executes the SegmentReplicationPressureIT suite with remote store integration enabled.
@@ -49,6 +48,6 @@ public void setup() {
@After
public void teardown() {
- assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME));
+ clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get();
}
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java
index 98fab139f4902..3dfde6f472525 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java
@@ -108,8 +108,15 @@ public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String nam
}
public void testRateLimitedRemoteUploads() throws Exception {
+ clusterSettingsSuppliedByTest = true;
overrideBuildRepositoryMetadata = true;
- internalCluster().startNode();
+ Settings.Builder clusterSettings = Settings.builder()
+ .put(remoteStoreClusterSettings(REPOSITORY_NAME, repositoryLocation, REPOSITORY_2_NAME, repositoryLocation));
+ clusterSettings.put(
+ String.format(Locale.getDefault(), "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, REPOSITORY_NAME),
+ MockFsRepositoryPlugin.TYPE
+ );
+ internalCluster().startNode(clusterSettings.build());
Client client = client();
logger.info("--> updating repository");
assertAcked(
@@ -119,7 +126,6 @@ public void testRateLimitedRemoteUploads() throws Exception {
.setType(MockFsRepositoryPlugin.TYPE)
.setSettings(
Settings.builder()
- .put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true)
.put("location", repositoryLocation)
.put("compress", compress)
.put("max_remote_upload_bytes_per_sec", "1kb")
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java
index 079753de95680..36987ac2d4991 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java
@@ -27,6 +27,7 @@
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
@@ -124,11 +125,11 @@ public void readBlobAsync(String blobName, ActionListener<ReadContext> listener)
long contentLength = listBlobs().get(blobName).length();
long partSize = contentLength / 10;
int numberOfParts = (int) ((contentLength % partSize) == 0 ? contentLength / partSize : (contentLength / partSize) + 1);
- List<InputStreamContainer> blobPartStreams = new ArrayList<>();
+ List<ReadContext.StreamPartCreator> blobPartStreams = new ArrayList<>();
for (int partNumber = 0; partNumber < numberOfParts; partNumber++) {
long offset = partNumber * partSize;
InputStreamContainer blobPartStream = new InputStreamContainer(readBlob(blobName, offset, partSize), partSize, offset);
- blobPartStreams.add(blobPartStream);
+ blobPartStreams.add(() -> CompletableFuture.completedFuture(blobPartStream));
}
ReadContext blobReadContext = new ReadContext(contentLength, blobPartStreams, null);
listener.onResponse(blobReadContext);
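This hunk adapts the mock to ReadContext's move from eagerly materialized part streams to part creators, i.e. suppliers of CompletableFuture-wrapped streams; ReadContext.StreamPartCreator is assumed to be that supplier type. The mock completes each future immediately, whereas a production store would defer opening the stream until a consumer demands the part:

    List<ReadContext.StreamPartCreator> parts = new ArrayList<>();
    for (int partNumber = 0; partNumber < numberOfParts; partNumber++) {
        long offset = partNumber * partSize;
        InputStreamContainer part = new InputStreamContainer(readBlob(blobName, offset, partSize), partSize, offset);
        // The mock wraps an already-opened part; a lazy implementation would
        // open it inside the supplier instead.
        parts.add(() -> CompletableFuture.completedFuture(part));
    }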
diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java
index f149d538cc47a..b8415f4b41815 100644
--- a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java
@@ -108,4 +108,16 @@ public void testUpdateRepository() {
final Repository updatedRepository = repositoriesService.repository(repositoryName);
assertThat(updatedRepository, updated ? not(sameInstance(originalRepository)) : sameInstance(originalRepository));
}
+
+ public void testSystemRepositoryCantBeCreated() {
+ internalCluster();
+ final String repositoryName = "test-repo";
+ final Client client = client();
+ final Settings.Builder repoSettings = Settings.builder().put("system_repository", true).put("location", randomRepoPath());
+
+ assertThrows(
+ RepositoryException.class,
+ () -> client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings).get()
+ );
+ }
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java
index 4ce8af3e0f081..ee94e574228df 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java
@@ -124,7 +124,7 @@ protected Settings featureFlagSettings() {
}
private ZonedDateTime date(String date) {
- return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date));
+ return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date));
}
private static String format(ZonedDateTime date, String pattern) {
@@ -1481,7 +1481,7 @@ public void testExceptionOnNegativeInterval() {
/**
* https://github.com/elastic/elasticsearch/issues/31760 shows an edge case where an unmapped "date" field in two indices
* that are queried simultaneously can lead to the "format" parameter in the aggregation not being preserved correctly.
- *
+ *
* The error happens when the bucket from the "unmapped" index is received first in the reduce phase, however the case can
* be recreated when aggregating about a single index with an unmapped date field and also getting "empty" buckets.
*/
@@ -1624,8 +1624,8 @@ public void testScriptCaching() throws Exception {
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
.get()
);
- String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1));
- String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1));
+ String date = DateFieldMapper.getDefaultDateTimeFormatter().format(date(1, 1));
+ String date2 = DateFieldMapper.getDefaultDateTimeFormatter().format(date(2, 1));
indexRandom(
true,
client().prepareIndex("cache_test_idx").setId("1").setSource("d", date),
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
index 04115f69172da..d44071e1ef9c5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
@@ -92,7 +92,7 @@ protected Settings featureFlagSettings() {
}
private ZonedDateTime date(String date) {
- return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date));
+ return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date));
}
@Before
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
index 5e95073209c71..865dd670fbf68 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
@@ -221,6 +221,10 @@ public void testNestedDiversity() throws Exception {
}
public void testNestedSamples() throws Exception {
+ assumeFalse(
+ "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10046",
+ internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
+ );
// Test samples nested under samples
int MAX_DOCS_PER_AUTHOR = 1;
int MAX_DOCS_PER_GENRE = 2;
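This hunk, like the matching ones in HighlighterSearchIT and FieldCapabilitiesIT below, mutes a single case under concurrent segment search with assumeFalse rather than disabling the whole test class, so the non-concurrent configuration keeps running it. The pattern, with the tracking-issue URL left as a placeholder:

    assumeFalse(
        "Concurrent search case muted pending fix: <tracking issue URL>",
        internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
    );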
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java
index bb90c1294ecb8..dc3b690c7f78f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java
@@ -544,13 +544,13 @@ public void testNested() throws Exception {
/**
* https://github.com/elastic/elasticsearch/issues/33514
- *
+ *
* This bug manifests as the max_bucket agg ("peak") being added to the response twice, because
* the pipeline agg is run twice. This makes invalid JSON and breaks conversion to maps.
* The bug was caused by an UnmappedTerms being the chosen as the first reduction target. UnmappedTerms
* delegated reduction to the first non-unmapped agg, which would reduce and run pipeline aggs. But then
* execution returns to the UnmappedTerms and _it_ runs pipelines as well, doubling up on the values.
- *
+ *
* Applies to any pipeline agg, not just max.
*/
public void testFieldIsntWrittenOutTwice() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java
index 0cf89778c6e99..2aad0d2d38901 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java
@@ -1168,7 +1168,7 @@ public void testHoltWintersMinimization() {
* the default settings. Which means our mock histo will match the generated result (which it won't
* if the minimizer is actually working, since the coefficients will be different and thus generate different
* data)
- *
+ *
* We can simulate this by setting the window size == size of histo
*/
public void testMinimizeNotEnoughData() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
index 4cdf5ae8e674f..42d91ac945662 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
@@ -3343,6 +3343,10 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception {
}
public void testHighlightQueryRewriteDatesWithNow() throws Exception {
+ assumeFalse(
+ "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10434",
+ internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
+ );
assertAcked(
client().admin()
.indices()
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java
index f5d1b8234558e..6b95405b3ebd4 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java
@@ -244,6 +244,10 @@ public void testWithIndexAlias() {
}
public void testWithIndexFilter() throws InterruptedException {
+ assumeFalse(
+ "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10433",
+ internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
+ );
assertAcked(prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword"));
assertAcked(prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long"));
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java
index 43b7179a335f8..4a178e7066846 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java
@@ -93,8 +93,8 @@ public void testDeletePit() throws Exception {
assertTrue(deletePitInfo.isSuccessful());
}
validatePitStats("index", 0, 10);
- /**
- * Checking deleting the same PIT id again results in succeeded
+ /*
+ Check that deleting the same PIT id again succeeds
*/
deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest);
deletePITResponse = deleteExecute.get();
@@ -113,8 +113,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception {
pitIds.add(pitResponse.getId());
validatePitStats("index", 5, 0);
- /**
- * Delete Pit #1
+ /*
+ Delete PIT #1
*/
DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds);
ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest);
@@ -128,8 +128,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception {
pitResponse = execute.get();
pitIds.add(pitResponse.getId());
validatePitStats("index", 5, 5);
- /**
- * Delete PIT with both Ids #1 (which is deleted) and #2 (which is present)
+ /*
+ Delete PIT with both IDs: #1 (which is deleted) and #2 (which is present)
*/
deletePITRequest = new DeletePitRequest(pitIds);
deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest);
@@ -165,9 +165,9 @@ public void testDeleteAllPits() throws Exception {
validatePitStats("index1", 5, 0);
DeletePitRequest deletePITRequest = new DeletePitRequest("_all");
- /**
- * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context
- * not found exceptions don't result in failures ( as deletion in one node is successful )
+ /*
+ When we invoke delete again, it returns success after clearing the remaining readers. Assert that
+ reader context not found exceptions don't result in failures (as deletion on one node is successful)
*/
ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest);
DeletePitResponse deletePITResponse = execute.get();
@@ -207,9 +207,9 @@ public Settings onNodeStopped(String nodeName) throws Exception {
});
ensureGreen();
- /**
- * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context
- * not found exceptions don't result in failures ( as deletion in one node is successful )
+ /*
+ When we invoke delete again, it returns success after clearing the remaining readers. Assert that
+ reader context not found exceptions don't result in failures (as deletion on one node is successful)
*/
ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest);
DeletePitResponse deletePITResponse = execute.get();
@@ -242,9 +242,9 @@ public Settings onNodeStopped(String nodeName) throws Exception {
}
});
ensureGreen();
- /**
- * When we invoke delete again, returns success as all readers are cleared. (Delete all on node which is Up and
- * once the node restarts, all active contexts are cleared in the node )
+ /*
+ When we invoke delete again, it returns success as all readers are cleared. (Delete-all runs on the node
+ that is up, and once the stopped node restarts, all of its active contexts are cleared.)
*/
ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest);
DeletePitResponse deletePITResponse = execute.get();
@@ -278,8 +278,8 @@ public void testDeleteWhileSearch() throws Exception {
}
}
} catch (Exception e) {
- /**
- * assert for exception once delete pit goes through. throw error in case of any exeption before that.
+ /*
+ Assert for the exception once delete PIT goes through; throw the error in case of any exception before that.
*/
if (deleted.get() == true) {
Throwable t = ExceptionsHelper.unwrapCause(e.getCause());
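The comment rewrites throughout this file are one mechanical change: the /** ... */ javadoc form documents declarations, so using it for step-by-step notes inside a method body is misleading (and presumably what prompted the cleanup); a plain /* ... */ block comment carries no such implication. The convention, shown on a fresh fragment (the method names are illustrative):

    public void testExample() {
        /*
          Plain block comment: appropriate for narrating a step inside a body.
         */
        int step = 1;
    }

    /** Javadoc form: reserved for the declaration it precedes. */
    public void documentedMethod() {}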
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java
index f16b9a4d67b49..27002b844da1d 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java
@@ -119,7 +119,7 @@ public void testScanScrollWithShardExceptions() throws Exception {
assertThat(numHits, equalTo(100L));
clearScroll("_all");
- internalCluster().stopRandomNonClusterManagerNode();
+ internalCluster().stopRandomDataNode();
searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).setScroll(TimeValue.timeValueMinutes(1)).get();
assertThat(searchResponse.getSuccessfulShards(), lessThan(searchResponse.getTotalShards()));
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java
index 00ac574b8bd72..b99f66850e9e3 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java
@@ -220,8 +220,8 @@ public void testPitWithSearchAfter() throws Exception {
.setPointInTime(new PointInTimeBuilder(pitResponse.getId()))
.get();
assertEquals(3, sr.getHits().getHits().length);
- /**
- * Add new data and assert PIT results remain the same and normal search results gets refreshed
+ /*
+ Add new data and assert that PIT results remain the same while normal search results get refreshed
*/
indexRandom(true, client().prepareIndex("test").setId("4").setSource("field1", 102));
sr = client().prepareSearch()
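The comment above leans on the point-in-time contract: a PIT pins its reader contexts at creation time, so searches routed through it keep returning the original snapshot, while an ordinary search sees new documents once they are refreshed. A minimal sketch of exercising that contract (the APIs mirror this test; the document id and field value are made up):

    // Index one more document; the "true" argument forces a refresh, making it
    // visible to ordinary searches immediately.
    indexRandom(true, client().prepareIndex("test").setId("5").setSource("field1", 103));

    // Through the PIT: still the snapshot taken when the PIT was created.
    SearchResponse pitResults = client().prepareSearch()
        .setPointInTime(new PointInTimeBuilder(pitResponse.getId()))
        .get();

    // A plain search against the index sees the newly added document as well.
    SearchResponse liveResults = client().prepareSearch("test").get();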
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
deleted file mode 100644
index 719b75079da92..0000000000000
--- a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
+++ /dev/null
@@ -1,608 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.search.simple;
-
-import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-
-import org.opensearch.action.index.IndexRequestBuilder;
-import org.opensearch.action.search.SearchPhaseExecutionException;
-import org.opensearch.action.search.SearchRequestBuilder;
-import org.opensearch.action.search.SearchResponse;
-import org.opensearch.action.support.WriteRequest.RefreshPolicy;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.json.JsonXContent;
-import org.opensearch.core.rest.RestStatus;
-import org.opensearch.core.xcontent.MediaTypeRegistry;
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.index.IndexSettings;
-import org.opensearch.index.mapper.MapperService;
-import org.opensearch.index.query.QueryBuilders;
-import org.opensearch.index.query.TermQueryBuilder;
-import org.opensearch.search.rescore.QueryRescorerBuilder;
-import org.opensearch.search.sort.SortOrder;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-
-import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
-import static org.opensearch.index.query.QueryBuilders.boolQuery;
-import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
-import static org.opensearch.index.query.QueryBuilders.queryStringQuery;
-import static org.opensearch.index.query.QueryBuilders.rangeQuery;
-import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
-
-public class ParameterizedSimpleSearchIT extends ParameterizedOpenSearchIntegTestCase {
-
- public ParameterizedSimpleSearchIT(Settings settings) {
- super(settings);
- }
-
- @ParametersFactory
- public static Collection