diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 40c8fb2e01dd7..b5b0a815b02b2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,5 +1,5 @@ # CODEOWNERS manages notifications, not PR approvals -# For PR approvals see /.github/workflows/maintainer-approval.yml +# For PR approvals see /.github/workflows/maintainer-approval.yml # Files have a single rule applied, the last match decides the owner # If you would like to more specifically apply ownership, include existing owner in new sub fields @@ -24,4 +24,4 @@ /.github/ @peternied -/MAINTAINERS.md @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @tlfeng @VachaShah +/MAINTAINERS.md @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @tlfeng @VachaShah diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 1f5c187c28e7d..f610ff5c31049 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -30,11 +30,31 @@ jobs: - name: Setup environment variables (PR) if: github.event_name == 'pull_request_target' run: | + echo "event_name=pull_request_target" >> $GITHUB_ENV + echo "branch_name=$(jq --raw-output .pull_request.base.ref $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_owner=$(jq --raw-output .pull_request.user.login $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_or_commit_description=$(jq --ascii-output .pull_request.body $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "post_merge_action=false" >> $GITHUB_ENV + + # to get the PR data that can be used for post merge actions + - uses: actions/github-script@v7 + if: github.event_name == 'push' + id: get_pr_data + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + return ( + await github.rest.repos.listPullRequestsAssociatedWithCommit({ + commit_sha: context.sha, + owner: context.repo.owner, + repo: context.repo.repo, + }) + ).data[0]; - name: Setup environment variables (Push) if: github.event_name == 'push' @@ -43,11 +63,15 @@ jobs: ref_id=$(git rev-parse HEAD) branch_name=$(git rev-parse --abbrev-ref HEAD) echo "branch_name=$branch_name" >> $GITHUB_ENV + echo "event_name=push" >> $GITHUB_ENV echo "pr_from_sha=$ref_id" >> $GITHUB_ENV echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV - echo "pr_number=Null" >> $GITHUB_ENV + echo "pr_owner=$(jq --raw-output '.commits[0].author.username' $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo 'pr_number=${{ fromJson(steps.get_pr_data.outputs.result).number }}' >> $GITHUB_ENV + echo "pr_or_commit_description=$(jq --ascii-output .head_commit.message $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "post_merge_action=true" >> 
$GITHUB_ENV - name: Checkout opensearch-build repo uses: actions/checkout@v4 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f868dd76039b..4b4a5e5f4f981 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,22 +5,29 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Add latency metrics for instrumenting critical clusterManager code paths ([#12333](https://github.com/opensearch-project/OpenSearch/pull/12333)) - Add support for Azure Managed Identity in repository-azure ([#12423](https://github.com/opensearch-project/OpenSearch/issues/12423)) - Add useCompoundFile index setting ([#13478](https://github.com/opensearch-project/OpenSearch/pull/13478)) - Make outbound side of transport protocol dependent ([#13293](https://github.com/opensearch-project/OpenSearch/pull/13293)) +- [Remote Store] Add dynamic cluster settings to set timeout for segments upload to Remote Store ([#13679](https://github.com/opensearch-project/OpenSearch/pull/13679)) +- [Remote Store] Upload translog checkpoint as object metadata to translog.tlog([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637)) ### Dependencies - Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559)) - Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9 ([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442)) - Bump `org.apache.commons:commons-text` from 1.11.0 to 1.12.0 ([#13557](https://github.com/opensearch-project/OpenSearch/pull/13557)) - Bump `org.hdrhistogram:HdrHistogram` from 2.1.12 to 2.2.1 ([#13556](https://github.com/opensearch-project/OpenSearch/pull/13556)) -- Bump `com.gradle.enterprise` from 3.17.2 to 3.17.3 ([#13641](https://github.com/opensearch-project/OpenSearch/pull/13641)) +- Bump `com.gradle.enterprise` from 3.17.2 to 3.17.4 ([#13641](https://github.com/opensearch-project/OpenSearch/pull/13641), [#13753](https://github.com/opensearch-project/OpenSearch/pull/13753)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.3.6 to 3.4.0 ([#13642](https://github.com/opensearch-project/OpenSearch/pull/13642)) - Bump `mockito` from 5.11.0 to 5.12.0 ([#13665](https://github.com/opensearch-project/OpenSearch/pull/13665)) +- Bump `com.google.code.gson:gson` from 2.10.1 to 2.11.0 ([#13752](https://github.com/opensearch-project/OpenSearch/pull/13752)) +- Bump `ch.qos.logback:logback-core` from 1.5.3 to 1.5.6 ([#13756](https://github.com/opensearch-project/OpenSearch/pull/13756)) +- Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802)) ### Changed - Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650)) - Refactor implementations of query phase searcher, allow QueryCollectorContext to have zero collectors ([#13481](https://github.com/opensearch-project/OpenSearch/pull/13481)) +- Adds support to inject telemetry instances to plugins ([#13636](https://github.com/opensearch-project/OpenSearch/pull/13636)) ### Deprecated @@ -31,6 +38,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix negative RequestStats metric issue ([#13553](https://github.com/opensearch-project/OpenSearch/pull/13553)) - Fix get field mapping API returns 404 error in mixed cluster with multiple versions 
([#13624](https://github.com/opensearch-project/OpenSearch/pull/13624)) - Allow clearing `remote_store.compatibility_mode` setting ([#13646](https://github.com/opensearch-project/OpenSearch/pull/13646)) +- Fix ReplicaShardBatchAllocator to batch shards without duplicates ([#13710](https://github.com/opensearch-project/OpenSearch/pull/13710)) ### Security diff --git a/MAINTAINERS.md b/MAINTAINERS.md index cce92167473b6..6855281a488ca 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -14,6 +14,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon | | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon | | Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | +| Jay Deng | [jed326](https://github.com/jed326) | Amazon | | Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | | Marc Handalian | [mch2](https://github.com/mch2) | Amazon | | Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 1e097d586aded..d8779cf8c2778 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -27,7 +27,7 @@ google_http_client = 1.44.1 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.109.Final +netty = 4.1.110.Final joda = 2.12.7 # project reactor diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 9942651ccdd67..f40c35dde83de 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -119,6 +119,8 @@ public class TieredSpilloverCache implements ICache { .setValueType(builder.cacheConfig.getValueType()) .setSettings(builder.cacheConfig.getSettings()) .setWeigher(builder.cacheConfig.getWeigher()) + .setKeySerializer(builder.cacheConfig.getKeySerializer()) + .setValueSerializer(builder.cacheConfig.getValueSerializer()) .setDimensionNames(builder.cacheConfig.getDimensionNames()) .setStatsTrackingEnabled(false) .build(), diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java index 2058faa5181b1..69e2060f7ea2f 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -141,6 +141,10 @@ public MockDiskCacheFactory(long delay, int maxSize, boolean statsTrackingEnable @Override @SuppressWarnings({ "unchecked" }) public ICache create(CacheConfig config, CacheType cacheType, Map cacheFactories) { + // As we can't directly IT with the tiered cache and ehcache, check that we receive non-null serializers, as an ehcache disk + // cache would require. 
+ assert config.getKeySerializer() != null; + assert config.getValueSerializer() != null; return new Builder().setKeySerializer((Serializer) config.getKeySerializer()) .setValueSerializer((Serializer) config.getValueSerializer()) .setMaxSize(maxSize) diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 6d5ee91326338..6c49341591589 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -16,6 +16,7 @@ import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.cache.serializer.Serializer; import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.cache.stats.ImmutableCacheStats; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; @@ -32,6 +33,8 @@ import org.junit.Before; import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -166,6 +169,8 @@ public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception .setKeyType(String.class) .setWeigher((k, v) -> keyValueSize) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setSettings(settings) .setDimensionNames(dimensionNames) .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken @@ -318,6 +323,8 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { .setKeyType(String.class) .setWeigher((k, v) -> keyValueSize) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setDimensionNames(dimensionNames) .setSettings( Settings.builder() @@ -830,6 +837,8 @@ public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exceptio .setKeyType(String.class) .setWeigher((k, v) -> 150) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setSettings( Settings.builder() .put( @@ -1014,6 +1023,8 @@ public void testTookTimePolicyFromFactory() throws Exception { .setKeyType(String.class) .setWeigher((k, v) -> keyValueSize) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setSettings(settings) .setMaxSizeInBytes(onHeapCacheSize * keyValueSize) .setDimensionNames(dimensionNames) @@ -1415,6 +1426,8 @@ private TieredSpilloverCache intializeTieredSpilloverCache( .setSettings(settings) .setDimensionNames(dimensionNames) .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) .setSettings( Settings.builder() .put( @@ -1479,4 +1492,26 @@ private ImmutableCacheStats getStatsSnapshotForTier(TieredSpilloverCache t } return snapshot; } + + // Duplicated here from EhcacheDiskCacheTests.java, we can't add a dependency on that plugin + static class StringSerializer implements Serializer { + private final Charset charset = StandardCharsets.UTF_8; + + 
@Override + public byte[] serialize(String object) { + return object.getBytes(charset); + } + + @Override + public String deserialize(byte[] bytes) { + if (bytes == null) { + return null; + } + return new String(bytes, charset); + } + + public boolean equals(String object, byte[] bytes) { + return object.equals(deserialize(bytes)); + } + } } diff --git a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java index 6536d474f5abc..27cef3f7d7251 100644 --- a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java +++ b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java @@ -47,6 +47,7 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.junit.annotations.TestIssueLogging; import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; import org.junit.Before; @@ -63,6 +64,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; +@TestIssueLogging(value = "_root:TRACE", issueUrl = "https://github.com/opensearch-project/OpenSearch/issues/9117") public class RepositoryURLClientYamlTestSuiteIT extends OpenSearchClientYamlSuiteTestCase { public RepositoryURLClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 83c4db80b7798..a8a165df637a2 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -235,11 +235,14 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 deleted file mode 100644 index 76b51cdae3867..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..faaf70c858a6e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 deleted file mode 100644 index 1bccee872152d..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..7affbc14fa93a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 deleted file mode 100644 index 3423fb94e8497..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..07730a5606ce2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 deleted file mode 100644 index b83ad36222d07..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..ebd1e0d52efb2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5172500557f8b..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..568c0aa2a2c03 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 deleted file mode 100644 index cabe61b300523..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2d6050dd1e3a5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 deleted file mode 100644 index 14e21cc0cdb60..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c3ee8087a8b5d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 deleted file mode 100644 index 6b23d0883e31f..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..32c8fa2b876a2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5afeb9627c9b5..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2c468962b1b64 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle index 4fc5e44f58c3a..5747624e2fb69 100644 --- a/plugins/cache-ehcache/build.gradle +++ b/plugins/cache-ehcache/build.gradle @@ -24,6 +24,7 @@ versions << [ dependencies { api "org.ehcache:ehcache:${versions.ehcache}" + api "org.slf4j:slf4j-api:${versions.slf4j}" } thirdPartyAudit { @@ 
-78,10 +79,9 @@ thirdPartyAudit { 'org.osgi.framework.BundleActivator', 'org.osgi.framework.BundleContext', 'org.osgi.framework.ServiceReference', - 'org.slf4j.Logger', - 'org.slf4j.LoggerFactory', - 'org.slf4j.Marker', - 'org.slf4j.event.Level' + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder' ) } @@ -90,13 +90,3 @@ tasks.named("bundlePlugin").configure { into 'config' } } - -test { - // TODO: Adding permission in plugin-security.policy doesn't seem to work. - systemProperty 'tests.security.manager', 'false' -} - -internalClusterTest { - // TODO: Remove this later once we have a way. - systemProperty 'tests.security.manager', 'false' -} diff --git a/plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/cache-ehcache/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/cache-ehcache/licenses/slf4j-api-LICENSE.txt b/plugins/cache-ehcache/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..54512cc08d16b --- /dev/null +++ b/plugins/cache-ehcache/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2022 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/plugins/cache-ehcache/licenses/slf4j-api-NOTICE.txt b/plugins/cache-ehcache/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 9a4dce1067b61..b4c62fbf85cb8 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -42,6 +42,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.time.Duration; import java.util.Arrays; import java.util.Iterator; @@ -175,57 +177,60 @@ private EhcacheDiskCache(Builder builder) { @SuppressWarnings({ "rawtypes" }) private Cache buildCache(Duration expireAfterAccess, Builder builder) { - try { - return this.cacheManager.createCache( - this.diskCacheAlias, - CacheConfigurationBuilder.newCacheConfigurationBuilder( - ICacheKey.class, - ByteArrayWrapper.class, - ResourcePoolsBuilder.newResourcePoolsBuilder().disk(maxWeightInBytes, MemoryUnit.B) - ).withExpiry(new ExpiryPolicy<>() { - @Override - public Duration getExpiryForCreation(ICacheKey key, ByteArrayWrapper value) { - return INFINITE; - } - - @Override - public Duration getExpiryForAccess(ICacheKey key, Supplier value) { - return expireAfterAccess; - } - - @Override - public Duration getExpiryForUpdate( - ICacheKey key, - Supplier oldValue, - ByteArrayWrapper newValue - ) { - return INFINITE; - } - }) - .withService(getListenerConfiguration(builder)) - .withService( - new OffHeapDiskStoreConfiguration( - this.threadPoolAlias, - (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) - .get(DISK_WRITE_CONCURRENCY_KEY) - .get(settings), - (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType).get(DISK_SEGMENT_KEY).get(settings) + // Creating the cache requires permissions specified in plugin-security.policy + return AccessController.doPrivileged((PrivilegedAction>) () -> { + try { + return this.cacheManager.createCache( + this.diskCacheAlias, + CacheConfigurationBuilder.newCacheConfigurationBuilder( + ICacheKey.class, + ByteArrayWrapper.class, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(maxWeightInBytes, MemoryUnit.B) + ).withExpiry(new ExpiryPolicy<>() { + @Override + public Duration getExpiryForCreation(ICacheKey key, ByteArrayWrapper value) { + return INFINITE; + } + + @Override + public Duration getExpiryForAccess(ICacheKey key, Supplier value) { + return expireAfterAccess; + } + + @Override + public Duration getExpiryForUpdate( + ICacheKey key, + Supplier oldValue, + ByteArrayWrapper newValue + ) { + return INFINITE; + } + }) + .withService(getListenerConfiguration(builder)) + .withService( + new OffHeapDiskStoreConfiguration( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_CONCURRENCY_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType).get(DISK_SEGMENT_KEY).get(settings) + ) ) - ) - .withKeySerializer(new KeySerializerWrapper(keySerializer)) - .withValueSerializer(new ByteArrayWrapperSerializer()) + .withKeySerializer(new KeySerializerWrapper(keySerializer)) + .withValueSerializer(new ByteArrayWrapperSerializer()) // We pass 
ByteArrayWrapperSerializer as ehcache's value serializer. If V is an interface, and we pass its // serializer directly to ehcache, ehcache requires the classes match exactly before/after serialization. // This is not always feasible or necessary, like for BytesReference. So, we handle the value serialization // before V hits ehcache. - ); - } catch (IllegalArgumentException ex) { - logger.error("Ehcache disk cache initialization failed due to illegal argument: {}", ex.getMessage()); - throw ex; - } catch (IllegalStateException ex) { - logger.error("Ehcache disk cache initialization failed: {}", ex.getMessage()); - throw ex; - } + ); + } catch (IllegalArgumentException ex) { + logger.error("Ehcache disk cache initialization failed due to illegal argument: {}", ex.getMessage()); + throw ex; + } catch (IllegalStateException ex) { + logger.error("Ehcache disk cache initialization failed: {}", ex.getMessage()); + throw ex; + } + }); } private CacheEventListenerConfigurationBuilder getListenerConfiguration(Builder builder) { @@ -252,25 +257,28 @@ Map, CompletableFuture, V>>> getCompletableFutur @SuppressForbidden(reason = "Ehcache uses File.io") private PersistentCacheManager buildCacheManager() { // In case we use multiple ehCaches, we can define this cache manager at a global level. - return CacheManagerBuilder.newCacheManagerBuilder() - .with(CacheManagerBuilder.persistence(new File(storagePath))) - - .using( - PooledExecutionServiceConfigurationBuilder.newPooledExecutionServiceConfigurationBuilder() - .defaultPool(THREAD_POOL_ALIAS_PREFIX + "Default#" + UNIQUE_ID, 1, 3) // Default pool used for other tasks - // like event listeners - .pool( - this.threadPoolAlias, - (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) - .get(DISK_WRITE_MIN_THREADS_KEY) - .get(settings), - (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) - .get(DISK_WRITE_MAXIMUM_THREADS_KEY) - .get(settings) - ) - .build() - ) - .build(true); + // Creating the cache manager also requires permissions specified in plugin-security.policy + return AccessController.doPrivileged((PrivilegedAction) () -> { + return CacheManagerBuilder.newCacheManagerBuilder() + .with(CacheManagerBuilder.persistence(new File(storagePath))) + + .using( + PooledExecutionServiceConfigurationBuilder.newPooledExecutionServiceConfigurationBuilder() + .defaultPool(THREAD_POOL_ALIAS_PREFIX + "Default#" + UNIQUE_ID, 1, 3) // Default pool used for other tasks + // like event listeners + .pool( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MIN_THREADS_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MAXIMUM_THREADS_KEY) + .get(settings) + ) + .build() + ) + .build(true); + }); } @Override diff --git a/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy index 40007eea62dba..85c82824d5d65 100644 --- a/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy @@ -9,5 +9,8 @@ grant { permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; permission java.lang.RuntimePermission "createClassLoader"; + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.lang.RuntimePermission "getClassLoader"; }; 
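Aside on the change above: the EhcacheDiskCache edits wrap cache and cache-manager creation in AccessController.doPrivileged so that the security-manager check stops at the plugin's own protection domain, where the permissions added to plugin-security.policy (accessDeclaredMembers, suppressAccessChecks, getClassLoader) are granted. A minimal sketch of that pattern follows; DiskCacheBootstrap, buildCacheManager, and createManager are illustrative stand-ins, not the actual OpenSearch or Ehcache API.

    import java.io.File;
    import java.security.AccessController;
    import java.security.PrivilegedAction;

    // Hypothetical helper illustrating the privileged-block pattern used in the diff above.
    final class DiskCacheBootstrap {

        static Object buildCacheManager(String storagePath) {
            // Keep the privileged block as small as possible: it should wrap only the call
            // that needs the permissions granted in plugin-security.policy
            // (accessDeclaredMembers, suppressAccessChecks, getClassLoader).
            return AccessController.doPrivileged(
                (PrivilegedAction<Object>) () -> createManager(new File(storagePath))
            );
        }

        // Stand-in for Ehcache's CacheManagerBuilder.newCacheManagerBuilder()...build(true);
        // it only exists to keep this sketch self-contained.
        private static Object createManager(File directory) {
            return directory;
        }
    }

Keeping the doPrivileged scope tight, as the real change does, avoids accidentally running unrelated plugin logic with elevated permissions.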
diff --git a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java index 7f4a9b8ca0ac7..a022b8b9bf8b0 100644 --- a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java +++ b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java @@ -32,54 +32,67 @@ package org.opensearch.ingest.attachment; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.lucene.tests.util.LuceneTestCase.SuppressFileSystems; import org.apache.lucene.tests.util.TestUtil; import org.apache.tika.metadata.Metadata; import org.opensearch.common.io.PathUtils; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Map; /** - * Evil test-coverage cheat, we parse a bunch of docs from tika - * so that we have a nice grab-bag variety, and assert some content - * comes back and no exception. + * Parse sample tika documents and assert the contents has not changed according to previously recorded checksums. + * Uncaught changes to tika parsing could potentially pose bwc issues. + * Note: In some cases tika will access a user's locale to inform the parsing of a file. + * The checksums of these files are left empty, and we only validate that parsed content is not null. */ @SuppressFileSystems("ExtrasFS") // don't try to parse extraN public class TikaDocTests extends OpenSearchTestCase { - /** some test files from tika test suite, zipped up */ + /** some test files from the apache tika unit test suite with accompanying sha1 checksums */ static final String TIKA_FILES = "/org/opensearch/ingest/attachment/test/tika-files/"; + static final String TIKA_CHECKSUMS = "/org/opensearch/ingest/attachment/test/.checksums"; - public void testFiles() throws Exception { - Path tmp = createTempDir(); - logger.debug("unzipping all tika sample files"); - try (DirectoryStream stream = Files.newDirectoryStream(PathUtils.get(getClass().getResource(TIKA_FILES).toURI()))) { - for (Path doc : stream) { - String filename = doc.getFileName().toString(); - TestUtil.unzip(getClass().getResourceAsStream(TIKA_FILES + filename), tmp); - } - } + public void testParseSamples() throws Exception { + String checksumJson = Files.readString(PathUtils.get(getClass().getResource(TIKA_CHECKSUMS).toURI())); + Map checksums = XContentHelper.convertToMap(JsonXContent.jsonXContent, checksumJson, false); + DirectoryStream stream = Files.newDirectoryStream(unzipToTemp(TIKA_FILES)); - try (DirectoryStream stream = Files.newDirectoryStream(tmp)) { - for (Path doc : stream) { - logger.debug("parsing: {}", doc); - assertParseable(doc); + for (Path doc : stream) { + String parsedContent = tryParse(doc); + assertNotNull(parsedContent); + assertFalse(parsedContent.isEmpty()); + + String check = checksums.get(doc.getFileName().toString()).toString(); + if (!check.isEmpty()) { + assertEquals(check, DigestUtils.sha1Hex(parsedContent)); } } + + stream.close(); } - void assertParseable(Path fileName) throws Exception { - try { - byte bytes[] = Files.readAllBytes(fileName); - String parsedContent = TikaImpl.parse(bytes, new Metadata(), -1); - assertNotNull(parsedContent); - assertFalse(parsedContent.isEmpty()); - logger.debug("extracted content: {}", 
parsedContent); - } catch (Exception e) { - throw new RuntimeException("parsing of filename: " + fileName.getFileName() + " failed", e); + private Path unzipToTemp(String zipDir) throws Exception { + Path tmp = createTempDir(); + DirectoryStream stream = Files.newDirectoryStream(PathUtils.get(getClass().getResource(zipDir).toURI())); + + for (Path doc : stream) { + String filename = doc.getFileName().toString(); + TestUtil.unzip(getClass().getResourceAsStream(zipDir + filename), tmp); } + + stream.close(); + return tmp; + } + + private String tryParse(Path doc) throws Exception { + byte bytes[] = Files.readAllBytes(doc); + return TikaImpl.parse(bytes, new Metadata(), -1); } } diff --git a/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/.checksums b/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/.checksums new file mode 100644 index 0000000000000..227d7d833a231 --- /dev/null +++ b/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/.checksums @@ -0,0 +1,209 @@ +{ + "testWORD_tabular_symbol.doc": "c708d7ef841f7e1748436b8ef5670d0b2de1a227", + "testWORD_1img.docx": "367e2ade13ca3c19bcd8a323e21d51d407e017ac", + "testMasterFooter.odp": "bcc59df70699c739423a50e362c722b81ae76498", + "testTXTNonASCIIUTF8.txt": "1ef514431ca8d838f11e99f8e4a0637730b77aa0", + "EmbeddedOutlook.docx": "c544a6765c19ba11b0bf3edb55c79e1bd8565c6e", + "testWORD_override_list_numbering.docx": "4e892319b921322916225def763f451e4bbb4e16", + "testTextBoxes.key": "b01581d5bd2483ce649a1a1406136359f4b93167", + "testPPT_masterText.pptx": "9fee8337b76dc3e196f4554dcde22b9dd1c3b3e8", + "testComment.docx": "333b9009686f27265b4729e8172b3e62048ec7ec", + "testRTFInvalidUnicode.rtf": "32b3e3d8e5c5a1b66cb15fc964b9341bea7048f4", + "testEXCEL_headers_footers.xlsx": "9e8d2a700fc431fe29030e86e08162fc8ecf2c1a", + "testWORD6.doc": "1479de589755c7212815445799c44dab69d4587c", + "testPagesHeadersFootersFootnotes.pages": "99d434be7de4902dc70700aa9c2a31624583c1f1", + "testPDF_no_extract_yes_accessibility_owner_empty.pdf": "6eb693dac68fece3bf3cd1aa9880ea9b23fc927c", + "testOpenOffice2.odt": "564b3e1999a53073a04142e01b663757a6e7fb08", + "testTables.key": "250cff75db7fc3c8b95b2cbd3f37308826e0c93d", + "testDOCX_Thumbnail.docx": "fce6a43271bc242e2bb8341afa659ed166e08050", + "testWORD_3imgs.docx": "292ca6fa41d32b462e66061e89adb19423721975", + "testPDF_acroform3.pdf": "dcf6588cb5e41701b168606ea6bfbadecdcd3bc9", + "testWORD_missing_ooxml_bean1.docx": "c3058f2513fecc0a6d76d3ecf55676f236b085ff", + "testPDFTwoTextBoxes.pdf": "4adf324ce030076b1755fdb3a6cce676ee325ae4", + "testRTFUnicodeGothic.rtf": "f9932470ff686b0c217ea94ed5d4f2fd85f7998e", + "headers.mbox": "75ec25789fe870b6d25365e4ea73d731fc274847", + "testPPT_embeded.ppt": "", + "testXML3.xml": "804d4812408eb324ae8483d2140b648ec871dd2a", + "testOptionalHyphen.doc": "10f9ca38cc2985e94967aa2c454bfe40aff76976", + "testComment.doc": "66e57653d5d08478556ca640408b172b65855cc7", + "testEXCEL_headers_footers.xls": "18977c66fc8bcb8c44de3063b69b65a3de9c3f25", + "testWORD_embedded_rtf.doc": "cc2d289acfe3d1068a2649b7fa0c06c50bb6ceda", + "testEXCEL_custom_props.xlsx": "6b72ae08362a204b37dbba0a30b4134ae3e7918f", + "testOptionalHyphen.docx": "5b8ffc0df1691a8fed7d63aa9b256e9e02e36d71", + "testPPT_various.pptx": "d149de9af8071141a6ba6e2cd4ef5f6d9431a826", + "testWORD_closingSmartQInHyperLink.doc": "9859f378c603b70bf0d44a281169ae5b16a21878", + "test_embedded_zip.pptx": "d19406edcec09440d066877c451ceba60abc3483", + 
"testRTFUmlautSpaces.rtf": "155b39879c5b5fbad22fd650be37ae7f91489eb2", + "protectedFile.xlsx": "ee08eeaf05c35c960243f831c3a974d9ee07aa28", + "Doc1_ole.doc": "fb63220506ab666f1fe87b0608e1447fd4fd3489", + "testEXCEL_embeded.xlsx": "", + "EmbeddedDocument.docx": "", + "testODFwithOOo3.odt": "3815d6fb7f5829db882ea8ebd664f252711e6e60", + "testPagesHeadersFootersRomanUpper.pages": "85b3cd545ba6c33e5d44b844a6afea8cb6eaec0b", + "testPPT_comment.ppt": "88fd667fd0292785395a8d0d229304aa91110556", + "testPPT_2imgs.pptx": "66eda11ad472918153100dad8ee5be0f1f8e2e04", + "testPagesHeadersFootersAlphaUpper.pages": "56bef0d1eaedfd7599aae29031d2eeb0e3fe4688", + "testWORD_text_box.docx": "e01f7b05c6aac3449b9a699c3e4d2e62ff3368a3", + "testWORD_missing_text.docx": "3814332884a090b6d1020bff58d0531486710c45", + "testComment.pdf": "60e181061a00454c2e622bd37a9878234c13231d", + "testPDF_no_extract_no_accessibility_owner_empty.pdf": "6eb693dac68fece3bf3cd1aa9880ea9b23fc927c", + "test_embedded_package.rtf": "cd90adb3f777e68aa0288fd23e8f4fbce260a763", + "testPDF_bom.pdf": "6eb693dac68fece3bf3cd1aa9880ea9b23fc927c", + "testOptionalHyphen.ppt": "7e016e42860bd408054bb8653fef39b2756119d9", + "testHTML_utf8.html": "3ba828044754772e4c9df5f9a2213beaa75842ef", + "testPPT_comment.pptx": "25fab588194dabd5902fd2ef880ee9542d036776", + "testRTFWithCurlyBraces.rtf": "019cab63b73ff89d094823cf50c0a721bec08ee2", + "testFooter.ods": "846e1d0415b23fa27631b536b0cf566abbf8fcc1", + "testPPT.ppt": "933ee556884b1d9e28b801daa0d77bbaa4f4be62", + "testEXCEL-formats.xls": "", + "testPPT_masterFooter.pptx": "29bb97006b3608b7db6ff72b94d20157878d94dd", + "testWORD_header_hyperlink.doc": "914bbec0730c54948ad307ea3e375ef0c100abf1", + "testRTFHyperlink.rtf": "2b2ffb1997aa495fbab1af490d134051de168c97", + "testExtraSpaces.pdf": "b5575400309b01c1050a927d8d1ecf8761062abc", + "testRTFWindowsCodepage1250.rtf": "7ba418843f401634f97d21c844c2c4093b7194fb", + "testRTFTableCellSeparation2.rtf": "62782ca40ff0ed6c3ba90f8055ee724b44af203f", + "testPagesHeadersFootersRomanLower.pages": "2410fc803907001eb39c201ad4184b243e271c6d", + "headerPic.docx": "c704bb648feac7975dff1024a5f762325be7cbc2", + "testHTMLNoisyMetaEncoding_4.html": "630e14e3495a78580c4e26fa3bbe3123ccf4fd8a", + "testRTFBoldItalic.rtf": "0475d224078682cf3f9f3f4cbc14a63456c5a0d8", + "test-outlook.msg": "1f202fc11a873e305d5b4d4607409f3f734065ec", + "testRTFVarious.rtf": "bf6ea9cf57886e680c5e6743a66a12b950a09083", + "testXHTML.html": "c6da900f81c1c550518e65d579d3dd62dd7c5c0c", + "EmbeddedPDF.docx": "454476bdf4a968189a6f53e75c146382bf58a434", + "testXML.xml": "e1615e9b31be58f7af9ad963e5a112efa5cdaffa", + "testWORD_no_format.docx": "9a3f5d8a4c8c0f077cc615bcfc554dc87d5926aa", + "testPPT_masterText.ppt": "f5ff5e2d45ccb180cf371ed99b7dfeb2a93539b3", + "testPDF_PDFEncodedStringInXMP.pdf": "78fd59d394f72d28a9908739fa562099978dafa1", + "testPPT_custom_props.pptx": "72152d28afbc23a50cc71fa37d1dce9ef03ca72d", + "testRTFListOverride.rtf": "f8c61d8a66afdaa07f3740e859497818bfc2ca01", + "testEXCEL_1img.xls": "", + "testWORD_1img.doc": "0826d299a7770e93603f5667d89dccb7b74d904c", + "testNPEOpenDocument.odt": "4210b973c80084c58463ec637fa43e911f77d6fe", + "testRTFWord2010CzechCharacters.rtf": "9443011aac32434240ab8dbff360c970fc1c7074", + "testPDF_Version.8.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testPPT.ppsx": "71333ef84f7825d8ad6aba2ba993d04b4bab41c6", + "testPPT_autodate.pptx": "50467dbb37d1c74b8b37fe93eddf6f9e87d21bf3", + "testWordArt.pptx": "3566bbee790704b3654fe78319957f9e0cddb6d9", + "NullHeader.docx": 
"18430c968ba29173b52610efdaa723424b3c4d79", + "testRTFWordPadCzechCharacters.rtf": "5dbb58452a3507c384008662f8fce90063f12189", + "resume.html": "fbfb9d8264f6eebd79847fe7a7f1b81edd4a027d", + "testPagesLayout.pages": "5db1ab91c93e6183d0af8513f62c7b87964704af", + "testOptionalHyphen.pptx": "c2977eefe7d2cad8c671f550d7883185ec65591b", + "testWORD_numbered_list.docx": "07194c58165993468e66bc4eba4f5bd89d5bee09", + "testEXCEL_1img.xlsx": "", + "testPDFTripleLangTitle.pdf": "6eb693dac68fece3bf3cd1aa9880ea9b23fc927c", + "protect.xlsx": "ee08eeaf05c35c960243f831c3a974d9ee07aa28", + "testWORD_bold_character_runs2.docx": "f10e562d8825ec2e17e0d9f58646f8084a658cfa", + "testXLSX_Thumbnail.xlsx": "020bf155ae157661c11727c54e6694cf9cd2c0d3", + "testWORD_embedded_pdf.docx": "d8adb797aaaac92afd8dd9b499bd197347f15688", + "testOptionalHyphen.rtf": "2f77b61bab5b4502b4ddd5018b454be157091d07", + "testEXCEL-charts.xls": "", + "testWORD_override_list_numbering.doc": "60e47a3e71ba08af20af96131d61740a1f0bafa3", + "testPDF_twoAuthors.pdf": "c5f0296cc21f9ae99ceb649b561c55f99d7d9452", + "testPDF_Version.10.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testHTMLNoisyMetaEncoding_2.html": "630e14e3495a78580c4e26fa3bbe3123ccf4fd8a", + "testFooter.odt": "cd5d0fcbcf48d6f005d087c47d00e84f39bcc321", + "testPPT.pptm": "71333ef84f7825d8ad6aba2ba993d04b4bab41c6", + "testPPT_various.ppt": "399e27a9893284f106dc44f15b5e636454db681e", + "testRTFListMicrosoftWord.rtf": "0303eb3e2f30530621a7a407847b759a3b21467e", + "testWORD_bold_character_runs2.doc": "f10e562d8825ec2e17e0d9f58646f8084a658cfa", + "boilerplate-whitespace.html": "a9372bc75d7d84cbcbb0bce68fcaed73ad8ef52c", + "testEXCEL_95.xls": "20d9b9b0f3aecd28607516b4b837c8bab3524b6c", + "testPPT_embedded_two_slides.pptx": "", + "testPDF_bookmarks.pdf": "5fc486c443511452db4f1aa6530714c6aa49c831", + "test_recursive_embedded.docx": "afc32b07ce07ad273e5b3d1a43390a9d2b6dd0a9", + "testEXCEL-formats.xlsx": "", + "testPPT_masterText2.pptx": "2b01eab5d0349e3cfe791b28c70c2dbf4efc884d", + "test.doc": "774be3106edbb6d80be36dbb548d62401dcfa0fe", + "test_recursive_embedded_npe.docx": "afc32b07ce07ad273e5b3d1a43390a9d2b6dd0a9", + "testPPT_embedded2.ppt": "80e106b3fc68107e7f9579cff04e3b15bdfc557a", + "testWORD_custom_props.docx": "e7a737a5237a6aa9c6b3fc677eb8fa65c30d6dfe", + "testPDF_Version.4.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testBinControlWord.rtf": "ef858fbb7584ea7f92ffed8d0a08c1cc35ffee07", + "testWORD_null_style.docx": "0be9dcfb83423c78a06af514ec21e4e7770ec48e", + "test-outlook2003.msg": "bb3c35eb7e95d657d7977c1d3d52862734f9f329", + "testPDFVarious.pdf": "c66bbbacb10dd27430f7d0bed9518e75793cedae", + "testHTMLNoisyMetaEncoding_3.html": "630e14e3495a78580c4e26fa3bbe3123ccf4fd8a", + "testRTFCorruptListOverride.rtf": "116a782d02a7f25010a15cbbb189bf98e6b89855", + "testEXCEL_custom_props.xls": "b5584d9b13ab1566ce539238dc75e7eb3449ba7f", + "testPDF_Version.7.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testPDFEmbeddingAndEmbedded.docx": "e7b648adb15cd16cdd84437c2b9524a8eeb213e4", + "testHTMLNoisyMetaEncoding_1.html": "630e14e3495a78580c4e26fa3bbe3123ccf4fd8a", + "testWORD_3imgs.doc": "818aa8c6c44dd78c49100c3c38e95abdf3812981", + "testRTFEmbeddedLink.rtf": "2720ffb5ff3a6bbb2c5c1cb43fb4922362ed788a", + "testKeynote.key": "11387b59fc6339bb73653fcbb26d387521b98ec9", + "testPDF.pdf": "5a377554685367764eaf73d093408ace323fcec7", + "protectedSheets.xlsx": "", + "testWORD.doc": "cdd41377e699287cbbe17fbb1498cfe5814dde23", + "testComment.xlsx": 
"d4be580bb97c1c90be379281179c7932b37a18c0", + "testPDFPackage.pdf": "75d6fa216b4e2880a65ced55d17ca2b599d2606c", + "testWORD_embeded.doc": "", + "testHTML.html": "6548b16c5ea33e907577615ce60ca4876a3936ef", + "testEXCEL_5.xls": "a174f098333c659d331317641d4d1d9d83055288", + "pictures.ppt": "95bbfdbf2f60f74371285c337d3445d0acd59a9b", + "testPPT_masterText2.ppt": "f5ff5e2d45ccb180cf371ed99b7dfeb2a93539b3", + "testPDF-custommetadata.pdf": "a84b914655db55574e6002b6f37209ecd4c3d462", + "testWORD_embeded.docx": "", + "testStyles.odt": "c25dd05633e3aab7132d2f5608126e2b4b03848f", + "testPDF_multiFormatEmbFiles.pdf": "2103b2c30b44d5bb3aa790ab04a6741a10ea235a", + "testXML2.xml": "a8c85a327716fad93faa4eb0f993057597d6f471", + "testPagesComments.pages": "cbb45131cf45b9c454e754a07af3ae927b1a69cc", + "testEXCEL_4.xls": "8d5e6156222151faaccb079d46ddb5393dd25771", + "testWORD_no_format.doc": "88feaf03fe58ee5cc667916c6a54cbd5d605cc1c", + "testPages.pages": "288e6db2f39604e372a2095257509c78dba22cbb", + "footnotes.docx": "33b01b73a12f9e14efbcc340890b11ee332dca8e", + "testWORD_bold_character_runs.doc": "f10e562d8825ec2e17e0d9f58646f8084a658cfa", + "testWORD_custom_props.doc": "e7a737a5237a6aa9c6b3fc677eb8fa65c30d6dfe", + "testPDF_Version.11.x.PDFA-1b.pdf": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", + "testAnnotations.pdf": "5f599e7916198540e1b52c3e472a525f50fd45f6", + "tika434.html": "7d74122631f52f003a48018cc376026ccd8d984e", + "testPagesHeadersFootersAlphaLower.pages": "fc1d766908134ff4689fa63fa3e91c3e9b08d975", + "testRTFRegularImages.rtf": "756b1db45cb05357ceaf9c8efcf0b76e3913e190", + "testRTFUmlautSpaces2.rtf": "1fcd029357062241d74d789e93477c101ff24e3f", + "testWORD_numbered_list.doc": "e06656dd9b79ac970f3cd065fa8b630a4981556f", + "testPPT_autodate.ppt": "05b93967ea0248ad263b2f24586e125df353fd3d", + "testBulletPoints.key": "92242d67c3dbc1b22aac3f98e47061d09e7719f9", + "testMasterSlideTable.key": "1d61e2fa3c3f3615500c7f72f62971391b9e9a2f", + "testWORD_various.doc": "8cbdf1a4e0d78471eb90403612c4e92866acf0cb", + "testEXCEL_textbox.xlsx": "1e81121e91e58a74d838e414ae0fc0055a4b4100", + "big-preamble.html": "a9d759b46b6c6c1857d0d89c3a75ee2f3ace70c9", + "testWORD.docx": "f72140bef19475e950e56084d1ab1cb926697b19", + "testComment.rtf": "f6351d0f1f20c4ee0fff70adca6abbc6e638610e", + "testRTFUnicodeUCNControlWordCharacterDoubling.rtf": "3e6f2f38682e38ffc96a476ca51bec2291a27fa7", + "testPDF_Version.5.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testPPTX_Thumbnail.pptx": "6aa019154289317c7b7832fe46556e6d61cd0a9f", + "testRTFTableCellSeparation.rtf": "5647290a3197c1855fad10201dc7be60ea7b0e42", + "testRTFControls.rtf": "aee6afb80e8b09cf49f056020c037f70c2757e49", + "testEXCEL.xls": "", + "testRTFJapanese.rtf": "08976f9a7d6d3a155cad84d7fa23295cb972a17a", + "testPageNumber.pdf": "96b03d2cc6782eba653af28228045964e68422b5", + "testOptionalHyphen.pdf": "12edd450ea76ea4e79f80ebd3442999ec2180dbc", + "testPDFFileEmbInAnnotation.pdf": "97a6e5781bbaa6aea040546d797c4916f9d90c86", + "testFontAfterBufferedText.rtf": "d1c8757b3ed91f2d7795234405c43005868affa3", + "testPPT_masterFooter.ppt": "8c9104385820c2631ddda20814231808fac03d4d", + "testWORD_various.docx": "189df989e80afb09281901aefc458c6630a8530b", + "testComment.ppt": "21842dd9cb8a7d4af0f102543c192861c9789705", + "testPopupAnnotation.pdf": "1717b1d16c0a4b9ff5790cac90fc8e0fba170a35", + "testWORD_bold_character_runs.docx": "f10e562d8825ec2e17e0d9f58646f8084a658cfa", + "testOverlappingText.pdf": "726da7d6c184512ed8d44af2a5085d65523c4572", + "testRTF.rtf": 
"91e830ceba556741116c9e83b0c69a0d6c5c9304", + "testRTFIgnoredControlWord.rtf": "1eb6a2f2fd32b1bb4227c0c02a35cb6027d9ec8c", + "testComment.xls": "4de962f16452159ce302fc4a412b06a06cf9a0f6", + "testPPT.ppsm": "71333ef84f7825d8ad6aba2ba993d04b4bab41c6", + "boilerplate.html": "b3558f02c3179e4aeeb6057594d87bda79964e7b", + "testEXCEL_embeded.xls": "", + "testEXCEL.xlsx": "", + "testPPT_2imgs.ppt": "9a68072ffcf171389e78cf8bc018c4b568a6202d", + "testComment.pptx": "6ae6052f469b8f901fd4fd8bc70f8e267255a58e", + "testPDF_Version.6.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testPPT.pptx": "71333ef84f7825d8ad6aba2ba993d04b4bab41c6", + "testPPT_custom_props.ppt": "edf196acc12701accc7be5dfe63e053436db45e6", + "testPPT_embeded.pptx": "", + "testRTFListLibreOffice.rtf": "4c38d9e2f0a8c9a4c2cc8d2a52db9591ab759abe", + "testPDF_Version.9.x.pdf": "03b60dfc8c103dbabeedfd682e979f96dd8983a2", + "testRTFHexEscapeInsideWord.rtf": "6cffda07e774c55b5465d8134a0bdcb8c30f3386", + "testRTFNewlines.rtf": "2375ca14e2b0d8f7ff6bbda5191544b3ee7c09fb", + "testRTF-ms932.rtf": "5f9db1b83bf8e9c4c6abb065adaeb151307d33f2", + "test_TIKA-1251.doc": "5a9394c34274964055fdd9272b4f7dc314b99ecf", + "test_list_override.rtf": "9fe8b4a36c5222fe7ed2e9b54e2330aec8fa9423" +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java index 273b69e483e8c..2f353f2a53329 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java @@ -21,6 +21,7 @@ import org.opensearch.plugin.insights.settings.QueryInsightsSettings; import org.opensearch.plugins.ActionPlugin; import org.opensearch.rest.RestHandler; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.ScalingExecutorBuilder; @@ -50,8 +51,7 @@ public void setup() { clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); - clusterService = new ClusterService(settings, clusterSettings, threadPool); - + clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, threadPool); } public void testGetSettings() { diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java index f340950017a5c..328ed0cd2ed15 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java @@ -22,6 +22,7 @@ import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.support.ValueType; import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -57,7 +58,7 @@ public void setup() { clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); 
clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); - clusterService = new ClusterService(settings, clusterSettings, null); + clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); when(queryInsightsService.getTopQueriesService(MetricType.LATENCY)).thenReturn(topQueriesService); } diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java index a5f36b6e8cce0..d05cf7b6a636f 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java @@ -17,6 +17,7 @@ import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; import org.opensearch.plugin.insights.rules.model.MetricType; import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -33,7 +34,7 @@ public class TransportTopQueriesActionTests extends OpenSearchTestCase { private final Settings.Builder settingsBuilder = Settings.builder(); private final Settings settings = settingsBuilder.build(); private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - private final ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool); + private final ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, threadPool); private final TransportService transportService = mock(TransportService.class); private final QueryInsightsService topQueriesByLatencyService = mock(QueryInsightsService.class); private final ActionFilters actionFilters = mock(ActionFilters.class); diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 deleted file mode 100644 index b13a709f1c449..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c4ca8f15e85c5 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +381c5bf8b7570c163fa7893a26d02b7ac36ff6eb \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 deleted file mode 100644 index b83ad36222d07..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline 
at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..ebd1e0d52efb2 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5caf947d87a1b..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f4f0c0dd54c578af2c613a0db7172bf7dca9c79 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..9f6e95ba38d2e --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +4d54c8d5b95b14756043efb59b8c3e62ec67aa43 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 deleted file mode 100644 index e0f52ab04ea84..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a77224107f586a7f9e3dc5d12fc0d4d8f0c04803 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..f31396d94c2ec --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b7fb401dd47c79e6b99f2319ac3b561c50c31c30 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 deleted file mode 100644 index b42cdc2835eb0..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f4d858234b557b73631a24e562bb89fc5399cad \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..18d122acd2c44 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3e687cdc4ecdbbad07508a11b715bdf95fa20939 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5afeb9627c9b5..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2c468962b1b64 
--- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index e019a878dfcf0..d97ff72cf2ebb 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -67,7 +67,7 @@ dependencies { api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api 'org.apache.avro:avro:1.11.3' - api 'com.google.code.gson:gson:2.10.1' + api 'com.google.code.gson:gson:2.11.0' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.7.0' diff --git a/plugins/repository-hdfs/licenses/gson-2.10.1.jar.sha1 b/plugins/repository-hdfs/licenses/gson-2.10.1.jar.sha1 deleted file mode 100644 index 9810309d1013a..0000000000000 --- a/plugins/repository-hdfs/licenses/gson-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3add478d4382b78ea20b1671390a858002feb6c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1 new file mode 100644 index 0000000000000..0414a49526895 --- /dev/null +++ b/plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1 @@ -0,0 +1 @@ +527175ca6d81050b53bdd4c457a6d6e017626b0e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 deleted file mode 100644 index a874755cc29da..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ba1acc8ff088334f2ac5556663f8b737eb8b571 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..8f8d86e6065b2 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +db3f4d3ad3d16e26991a64d50b749ae09e0e0c8e \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 560d12d14395d..00decbe4fa9cd 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -577,11 +577,14 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', ) } diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 
deleted file mode 100644 index 76b51cdae3867..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..faaf70c858a6e --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 deleted file mode 100644 index 1bccee872152d..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..7affbc14fa93a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 deleted file mode 100644 index 3423fb94e8497..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..07730a5606ce2 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 deleted file mode 100644 index b83ad36222d07..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..ebd1e0d52efb2 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5172500557f8b..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..568c0aa2a2c03 --- /dev/null +++ 
b/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 deleted file mode 100644 index cabe61b300523..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2d6050dd1e3a5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 deleted file mode 100644 index 14e21cc0cdb60..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c3ee8087a8b5d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 deleted file mode 100644 index 6b23d0883e31f..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..32c8fa2b876a2 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 deleted file mode 100644 index 83fc39246ef0a..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7307c8acbc9b331fce3496750a5112bdc726fd2a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..408f3aa5d1339 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3ca1cff0bf82bfd38e89f6946e54f24cbb3424a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 
5afeb9627c9b5..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2c468962b1b64 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index acf0c5e83a17b..b489a3cc85037 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -78,7 +78,7 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.blobstore.DeleteResult; -import org.opensearch.common.blobstore.FetchBlobResult; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; @@ -143,9 +143,9 @@ public boolean blobExists(String blobName) { @ExperimentalApi @Override - public FetchBlobResult readBlobWithMetadata(String blobName) throws IOException { + public InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException { S3RetryingInputStream s3RetryingInputStream = new S3RetryingInputStream(blobStore, buildKey(blobName)); - return new FetchBlobResult(s3RetryingInputStream, s3RetryingInputStream.getMetadata()); + return new InputStreamWithMetadata(s3RetryingInputStream, s3RetryingInputStream.getMetadata()); } @Override diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index de815f9202f44..f688be9216b8f 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -244,6 +244,11 @@ public Map> extendedStats() { return extendedStats; } + @Override + public boolean isBlobMetadataEnabled() { + return true; + } + public ObjectCannedACL getCannedACL() { return cannedACL; } diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 8c0ee8ba718ac..ee557aa0efc79 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -173,11 +173,14 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 
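The S3BlobContainer hunk above renames the return type (FetchBlobResult becomes InputStreamWithMetadata) and S3BlobStore gains an isBlobMetadataEnabled() capability flag. A hedged sketch of how a caller might consume the renamed type; the getInputStream()/getMetadata() accessors and try-with-resources support are assumptions, since only the constructor appears in this diff.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map;

import org.opensearch.common.blobstore.BlobContainer;
import org.opensearch.common.blobstore.BlobStore;
import org.opensearch.common.blobstore.InputStreamWithMetadata;

final class BlobMetadataReadSketch {

    // Drains a blob and returns its object metadata; callers can first check the
    // capability flag that S3BlobStore now reports as true in the hunk above.
    static Map<String, String> readMetadata(BlobStore blobStore, BlobContainer container, String blobName) throws IOException {
        if (blobStore.isBlobMetadataEnabled() == false) {
            return Map.of();
        }
        try (InputStreamWithMetadata result = container.readBlobWithMetadata(blobName)) {
            InputStream data = result.getInputStream();        // accessor name is an assumption
            data.transferTo(OutputStream.nullOutputStream());  // consume the payload
            return result.getMetadata();                       // accessor name is an assumption
        }
    }
}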
'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 deleted file mode 100644 index 76b51cdae3867..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..faaf70c858a6e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 deleted file mode 100644 index 1bccee872152d..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..7affbc14fa93a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 deleted file mode 100644 index 3423fb94e8497..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..07730a5606ce2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5172500557f8b..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..568c0aa2a2c03 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 deleted file mode 100644 index cabe61b300523..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2d6050dd1e3a5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 deleted file mode 100644 index 14e21cc0cdb60..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c3ee8087a8b5d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 deleted file mode 100644 index 6b23d0883e31f..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..32c8fa2b876a2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle index 7d7eb330b4a55..1a94def3fdff1 100644 --- a/plugins/transport-reactor-netty4/build.gradle +++ b/plugins/transport-reactor-netty4/build.gradle @@ -249,11 +249,14 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', 
'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 deleted file mode 100644 index 76b51cdae3867..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..faaf70c858a6e --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 deleted file mode 100644 index 1bccee872152d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..7affbc14fa93a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +f1fa43b03e93ab88e805b6a4e3e83780c80b47d2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 deleted file mode 100644 index b13a709f1c449..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c4ca8f15e85c5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +381c5bf8b7570c163fa7893a26d02b7ac36ff6eb \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 deleted file mode 100644 index 3423fb94e8497..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..07730a5606ce2 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +9d05cd927209ea25bbf342962c00b8e5a828c2a4 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 
b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 deleted file mode 100644 index b83ad36222d07..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..ebd1e0d52efb2 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +e0849843eb5b1c036b12551baca98a9f7ff847a0 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5172500557f8b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..568c0aa2a2c03 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +ec361e7e025c029be50c55c8480080cabcbc01e7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 deleted file mode 100644 index cabe61b300523..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2d6050dd1e3a5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +168db749c22652ee7fed1ebf7ec46ce856d75e51 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 deleted file mode 100644 index 14e21cc0cdb60..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..c3ee8087a8b5d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +66c15921104cda0159b34e316541bc765dfaf3c0 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 deleted file mode 100644 index b42cdc2835eb0..0000000000000 --- 
a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f4d858234b557b73631a24e562bb89fc5399cad \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..18d122acd2c44 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3e687cdc4ecdbbad07508a11b715bdf95fa20939 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 deleted file mode 100644 index 6b23d0883e31f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..32c8fa2b876a2 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +b91f04c39ac14d6a29d07184ef305953ee6e0348 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 deleted file mode 100644 index 5afeb9627c9b5..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..2c468962b1b64 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +a7096e7c0a25a983647909d7513f5d4943d589c0 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java index 4be049c9a9109..a1122f279c7e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -52,6 +52,7 @@ import org.opensearch.index.query.TermsQueryBuilder; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.util.concurrent.ExecutionException; @@ -60,6 +61,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteCloneIndexIT extends RemoteStoreBaseIntegTestCase { @Override @@ -139,6 
+141,7 @@ public void testCreateCloneIndex() { } public void testCreateCloneIndexFailure() throws ExecutionException, InterruptedException { + asyncUploadMockFsRepo = false; Version version = VersionUtils.randomIndexCompatibleVersion(random()); int numPrimaryShards = 1; prepareCreate("source").setSettings( diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java index 282eb9c6ad95e..cd19a0ee1ff77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java @@ -48,7 +48,9 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; +import org.junit.Before; import java.util.Arrays; import java.util.Map; @@ -61,12 +63,18 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteShrinkIndexIT extends RemoteStoreBaseIntegTestCase { @Override protected boolean forbidPrivateIndexSettings() { return false; } + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) @@ -84,6 +92,7 @@ public void testCreateShrinkIndexToN() { int[] shardSplits = randomFrom(possibleShardSplits); assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]); assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]); + internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get(); for (int i = 0; i < 20; i++) { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java index dd4252d24f314..dc3c8793a93f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -67,6 +67,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; @@ -86,6 +87,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index 61b34af5be3ba..42120aa32eb47 100644 --- 
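The remote-store integration-test hunks above move to a per-test cluster scope with zero pre-started data nodes, so each test provisions exactly the nodes it needs and can opt out of the mock repository wiring added later in this diff. A condensed sketch of that pattern; the class name, index name, and shard count are illustrative.

import org.junit.Before;
import org.opensearch.common.settings.Settings;
import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase;
import org.opensearch.test.OpenSearchIntegTestCase;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class ExampleScopedRemoteStoreIT extends RemoteStoreBaseIntegTestCase {

    @Before
    public void setup() {
        // Opt out of the randomized mock-FS repository introduced in RemoteStoreBaseIntegTestCase below.
        asyncUploadMockFsRepo = false;
    }

    public void testIndexOnExplicitlyStartedNodes() {
        // With numDataNodes = 0 the test decides how many data nodes exist.
        internalCluster().ensureAtLeastNumDataNodes(2);
        createIndex("example-index", Settings.builder().put(indexSettings()).put("number_of_shards", 2).build());
        ensureGreen("example-index");
    }
}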
a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -18,6 +18,7 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -40,6 +41,11 @@ public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase { private static String INDEX_NAME = "test-index"; + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index ae2295cb874f5..766ca2c1189e5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -1077,24 +1077,6 @@ public void testDynamicStalenessThresholdUpdate() throws Exception { // staleness threshold dynamic updates should throw exceptions on invalid input public void testInvalidStalenessThresholdUpdateThrowsException() throws Exception { - int cacheCleanIntervalInMillis = 1; - String node = internalCluster().startNode( - Settings.builder() - .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.90) - .put( - IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, - TimeValue.timeValueMillis(cacheCleanIntervalInMillis) - ) - ); - Client client = client(node); - String index1 = "index1"; - setupIndex(client, index1); - - // create first cache entry in index1 - createCacheEntry(client, index1, "hello"); - assertCacheState(client, index1, 0, 1); - assertTrue(getRequestCacheStats(client, index1).getMemorySizeInBytes() > 0); - // Update indices.requests.cache.cleanup.staleness_threshold to "10%" with illegal argument assertThrows("Ratio should be in [0-1.0]", IllegalArgumentException.class, () -> { ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); @@ -1103,15 +1085,6 @@ public void testInvalidStalenessThresholdUpdateThrowsException() throws Exceptio ); client().admin().cluster().updateSettings(updateSettingsRequest).actionGet(); }); - - // everything else should continue to work fine later on. 
- // force refresh so that it creates 1 stale key - flushAndRefresh(index1); - // sleep until cache cleaner would have cleaned up the stale key from index 2 - assertBusy(() -> { - // cache cleaner should NOT have cleaned from index 1 - assertEquals(0, getRequestCacheStats(client, index1).getMemorySizeInBytes()); - }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); } // closing the Index after caching will clean up from Indices Request Cache diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java index 45679598dc551..53f4ef3fe281f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java @@ -512,5 +512,6 @@ private void assertCustomIndexMetadata(String index) { logger.info("---> Asserting custom index metadata"); IndexMetadata iMd = internalCluster().client().admin().cluster().prepareState().get().getState().metadata().index(index); assertNotNull(iMd.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)); + assertNotNull(iMd.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY).get(IndexMetadata.TRANSLOG_METADATA_KEY)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java index d29dacb001434..280fd13f0fdcf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -11,15 +11,18 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; -import java.util.Arrays; import java.util.Collection; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class BaseRemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { static final String INDEX_NAME = "remote-store-test-idx-1"; static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2"; @@ -39,7 +42,7 @@ public Settings indexSettings(int shards, int replicas) { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } protected void restore(String... 
indices) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java index e14a4062f7775..6b94e638a6876 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java @@ -30,7 +30,6 @@ import org.junit.Before; import java.nio.file.Path; -import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Set; @@ -50,7 +49,7 @@ public class PrimaryTermValidationIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } @Before diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index d7ad0daa43524..740aee69f7d80 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -36,6 +36,9 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.plugins.Plugin; +import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; +import org.opensearch.remotestore.translogmetadata.mocks.MockFsMetadataSupportedRepositoryPlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.fs.ReloadableFsRepository; @@ -48,6 +51,7 @@ import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -55,6 +59,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -74,6 +79,8 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected Path segmentRepoPath; protected Path translogRepoPath; protected boolean clusterSettingsSuppliedByTest = false; + protected boolean asyncUploadMockFsRepo = randomBoolean(); + private boolean metadataSupportedType = randomBoolean(); private final List documentKeys = List.of( randomAlphaOfLength(5), randomAlphaOfLength(5), @@ -129,6 +136,19 @@ protected Map indexData(int numberOfIterations, boolean invokeFlus return indexingStats; } + @Override + protected Collection> nodePlugins() { + if (!clusterSettingsSuppliedByTest && asyncUploadMockFsRepo) { + if (metadataSupportedType) { + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsMetadataSupportedRepositoryPlugin.class)) + .collect(Collectors.toList()); + } else { + return 
Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); + } + } + return super.nodePlugins(); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { if (segmentRepoPath == null || translogRepoPath == null) { @@ -138,10 +158,27 @@ protected Settings nodeSettings(int nodeOrdinal) { if (clusterSettingsSuppliedByTest) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); } else { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) - .build(); + if (asyncUploadMockFsRepo) { + String repoType = metadataSupportedType ? MockFsMetadataSupportedRepositoryPlugin.TYPE_MD : MockFsRepositoryPlugin.TYPE; + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put( + remoteStoreClusterSettings( + REPOSITORY_NAME, + segmentRepoPath, + repoType, + REPOSITORY_2_NAME, + translogRepoPath, + repoType + ) + ) + .build(); + } else { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .build(); + } } } @@ -221,6 +258,8 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFiel @After public void teardown() { clusterSettingsSuppliedByTest = false; + asyncUploadMockFsRepo = randomBoolean(); + metadataSupportedType = randomBoolean(); assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_NAME); assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_2_NAME); clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 8c8209b80bfd8..b22817ef19d1b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -38,6 +38,7 @@ import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; import java.io.IOException; import java.nio.file.Files; @@ -68,6 +69,11 @@ public class RemoteStoreClusterStateRestoreIT extends BaseRemoteStoreRestoreIT { static final Setting MOCK_SETTING = Setting.simpleString("mock-setting"); static final String[] EXCLUDED_NODES = { "ex-1", "ex-2" }; + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java index 0bcde4b44c734..d957dda1ba04f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java @@ -19,11 +19,12 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; 
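For quick reference, the repository-type selection that RemoteStoreBaseIntegTestCase now performs per node, paraphrased from the hunk above into a standalone helper; MockFsMetadataSupportedRepositoryPlugin.TYPE_MD and MockFsRepositoryPlugin.TYPE are the constants referenced in the diff, and a null result stands for the plain FS repository path.

import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin;
import org.opensearch.remotestore.translogmetadata.mocks.MockFsMetadataSupportedRepositoryPlugin;

final class MockRepoTypeSelectionSketch {

    // Mirrors the nodeSettings()/nodePlugins() branches added above: tests that supply their own
    // cluster settings, or that clear asyncUploadMockFsRepo, get no mock repository at all;
    // otherwise the repository type follows the randomized metadataSupportedType flag.
    static String selectMockRepoType(boolean clusterSettingsSuppliedByTest, boolean asyncUploadMockFsRepo, boolean metadataSupportedType) {
        if (clusterSettingsSuppliedByTest || asyncUploadMockFsRepo == false) {
            return null; // plain FS repositories, as before this change
        }
        return metadataSupportedType ? MockFsMetadataSupportedRepositoryPlugin.TYPE_MD : MockFsRepositoryPlugin.TYPE;
    }
}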
+import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -37,7 +38,7 @@ public class RemoteStoreForceMergeIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index ca0ae3ca9a700..7721b18a4fe6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -38,7 +38,6 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; -import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -48,7 +47,6 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; @@ -56,6 +54,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import java.util.stream.Stream; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -81,7 +80,7 @@ public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class, MockFsRepositoryPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } @Override @@ -797,25 +796,8 @@ public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionExcep // Test local only translog files which are not uploaded to remote store (no metadata present in remote) // Without the cleanup change in RemoteFsTranslog.createEmptyTranslog, this test fails with NPE. public void testLocalOnlyTranslogCleanupOnNodeRestart() throws Exception { - clusterSettingsSuppliedByTest = true; - - // Overriding settings to use AsyncMultiStreamBlobContainer - Settings settings = Settings.builder() - .put(super.nodeSettings(1)) - .put( - remoteStoreClusterSettings( - REPOSITORY_NAME, - segmentRepoPath, - MockFsRepositoryPlugin.TYPE, - REPOSITORY_2_NAME, - translogRepoPath, - MockFsRepositoryPlugin.TYPE - ) - ) - .build(); - - internalCluster().startClusterManagerOnlyNode(settings); - String dataNode = internalCluster().startDataOnlyNode(settings); + internalCluster().startClusterManagerOnlyNode(); + String dataNode = internalCluster().startDataOnlyNode(); // 1. 
Create index with 0 replica createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java index 65016c4976157..7ae08bf968ade 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java @@ -26,7 +26,6 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; -import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java index ef2dcf3217df6..b0827dcfe4892 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -25,11 +25,11 @@ import org.opensearch.test.transport.MockTransportService; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; @@ -38,7 +38,7 @@ public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTes @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } public void testSingleNodeClusterRepositoryRegistration() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 4a0af206b9d89..31c73e2fc03ae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -54,7 +54,7 @@ public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } public void setup() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java index 9b30dacfced13..44c02dbb6d611 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java @@ -41,6 +41,7 @@ protected Settings nodeSettings(int nodeOrdinal) { * wherever not required. */ public void testRemoteIndexPathFileCreation() throws ExecutionException, InterruptedException, IOException { + asyncUploadMockFsRepo = false; String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNodes(2); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java new file mode 100644 index 0000000000000..109a884ff6c5d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.InputStreamWithMetadata; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.remotestore.multipart.mocks.MockFsAsyncBlobContainer; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +public class MockFsMetadataSupportedBlobContainer extends MockFsAsyncBlobContainer { + + private static String CHECKPOINT_FILE_DATA_KEY = "ckp-data"; + + public MockFsMetadataSupportedBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, boolean triggerDataIntegrityFailure) { + super(blobStore, blobPath, path, triggerDataIntegrityFailure); + } + + @Override + public void asyncBlobUpload(WriteContext writeContext, ActionListener completionListener) throws IOException { + // If the upload writeContext have a non-null metadata, we store the metadata content as translog.ckp file. + if (writeContext.getMetadata() != null) { + String base64String = writeContext.getMetadata().get(CHECKPOINT_FILE_DATA_KEY); + byte[] decodedBytes = Base64.getDecoder().decode(base64String); + ByteArrayInputStream inputStream = new ByteArrayInputStream(decodedBytes); + int length = decodedBytes.length; + String ckpFileName = getCheckpointFileName(writeContext.getFileName()); + writeBlob(ckpFileName, inputStream, length, true); + } + super.asyncBlobUpload(writeContext, completionListener); + } + + // This is utility to get the translog.ckp file name for a given translog.tlog file. 
+ private String getCheckpointFileName(String translogFileName) { + if (!translogFileName.endsWith(".tlog")) { + throw new IllegalArgumentException("Invalid translog file name format: " + translogFileName); + } + + int dotIndex = translogFileName.lastIndexOf('.'); + String baseName = translogFileName.substring(0, dotIndex); + return baseName + ".ckp"; + } + + public static String convertToBase64(InputStream inputStream) throws IOException { + try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { + byte[] buffer = new byte[128]; + int bytesRead; + int totalBytesRead = 0; + + while ((bytesRead = inputStream.read(buffer)) != -1) { + byteArrayOutputStream.write(buffer, 0, bytesRead); + totalBytesRead += bytesRead; + if (totalBytesRead > 1024) { + // We enforce a limit of 1KB on the size of the checkpoint file. + throw new AssertionError("Input stream exceeds 1KB limit"); + } + } + + byte[] bytes = byteArrayOutputStream.toByteArray(); + return Base64.getEncoder().encodeToString(bytes); + } + } + + // during readBlobWithMetadata call we separately download translog.ckp file and return it as metadata. + @Override + public InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException { + String ckpFileName = getCheckpointFileName(blobName); + InputStream inputStream = readBlob(blobName); + try (InputStream ckpInputStream = readBlob(ckpFileName)) { + String ckpString = convertToBase64(ckpInputStream); + Map metadata = new HashMap<>(); + metadata.put(CHECKPOINT_FILE_DATA_KEY, ckpString); + return new InputStreamWithMetadata(inputStream, metadata); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java new file mode 100644 index 0000000000000..89dd91c8222ac --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.fs.FsBlobStore; + +import java.io.IOException; +import java.nio.file.Path; + +public class MockFsMetadataSupportedBlobStore extends FsBlobStore { + + private final boolean triggerDataIntegrityFailure; + + public MockFsMetadataSupportedBlobStore(int bufferSizeInBytes, Path path, boolean readonly, boolean triggerDataIntegrityFailure) + throws IOException { + super(bufferSizeInBytes, path, readonly); + this.triggerDataIntegrityFailure = triggerDataIntegrityFailure; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + try { + return new MockFsMetadataSupportedBlobContainer(this, path, buildAndCreate(path), triggerDataIntegrityFailure); + } catch (IOException ex) { + throw new OpenSearchException("failed to create blob container", ex); + } + } + + // Make MockFs metadata supported + @Override + public boolean isBlobMetadataEnabled() { + return true; + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java new file mode 100644 index 0000000000000..333fba413ce4e --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.fs.FsRepository; + +public class MockFsMetadataSupportedRepository extends FsRepository { + + public static Setting TRIGGER_DATA_INTEGRITY_FAILURE = Setting.boolSetting( + "mock_fs_repository.trigger_data_integrity_failure", + false + ); + + private final boolean triggerDataIntegrityFailure; + + public MockFsMetadataSupportedRepository( + RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + triggerDataIntegrityFailure = TRIGGER_DATA_INTEGRITY_FAILURE.get(metadata.settings()); + } + + @Override + protected BlobStore createBlobStore() throws Exception { + FsBlobStore fsBlobStore = (FsBlobStore) super.createBlobStore(); + return new MockFsMetadataSupportedBlobStore( + fsBlobStore.bufferSizeInBytes(), + fsBlobStore.path(), + isReadOnly(), + triggerDataIntegrityFailure + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java new file mode 100644 index 0000000000000..71ae652a6b23d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.RepositoryPlugin; +import org.opensearch.repositories.Repository; + +import java.util.Collections; +import java.util.Map; + +public class MockFsMetadataSupportedRepositoryPlugin extends Plugin implements RepositoryPlugin { + + public static final String TYPE_MD = "fs_metadata_supported_repository"; + + @Override + public Map getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + "fs_metadata_supported_repository", + metadata -> new MockFsMetadataSupportedRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) + ); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java b/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java new file mode 100644 index 0000000000000..d48f82a388245 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ClusterManagerMetrics.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster; + +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.Objects; +import java.util.Optional; + +/** + * Class containing metrics (counters/latency) specific to ClusterManager. 
+ * + * @opensearch.internal + */ +public final class ClusterManagerMetrics { + + private static final String LATENCY_METRIC_UNIT_MS = "ms"; + + public final Histogram clusterStateAppliersHistogram; + public final Histogram clusterStateListenersHistogram; + public final Histogram rerouteHistogram; + public final Histogram clusterStateComputeHistogram; + public final Histogram clusterStatePublishHistogram; + + public ClusterManagerMetrics(MetricsRegistry metricsRegistry) { + clusterStateAppliersHistogram = metricsRegistry.createHistogram( + "cluster.state.appliers.latency", + "Histogram for tracking the latency of cluster state appliers", + LATENCY_METRIC_UNIT_MS + ); + clusterStateListenersHistogram = metricsRegistry.createHistogram( + "cluster.state.listeners.latency", + "Histogram for tracking the latency of cluster state listeners", + LATENCY_METRIC_UNIT_MS + ); + rerouteHistogram = metricsRegistry.createHistogram( + "allocation.reroute.latency", + "Histogram for recording latency of shard re-routing", + LATENCY_METRIC_UNIT_MS + ); + clusterStateComputeHistogram = metricsRegistry.createHistogram( + "cluster.state.new.compute.latency", + "Histogram for recording time taken to compute new cluster state", + LATENCY_METRIC_UNIT_MS + ); + clusterStatePublishHistogram = metricsRegistry.createHistogram( + "cluster.state.publish.success.latency", + "Histogram for recording time taken to publish a new cluster state", + LATENCY_METRIC_UNIT_MS + ); + } + + public void recordLatency(Histogram histogram, Double value) { + histogram.record(value); + } + + public void recordLatency(Histogram histogram, Double value, Optional tags) { + if (Objects.isNull(tags) || tags.isEmpty()) { + histogram.record(value); + return; + } + histogram.record(value, tags.get()); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index aa9101090b6d5..f56c906db1002 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -146,7 +146,8 @@ public ClusterModule( List clusterPlugins, ClusterInfoService clusterInfoService, SnapshotsInfoService snapshotsInfoService, - ThreadContext threadContext + ThreadContext threadContext, + ClusterManagerMetrics clusterManagerMetrics ) { this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); @@ -159,7 +160,8 @@ public ClusterModule( shardsAllocator, clusterInfoService, snapshotsInfoService, - settings + settings, + clusterManagerMetrics ); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 80b78cfe154f1..8d2851c2e0561 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -636,6 +636,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException { static final String KEY_SYSTEM = "system"; public static final String KEY_PRIMARY_TERMS = "primary_terms"; public static final String REMOTE_STORE_CUSTOM_KEY = "remote_store"; + public static final String TRANSLOG_METADATA_KEY = "translog_metadata"; public static final String INDEX_STATE_FILE_PREFIX = "state-"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java 
b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 121f8d935cf48..16edec112f123 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -89,10 +89,10 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.remote.RemoteStoreCustomMetadataResolver; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy; -import org.opensearch.index.remote.RemoteStorePathStrategyResolver; import org.opensearch.index.shard.IndexSettingProvider; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; @@ -104,6 +104,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -177,7 +178,7 @@ public class MetadataCreateIndexService { private AwarenessReplicaBalance awarenessReplicaBalance; @Nullable - private final RemoteStorePathStrategyResolver remoteStorePathStrategyResolver; + private final RemoteStoreCustomMetadataResolver remoteStoreCustomMetadataResolver; public MetadataCreateIndexService( final Settings settings, @@ -193,7 +194,8 @@ public MetadataCreateIndexService( final SystemIndices systemIndices, final boolean forbidPrivateIndexSettings, final AwarenessReplicaBalance awarenessReplicaBalance, - final RemoteStoreSettings remoteStoreSettings + final RemoteStoreSettings remoteStoreSettings, + final Supplier repositoriesServiceSupplier ) { this.settings = settings; this.clusterService = clusterService; @@ -212,8 +214,8 @@ public MetadataCreateIndexService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); Supplier minNodeVersionSupplier = () -> clusterService.state().nodes().getMinNodeVersion(); - remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathStrategyResolver(remoteStoreSettings, minNodeVersionSupplier) + remoteStoreCustomMetadataResolver = isRemoteDataAttributePresent(settings) + ? new RemoteStoreCustomMetadataResolver(remoteStoreSettings, minNodeVersionSupplier, repositoriesServiceSupplier, settings) : null; } @@ -562,7 +564,7 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( tmpImdBuilder.setRoutingNumShards(routingNumShards); tmpImdBuilder.settings(indexSettings); tmpImdBuilder.system(isSystem); - addRemoteStorePathStrategyInCustomData(tmpImdBuilder, true); + addRemoteStoreCustomMetadata(tmpImdBuilder, true); // Set up everything, now locally create the index to see that things are ok, and apply IndexMetadata tempMetadata = tmpImdBuilder.build(); @@ -572,13 +574,13 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( } /** - * Adds the remote store path type information in custom data of index metadata. + * Adds the 1) remote store path type 2) ckp as translog metadata information in custom data of index metadata. 
* * @param tmpImdBuilder index metadata builder. * @param assertNullOldType flag to verify that the old remote store path type is null */ - public void addRemoteStorePathStrategyInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) { - if (remoteStorePathStrategyResolver == null) { + public void addRemoteStoreCustomMetadata(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) { + if (remoteStoreCustomMetadataResolver == null) { return; } // It is possible that remote custom data exists already. In such cases, we need to only update the path type @@ -586,14 +588,21 @@ public void addRemoteStorePathStrategyInCustomData(IndexMetadata.Builder tmpImdB Map existingCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); assert assertNullOldType == false || Objects.isNull(existingCustomData); - // Determine the path type for use using the remoteStorePathResolver. - RemoteStorePathStrategy newPathStrategy = remoteStorePathStrategyResolver.get(); Map remoteCustomData = new HashMap<>(); + + // Determine if the ckp would be stored as translog metadata + boolean isTranslogMetadataEnabled = remoteStoreCustomMetadataResolver.isTranslogMetadataEnabled(); + remoteCustomData.put(IndexMetadata.TRANSLOG_METADATA_KEY, Boolean.toString(isTranslogMetadataEnabled)); + + // Determine the path type for use using the remoteStorePathResolver. + RemoteStorePathStrategy newPathStrategy = remoteStoreCustomMetadataResolver.getPathStrategy(); remoteCustomData.put(PathType.NAME, newPathStrategy.getType().name()); if (Objects.nonNull(newPathStrategy.getHashAlgorithm())) { remoteCustomData.put(PathHashAlgorithm.NAME, newPathStrategy.getHashAlgorithm().name()); } - logger.trace(() -> new ParameterizedMessage("Added newStrategy={}, replaced oldStrategy={}", remoteCustomData, existingCustomData)); + logger.trace( + () -> new ParameterizedMessage("Added newCustomData={}, replaced oldCustomData={}", remoteCustomData, existingCustomData) + ); tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java index 71e562253bf58..3864e282a310b 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java @@ -37,6 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.RestoreInProgress; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -56,10 +57,12 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.gateway.PriorityComparator; import org.opensearch.gateway.ShardsBatchGatewayAllocator; import org.opensearch.snapshots.SnapshotsInfoService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import java.util.ArrayList; import java.util.Collections; @@ -96,6 +99,7 @@ public class AllocationService { private final ShardsAllocator shardsAllocator; 
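// Illustrative sketch (not part of this patch): a minimal, self-contained example of the
// ClusterManagerMetrics API introduced above. The ClusterManagerMetrics constructor, its public
// histogram fields, and NoopMetricsRegistry.INSTANCE all come from this diff; the example class
// and its main method are assumptions made purely for demonstration.
import org.opensearch.cluster.ClusterManagerMetrics;
import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry;

public class ClusterManagerMetricsExample {
    public static void main(String[] args) {
        // No-op registry, the same fallback the test-only AllocationService constructor uses in this patch
        ClusterManagerMetrics metrics = new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE);

        long startNanos = System.nanoTime();
        // ... cluster-manager work (e.g. a reroute) would run here ...
        double elapsedMillis = Math.max(0, (System.nanoTime() - startNanos) / 1_000_000.0);

        // Untagged overload: record the latency sample in milliseconds
        metrics.recordLatency(metrics.rerouteHistogram, elapsedMillis);
    }
}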
private final ClusterInfoService clusterInfoService; private SnapshotsInfoService snapshotsInfoService; + private final ClusterManagerMetrics clusterManagerMetrics; // only for tests that use the GatewayAllocator as the unique ExistingShardsAllocator public AllocationService( @@ -105,7 +109,13 @@ public AllocationService( ClusterInfoService clusterInfoService, SnapshotsInfoService snapshotsInfoService ) { - this(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService); + this( + allocationDeciders, + shardsAllocator, + clusterInfoService, + snapshotsInfoService, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ); setExistingShardsAllocators(Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, gatewayAllocator)); } @@ -113,9 +123,10 @@ public AllocationService( AllocationDeciders allocationDeciders, ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService, - SnapshotsInfoService snapshotsInfoService + SnapshotsInfoService snapshotsInfoService, + ClusterManagerMetrics clusterManagerMetrics ) { - this(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService, Settings.EMPTY); + this(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService, Settings.EMPTY, clusterManagerMetrics); } public AllocationService( @@ -123,14 +134,15 @@ public AllocationService( ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService, SnapshotsInfoService snapshotsInfoService, - Settings settings - + Settings settings, + ClusterManagerMetrics clusterManagerMetrics ) { this.allocationDeciders = allocationDeciders; this.shardsAllocator = shardsAllocator; this.clusterInfoService = clusterInfoService; this.snapshotsInfoService = snapshotsInfoService; this.settings = settings; + this.clusterManagerMetrics = clusterManagerMetrics; } /** @@ -550,11 +562,15 @@ private void reroute(RoutingAllocation allocation) { assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation).isEmpty() : "auto-expand replicas out of sync with number of nodes in the cluster"; assert assertInitialized(); - + long rerouteStartTimeNS = System.nanoTime(); removeDelayMarkers(allocation); allocateExistingUnassignedShards(allocation); // try to allocate existing shard copies first shardsAllocator.allocate(allocation); + clusterManagerMetrics.recordLatency( + clusterManagerMetrics.rerouteHistogram, + (double) Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - rerouteStartTimeNS)) + ); assert RoutingNodes.assertShardStats(allocation.routingNodes()); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java index ddcccd597e894..2431f57a6a1f9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -176,7 +176,7 @@ public Metadata applyChanges(Metadata oldMetadata, RoutingTable newRoutingTable, oldMetadata.settings(), logger ); - migrationImdUpdater.maybeUpdateRemoteStorePathStrategy(indexMetadataBuilder, index.getName()); + migrationImdUpdater.maybeUpdateRemoteStoreCustomMetadata(indexMetadataBuilder, index.getName()); migrationImdUpdater.maybeAddRemoteIndexSettings(indexMetadataBuilder, index.getName()); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java 
b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 26a04de31ce39..61e7aaed5ecff 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -44,7 +44,6 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import java.util.Locale; import java.util.function.BiFunction; import static org.opensearch.cluster.routing.allocation.decider.Decision.THROTTLE; @@ -211,20 +210,9 @@ private Decision allocateInitialShardCopies(ShardRouting shardRouting, RoutingNo allocation, currentInRecoveries, replicasInitialRecoveries, - (x, y) -> getInitialPrimaryNodeOutgoingRecoveries(x, y), + this::getInitialPrimaryNodeOutgoingRecoveries, replicasInitialRecoveries, - String.format( - Locale.ROOT, - "[%s=%d]", - CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), - replicasInitialRecoveries - ), - String.format( - Locale.ROOT, - "[%s=%d]", - CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), - replicasInitialRecoveries - ) + true ); } @@ -238,22 +226,9 @@ private Decision allocateNonInitialShardCopies(ShardRouting shardRouting, Routin allocation, currentInRecoveries, concurrentIncomingRecoveries, - (x, y) -> getPrimaryNodeOutgoingRecoveries(x, y), + this::getPrimaryNodeOutgoingRecoveries, concurrentOutgoingRecoveries, - String.format( - Locale.ROOT, - "[%s=%d] (can also be set via [%s])", - CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), - concurrentIncomingRecoveries, - CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey() - ), - String.format( - Locale.ROOT, - "[%s=%d] (can also be set via [%s])", - CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), - concurrentOutgoingRecoveries, - CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey() - ) + false ); } @@ -274,18 +249,30 @@ private Decision allocateShardCopies( int inRecoveriesLimit, BiFunction primaryNodeOutRecoveriesFunc, int outRecoveriesLimit, - String incomingRecoveriesSettingMsg, - String outGoingRecoveriesSettingMsg + boolean isInitialShardCopies ) { // Allocating a shard to this node will increase the incoming recoveries if (currentInRecoveries >= inRecoveriesLimit) { - return allocation.decision( - THROTTLE, - NAME, - "reached the limit of incoming shard recoveries [%d], cluster setting %s", - currentInRecoveries, - incomingRecoveriesSettingMsg - ); + if (isInitialShardCopies) { + return allocation.decision( + THROTTLE, + NAME, + "reached the limit of incoming shard recoveries [%d], cluster setting [%s=%d]", + currentInRecoveries, + CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), + inRecoveriesLimit + ); + } else { + return allocation.decision( + THROTTLE, + NAME, + "reached the limit of incoming shard recoveries [%d], cluster setting [%s=%d] (can also be set via [%s])", + currentInRecoveries, + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), + inRecoveriesLimit, + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey() + ); + } } else { // search for corresponding recovery source (= primary shard) and check number of outgoing recoveries on that node ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId()); @@ 
-294,14 +281,30 @@ private Decision allocateShardCopies( } int primaryNodeOutRecoveries = primaryNodeOutRecoveriesFunc.apply(shardRouting, allocation); if (primaryNodeOutRecoveries >= outRecoveriesLimit) { - return allocation.decision( - THROTTLE, - NAME, - "reached the limit of outgoing shard recoveries [%d] on the node [%s] which holds the primary, " + "cluster setting %s", - primaryNodeOutRecoveries, - primaryShard.currentNodeId(), - outGoingRecoveriesSettingMsg - ); + if (isInitialShardCopies) { + return allocation.decision( + THROTTLE, + NAME, + "reached the limit of outgoing shard recoveries [%d] on the node [%s] which holds the primary, " + + "cluster setting [%s=%d]", + primaryNodeOutRecoveries, + primaryShard.currentNodeId(), + CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), + inRecoveriesLimit + ); + } else { + return allocation.decision( + THROTTLE, + NAME, + "reached the limit of outgoing shard recoveries [%d] on the node [%s] which holds the primary, " + + "cluster setting [%s=%d] (can also be set via [%s])", + primaryNodeOutRecoveries, + primaryShard.currentNodeId(), + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), + outRecoveriesLimit, + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey() + ); + } } else { return allocation.decision( YES, diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index a55721fb13cdc..2ac95178d2ff9 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateApplier; import org.opensearch.cluster.ClusterStateListener; @@ -61,6 +62,7 @@ import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -68,6 +70,7 @@ import java.util.Collection; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -120,8 +123,15 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements private final String nodeName; private NodeConnectionsService nodeConnectionsService; - - public ClusterApplierService(String nodeName, Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + private final ClusterManagerMetrics clusterManagerMetrics; + + public ClusterApplierService( + String nodeName, + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics + ) { this.clusterSettings = clusterSettings; this.threadPool = threadPool; this.state = new AtomicReference<>(); @@ -132,6 +142,7 @@ public ClusterApplierService(String nodeName, Settings settings, ClusterSettings 
CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold ); + this.clusterManagerMetrics = clusterManagerMetrics; } private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { @@ -597,7 +608,7 @@ private void callClusterStateAppliers(ClusterChangedEvent clusterChangedEvent, S callClusterStateAppliers(clusterChangedEvent, stopWatch, lowPriorityStateAppliers); } - private static void callClusterStateAppliers( + private void callClusterStateAppliers( ClusterChangedEvent clusterChangedEvent, StopWatch stopWatch, Collection clusterStateAppliers @@ -605,7 +616,13 @@ private static void callClusterStateAppliers( for (ClusterStateApplier applier : clusterStateAppliers) { logger.trace("calling [{}] with change to version [{}]", applier, clusterChangedEvent.state().version()); try (TimingHandle ignored = stopWatch.timing("running applier [" + applier + "]")) { + long applierStartTimeNS = System.nanoTime(); applier.applyClusterState(clusterChangedEvent); + clusterManagerMetrics.recordLatency( + clusterManagerMetrics.clusterStateAppliersHistogram, + (double) Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - applierStartTimeNS)), + Optional.of(Tags.create().addTag("Operation", applier.getClass().getSimpleName())) + ); } } } @@ -624,7 +641,13 @@ private void callClusterStateListener( try { logger.trace("calling [{}] with change to version [{}]", listener, clusterChangedEvent.state().version()); try (TimingHandle ignored = stopWatch.timing("notifying listener [" + listener + "]")) { + long listenerStartTimeNS = System.nanoTime(); listener.clusterChanged(clusterChangedEvent); + clusterManagerMetrics.recordLatency( + clusterManagerMetrics.clusterStateListenersHistogram, + (double) Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - listenerStartTimeNS)), + Optional.of(Tags.create().addTag("Operation", listener.getClass().getSimpleName())) + ); } } catch (Exception ex) { logger.warn("failed to notify ClusterStateListener", ex); diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java index e9224596e048d..eaedb36a59f1e 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.service; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -20,7 +21,12 @@ */ @PublicApi(since = "2.2.0") public class ClusterManagerService extends MasterService { - public ClusterManagerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { - super(settings, clusterSettings, threadPool); + public ClusterManagerService( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics + ) { + super(settings, clusterSettings, threadPool, clusterManagerMetrics); } } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index aa7766979e851..fa61375e85c25 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -32,6 +32,7 @@ package 
org.opensearch.cluster.service; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateApplier; @@ -91,12 +92,17 @@ public class ClusterService extends AbstractLifecycleComponent { private IndexingPressureService indexingPressureService; - public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + public ClusterService( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics + ) { this( settings, clusterSettings, - new ClusterManagerService(settings, clusterSettings, threadPool), - new ClusterApplierService(Node.NODE_NAME_SETTING.get(settings), settings, clusterSettings, threadPool) + new ClusterManagerService(settings, clusterSettings, threadPool, clusterManagerMetrics), + new ClusterApplierService(Node.NODE_NAME_SETTING.get(settings), settings, clusterSettings, threadPool, clusterManagerMetrics) ); } diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index af3e4f8437c43..6436dcfe33003 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -39,6 +39,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.AckedClusterStateTaskListener; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.Builder; import org.opensearch.cluster.ClusterStateTaskConfig; @@ -70,6 +71,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.discovery.Discovery; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -79,6 +81,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -135,8 +138,14 @@ public class MasterService extends AbstractLifecycleComponent { protected final ClusterManagerTaskThrottler clusterManagerTaskThrottler; private final ClusterManagerThrottlingStats throttlingStats; private final ClusterStateStats stateStats; + private final ClusterManagerMetrics clusterManagerMetrics; - public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + public MasterService( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics + ) { this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); this.slowTaskLoggingThreshold = CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); @@ -154,6 +163,7 @@ public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadP ); this.stateStats = new ClusterStateStats(); this.threadPool = threadPool; + this.clusterManagerMetrics = clusterManagerMetrics; } private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { @@ -303,6 +313,12 @@ private void runTasks(TaskInputs taskInputs) { final TimeValue computationTime = getTimeSince(computationStartTime); 
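// Illustrative sketch (not part of this patch): the tagged recordLatency overload used in the
// surrounding hunk, together with the nanoTime-to-milliseconds conversion this patch applies.
// ClusterManagerMetrics, Tags.create().addTag(...) and TimeValue.nsecToMSec(...) come from the diff;
// the example class, its main method and the "ExampleExecutor" tag value are assumptions.
import org.opensearch.cluster.ClusterManagerMetrics;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry;
import org.opensearch.telemetry.metrics.tags.Tags;

import java.util.Optional;

public class TaggedLatencyExample {
    public static void main(String[] args) {
        ClusterManagerMetrics metrics = new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE);

        long startNanos = System.nanoTime();
        // ... computing a new cluster state would happen here ...
        double elapsedMillis = (double) Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startNanos));

        // The "Operation" tag distinguishes which executor produced the sample, mirroring the call below
        metrics.recordLatency(
            metrics.clusterStateComputeHistogram,
            elapsedMillis,
            Optional.of(Tags.create().addTag("Operation", "ExampleExecutor"))
        );
    }
}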
logExecutionTime(computationTime, "compute cluster state update", summary); + clusterManagerMetrics.recordLatency( + clusterManagerMetrics.clusterStateComputeHistogram, + (double) computationTime.getMillis(), + Optional.of(Tags.create().addTag("Operation", taskInputs.executor.getClass().getSimpleName())) + ); + if (taskOutputs.clusterStateUnchanged()) { final long notificationStartTime = threadPool.preciseRelativeTimeInNanos(); taskOutputs.notifySuccessfulTasksOnUnchangedClusterState(); @@ -361,6 +377,7 @@ protected boolean blockingAllowed() { final long durationMillis = getTimeSince(startTimeNanos).millis(); stateStats.stateUpdateTook(durationMillis); stateStats.stateUpdated(); + clusterManagerMetrics.recordLatency(clusterManagerMetrics.clusterStatePublishHistogram, (double) durationMillis); } catch (Exception e) { stateStats.stateUpdateFailed(); onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeNanos, e); diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index 4f5f8d4b1ef5f..a2e4199029ef4 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -80,16 +80,16 @@ public interface BlobContainer { InputStream readBlob(String blobName) throws IOException; /** - * Creates a new {@link FetchBlobResult} for the given blob name. + * Creates a new {@link InputStreamWithMetadata} for the given blob name. * * @param blobName * The name of the blob to get an {@link InputStream} for. - * @return The {@link FetchBlobResult} of the blob. + * @return The {@link InputStreamWithMetadata} of the blob. * @throws NoSuchFileException if the blob does not exist * @throws IOException if the blob can not be read. 
*/ @ExperimentalApi - default FetchBlobResult readBlobWithMetadata(String blobName) throws IOException { + default InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException { throw new UnsupportedOperationException("readBlobWithMetadata is not implemented yet"); }; diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java index 8ce8ec8e01abe..406ccc6aa4a18 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java @@ -71,6 +71,13 @@ default Map> extendedStats() { */ default void reload(RepositoryMetadata repositoryMetadata) {} + /** + * Returns a boolean indicating if blobStore has object metadata support enabled + */ + default boolean isBlobMetadataEnabled() { + return false; + } + /** * Metrics for BlobStore interactions */ diff --git a/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java b/server/src/main/java/org/opensearch/common/blobstore/InputStreamWithMetadata.java similarity index 74% rename from server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java rename to server/src/main/java/org/opensearch/common/blobstore/InputStreamWithMetadata.java index 55aca771b586c..aa307e260e033 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java +++ b/server/src/main/java/org/opensearch/common/blobstore/InputStreamWithMetadata.java @@ -10,6 +10,8 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.io.Closeable; +import java.io.IOException; import java.io.InputStream; import java.util.Map; @@ -20,7 +22,7 @@ * @opensearch.experimental */ @ExperimentalApi -public class FetchBlobResult { +public class InputStreamWithMetadata implements Closeable { /** * Downloaded blob InputStream @@ -40,9 +42,15 @@ public Map getMetadata() { return metadata; } - public FetchBlobResult(InputStream inputStream, Map metadata) { + public InputStreamWithMetadata(InputStream inputStream, Map metadata) { this.inputStream = inputStream; this.metadata = metadata; } + @Override + public void close() throws IOException { + if (inputStream != null) { + inputStream.close(); + } + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index b6738ab4e864f..86a588a67797c 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -738,9 +738,11 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, // Remote Routing table settings RemoteRoutingTableService.REMOTE_ROUTING_TABLE_ENABLED_SETTING diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 
255c1c87f0d89..ee995c4649d08 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -34,7 +34,7 @@ protected FeatureFlagSettings( FeatureFlags.IDENTITY_SETTING, FeatureFlags.TELEMETRY_SETTING, FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, - FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING, + FeatureFlags.TIERED_REMOTE_INDEX_SETTING, FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, FeatureFlags.PLUGGABLE_CACHE_SETTING, FeatureFlags.REMOTE_ROUTING_TABLE_EXPERIMENTAL_SETTING diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index c98cd24af3eeb..eeafe736f7dd6 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -56,10 +56,10 @@ public class FeatureFlags { public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled"; /** - * Gates the functionality of writeable remote index + * Gates the functionality of remote index having the capability to move across different tiers * Once the feature is ready for release, this feature flag can be removed. */ - public static final String WRITEABLE_REMOTE_INDEX = "opensearch.experimental.feature.writeable_remote_index.enabled"; + public static final String TIERED_REMOTE_INDEX = "opensearch.experimental.feature.tiered_remote_index.enabled"; /** * Gates the functionality of pluggable cache. @@ -90,11 +90,7 @@ public class FeatureFlags { Property.NodeScope ); - public static final Setting WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( - WRITEABLE_REMOTE_INDEX, - false, - Property.NodeScope - ); + public static final Setting TIERED_REMOTE_INDEX_SETTING = Setting.boolSetting(TIERED_REMOTE_INDEX, false, Property.NodeScope); public static final Setting PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); @@ -110,7 +106,7 @@ public class FeatureFlags { IDENTITY_SETTING, TELEMETRY_SETTING, DATETIME_FORMATTER_CACHING_SETTING, - WRITEABLE_REMOTE_INDEX_SETTING, + TIERED_REMOTE_INDEX_SETTING, PLUGGABLE_CACHE_SETTING, REMOTE_ROUTING_TABLE_EXPERIMENTAL_SETTING ); diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index d2de78ffac965..3c0797cd450d2 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -117,14 +117,17 @@ public void cleanCaches() { // for tests protected ShardsBatchGatewayAllocator() { + this(DEFAULT_SHARD_BATCH_SIZE); + } + + protected ShardsBatchGatewayAllocator(long batchSize) { this.rerouteService = null; this.batchStartedAction = null; this.primaryShardBatchAllocator = null; this.batchStoreAction = null; this.replicaShardBatchAllocator = null; - this.maxBatchSize = DEFAULT_SHARD_BATCH_SIZE; + this.maxBatchSize = batchSize; } - // for tests @Override @@ -228,13 +231,13 @@ protected Set createAndUpdateBatches(RoutingAllocation allocation, boole batchEntry.getValue().getBatchedShards().forEach(shardId -> currentBatchedShards.put(shardId, batchEntry.getKey())); } - Set newShardsToBatch = Sets.newHashSet(); + Map newShardsToBatch = new HashMap<>(); Set batchedShardsToAssign = 
Sets.newHashSet(); // add all unassigned shards to the batch if they are not already in a batch unassigned.forEach(shardRouting -> { if ((currentBatchedShards.containsKey(shardRouting.shardId()) == false) && (shardRouting.primary() == primary)) { assert shardRouting.unassigned(); - newShardsToBatch.add(shardRouting); + newShardsToBatch.put(shardRouting.shardId(), shardRouting); } // if shard is already batched update to latest shardRouting information in the batches // Replica shard assignment can be cancelled if we get a better match. These ShardRouting objects also @@ -262,7 +265,7 @@ else if (shardRouting.primary() == primary) { refreshShardBatches(currentBatches, batchedShardsToAssign, primary); - Iterator iterator = newShardsToBatch.iterator(); + Iterator iterator = newShardsToBatch.values().iterator(); assert maxBatchSize > 0 : "Shards batch size must be greater than 0"; long batchSize = maxBatchSize; diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 9d8ab6815eecc..2b0a62c18d5a7 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -763,6 +763,7 @@ public static IndexMergePolicy fromString(String text) { private final boolean widenIndexSortType; private final boolean assignedOnRemoteNode; private final RemoteStorePathStrategy remoteStorePathStrategy; + private final boolean isTranslogMetadataEnabled; /** * The maximum age of a retention lease before it is considered expired. @@ -989,6 +990,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti assignedOnRemoteNode = RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings()); remoteStorePathStrategy = RemoteStoreUtils.determineRemoteStorePathStrategy(indexMetadata); + isTranslogMetadataEnabled = RemoteStoreUtils.determineTranslogMetadataEnabled(indexMetadata); + setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING)); setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING)); @@ -1911,4 +1914,8 @@ public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePo public RemoteStorePathStrategy getRemoteStorePathStrategy() { return remoteStorePathStrategy; } + + public boolean isTranslogMetadataEnabled() { + return isTranslogMetadataEnabled; + } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java index 761fa20ea64e5..cc51fcd2f18f6 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java @@ -28,7 +28,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.index.remote.RemoteStoreUtils.determineRemoteStorePathStrategyDuringMigration; +import static org.opensearch.index.remote.RemoteStoreUtils.determineRemoteStoreCustomMetadataDuringMigration; import static org.opensearch.index.remote.RemoteStoreUtils.getRemoteStoreRepoName; import static 
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -118,7 +118,7 @@ private boolean needsRemoteIndexSettingsUpdate( } /** - * Updates the remote store path strategy metadata for the index when it is migrating to remote. + * Updates the remote store custom metadata for the index when it is migrating to remote. * This is run during state change of each shard copy when the cluster is in `MIXED` mode and the direction of migration is `REMOTE_STORE` * Should not interfere with docrep functionality even if the index is in docrep nodes since this metadata * is not used anywhere in the docrep flow @@ -127,20 +127,20 @@ private boolean needsRemoteIndexSettingsUpdate( * @param indexMetadataBuilder Mutated {@link IndexMetadata.Builder} having the previous state updates * @param index index name */ - public void maybeUpdateRemoteStorePathStrategy(IndexMetadata.Builder indexMetadataBuilder, String index) { - if (indexHasRemotePathMetadata(indexMetadata) == false) { - logger.info("Adding remote store path strategy for index [{}] during migration", index); + public void maybeUpdateRemoteStoreCustomMetadata(IndexMetadata.Builder indexMetadataBuilder, String index) { + if (indexHasRemoteCustomMetadata(indexMetadata) == false) { + logger.info("Adding remote store custom data for index [{}] during migration", index); indexMetadataBuilder.putCustom( REMOTE_STORE_CUSTOM_KEY, - determineRemoteStorePathStrategyDuringMigration(clusterSettings, discoveryNodes) + determineRemoteStoreCustomMetadataDuringMigration(clusterSettings, discoveryNodes) ); } else { - logger.debug("Index {} already has remote store path strategy", index); + logger.debug("Index {} already has remote store custom data", index); } } public static boolean indexHasAllRemoteStoreRelatedMetadata(IndexMetadata indexMetadata) { - return indexHasRemoteStoreSettings(indexMetadata.getSettings()) && indexHasRemotePathMetadata(indexMetadata); + return indexHasRemoteStoreSettings(indexMetadata.getSettings()) && indexHasRemoteCustomMetadata(indexMetadata); } /** @@ -167,9 +167,11 @@ public static boolean indexHasRemoteStoreSettings(Settings indexSettings) { * @param indexMetadata Current index metadata * @return true if all above conditions match. 
false otherwise */ - public static boolean indexHasRemotePathMetadata(IndexMetadata indexMetadata) { + public static boolean indexHasRemoteCustomMetadata(IndexMetadata indexMetadata) { Map customMetadata = indexMetadata.getCustomData(REMOTE_STORE_CUSTOM_KEY); - return Objects.nonNull(customMetadata) && Objects.nonNull(customMetadata.get(PathType.NAME)); + return Objects.nonNull(customMetadata) + && (Objects.nonNull(customMetadata.get(PathType.NAME)) + || Objects.nonNull(customMetadata.get(IndexMetadata.TRANSLOG_METADATA_KEY))); } public static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, String segmentRepository, String translogRepository) { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java new file mode 100644 index 0000000000000..e8a0dda5a699e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.util.function.Supplier; + +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; + +/** + * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. + * + * @opensearch.internal + */ +@ExperimentalApi +public class RemoteStoreCustomMetadataResolver { + + private final RemoteStoreSettings remoteStoreSettings; + private final Supplier minNodeVersionSupplier; + private final Supplier repositoriesServiceSupplier; + private final Settings settings; + + public RemoteStoreCustomMetadataResolver( + RemoteStoreSettings remoteStoreSettings, + Supplier minNodeVersionSupplier, + Supplier repositoriesServiceSupplier, + Settings settings + ) { + this.remoteStoreSettings = remoteStoreSettings; + this.minNodeVersionSupplier = minNodeVersionSupplier; + this.repositoriesServiceSupplier = repositoriesServiceSupplier; + this.settings = settings; + } + + public RemoteStorePathStrategy getPathStrategy() { + PathType pathType; + PathHashAlgorithm pathHashAlgorithm; + // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. + pathType = Version.V_2_14_0.compareTo(minNodeVersionSupplier.get()) <= 0 ? remoteStoreSettings.getPathType() : PathType.FIXED; + // If the path type is fixed, hash algorithm is not applicable. + pathHashAlgorithm = pathType == PathType.FIXED ? 
null : remoteStoreSettings.getPathHashAlgorithm(); + return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); + } + + public boolean isTranslogMetadataEnabled() { + Repository repository; + try { + repository = repositoriesServiceSupplier.get().repository(getRemoteStoreTranslogRepo(settings)); + } catch (RepositoryMissingException ex) { + throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", ex); + } + BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + return Version.V_2_15_0.compareTo(minNodeVersionSupplier.get()) <= 0 + && remoteStoreSettings.isTranslogMetadataEnabled() + && blobStoreRepository.blobStore().isBlobMetadataEnabled(); + } + +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java deleted file mode 100644 index 178de406ed681..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.Version; -import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; -import org.opensearch.index.remote.RemoteStoreEnums.PathType; -import org.opensearch.indices.RemoteStoreSettings; - -import java.util.function.Supplier; - -/** - * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. - * - * @opensearch.internal - */ -@ExperimentalApi -public class RemoteStorePathStrategyResolver { - - private final RemoteStoreSettings remoteStoreSettings; - private final Supplier minNodeVersionSupplier; - - public RemoteStorePathStrategyResolver(RemoteStoreSettings remoteStoreSettings, Supplier minNodeVersionSupplier) { - this.remoteStoreSettings = remoteStoreSettings; - this.minNodeVersionSupplier = minNodeVersionSupplier; - } - - public RemoteStorePathStrategy get() { - PathType pathType; - PathHashAlgorithm pathHashAlgorithm; - // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. - pathType = Version.V_2_14_0.compareTo(minNodeVersionSupplier.get()) <= 0 ? remoteStoreSettings.getPathType() : PathType.FIXED; - // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? 
null : remoteStoreSettings.getPathHashAlgorithm(); - return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); - } -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 27b1b88034573..9a9de6c819424 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -32,6 +32,7 @@ import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA; /** * Utils for remote store @@ -181,25 +182,50 @@ public static RemoteStorePathStrategy determineRemoteStorePathStrategy(IndexMeta return new RemoteStorePathStrategy(RemoteStoreEnums.PathType.FIXED); } + /** + * Determines if translog file object metadata can be used to store checkpoint file data. + */ + public static boolean determineTranslogMetadataEnabled(IndexMetadata indexMetadata) { + Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + assert remoteCustomData == null || remoteCustomData.containsKey(IndexMetadata.TRANSLOG_METADATA_KEY); + if (remoteCustomData != null && remoteCustomData.containsKey(IndexMetadata.TRANSLOG_METADATA_KEY)) { + return Boolean.parseBoolean(remoteCustomData.get(IndexMetadata.TRANSLOG_METADATA_KEY)); + } + return false; + } + /** * Generates the remote store path type information to be added to custom data of index metadata during migration * * @param clusterSettings Current Cluster settings from {@link ClusterState} - * @param discoveryNodes Current {@link DiscoveryNodes} from the cluster state + * @param discoveryNodes Current {@link DiscoveryNodes} from the cluster state * @return {@link Map} to be added as custom data in index metadata */ - public static Map determineRemoteStorePathStrategyDuringMigration( + public static Map determineRemoteStoreCustomMetadataDuringMigration( Settings clusterSettings, DiscoveryNodes discoveryNodes ) { + Map remoteCustomData = new HashMap<>(); Version minNodeVersion = discoveryNodes.getMinNodeVersion(); + + // TODO: During the document replication to a remote store migration, there should be a check to determine if the registered + // translog blobstore supports custom metadata or not. + // Currently, the blobStoreMetadataEnabled flag is set to false because the integration tests run on the local file system, which + // does not support custom metadata. + // https://github.com/opensearch-project/OpenSearch/issues/13745 + boolean blobStoreMetadataEnabled = false; + boolean translogMetadata = Version.CURRENT.compareTo(minNodeVersion) <= 0 + && CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.get(clusterSettings) + && blobStoreMetadataEnabled; + + remoteCustomData.put(IndexMetadata.TRANSLOG_METADATA_KEY, Boolean.toString(translogMetadata)); + RemoteStoreEnums.PathType pathType = Version.CURRENT.compareTo(minNodeVersion) <= 0 ? CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.get(clusterSettings) : RemoteStoreEnums.PathType.FIXED; RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm = pathType == RemoteStoreEnums.PathType.FIXED ? 
null : CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.get(clusterSettings); - Map remoteCustomData = new HashMap<>(); remoteCustomData.put(RemoteStoreEnums.PathType.NAME, pathType.name()); if (Objects.nonNull(pathHashAlgorithm)) { remoteCustomData.put(RemoteStoreEnums.PathHashAlgorithm.NAME, pathHashAlgorithm.name()); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 18d4a2ca6d639..3517579856d43 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3970,7 +3970,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro new RemoteStoreRefreshListener( this, this.checkpointPublisher, - remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()) + remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()), + remoteStoreSettings ) ); } @@ -4976,7 +4977,14 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathStrategy(), remoteStoreSettings); + RemoteFsTranslog.cleanup( + repository, + shardId, + getThreadPool(), + indexSettings.getRemoteStorePathStrategy(), + remoteStoreSettings, + indexSettings().isTranslogMetadataEnabled() + ); } /* @@ -5001,7 +5009,8 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { indexSettings.getRemoteStorePathStrategy(), remoteStoreSettings, logger, - shouldSeedRemoteStore() + shouldSeedRemoteStore(), + indexSettings().isTranslogMetadataEnabled() ); } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index bfb841307af49..20afd7b2f3568 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -33,6 +33,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.threadpool.ThreadPool; @@ -45,6 +46,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; @@ -89,11 +91,13 @@ public final class RemoteStoreRefreshListener extends ReleasableRetryableRefresh private volatile long primaryTerm; private volatile Iterator backoffDelayIterator; private final SegmentReplicationCheckpointPublisher checkpointPublisher; + private final RemoteStoreSettings remoteStoreSettings; public RemoteStoreRefreshListener( IndexShard indexShard, SegmentReplicationCheckpointPublisher checkpointPublisher, - RemoteSegmentTransferTracker segmentTracker + RemoteSegmentTransferTracker 
segmentTracker, + RemoteStoreSettings remoteStoreSettings ) { super(indexShard.getThreadPool()); logger = Loggers.getLogger(getClass(), indexShard.shardId()); @@ -116,6 +120,7 @@ public RemoteStoreRefreshListener( this.segmentTracker = segmentTracker; resetBackOffDelayIterator(); this.checkpointPublisher = checkpointPublisher; + this.remoteStoreSettings = remoteStoreSettings; } @Override @@ -286,7 +291,12 @@ public void onFailure(Exception e) { // Start the segments files upload uploadNewSegments(localSegmentsPostRefresh, localSegmentsSizeMap, segmentUploadsCompletedListener); - latch.await(); + if (latch.await( + remoteStoreSettings.getClusterRemoteSegmentTransferTimeout().millis(), + TimeUnit.MILLISECONDS + ) == false) { + throw new SegmentUploadFailedException("Timeout while waiting for remote segment transfer to complete"); + } } catch (EngineException e) { logger.warn("Exception while reading SegmentInfosSnapshot", e); } diff --git a/server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java b/server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java new file mode 100644 index 0000000000000..bbff399fb71ff --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import java.io.IOException; + +/** + * Exception to be thrown when a segment upload fails. + * + * @opensearch.internal + */ +public class SegmentUploadFailedException extends IOException { + + /** + * Creates a new SegmentUploadFailedException. 
+ * + * @param message error message + */ + public SegmentUploadFailedException(String message) { + super(message); + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index c7f756957076c..67549c86b7dd2 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -91,6 +91,7 @@ public class RemoteFsTranslog extends Translog { private static final int SYNC_PERMIT = 1; private final Semaphore syncPermit = new Semaphore(SYNC_PERMIT); private final AtomicBoolean pauseSync = new AtomicBoolean(false); + private final boolean isTranslogMetadataEnabled; public RemoteFsTranslog( TranslogConfig config, @@ -110,6 +111,7 @@ public RemoteFsTranslog( this.startedPrimarySupplier = startedPrimarySupplier; this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); + isTranslogMetadataEnabled = indexSettings().isTranslogMetadataEnabled(); this.translogTransferManager = buildTranslogTransferManager( blobStoreRepository, threadPool, @@ -117,7 +119,8 @@ public RemoteFsTranslog( fileTransferTracker, remoteTranslogTransferTracker, indexSettings().getRemoteStorePathStrategy(), - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); try { download(translogTransferManager, location, logger, config.shouldSeedRemote()); @@ -169,7 +172,8 @@ public static void download( RemoteStorePathStrategy pathStrategy, RemoteStoreSettings remoteStoreSettings, Logger logger, - boolean seedRemote + boolean seedRemote, + boolean isTranslogMetadataEnabled ) throws IOException { assert repository instanceof BlobStoreRepository : String.format( Locale.ROOT, @@ -188,7 +192,8 @@ public static void download( fileTransferTracker, remoteTranslogTransferTracker, pathStrategy, - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); RemoteFsTranslog.download(translogTransferManager, location, logger, seedRemote); logger.trace(remoteTranslogTransferTracker.toString()); @@ -293,7 +298,8 @@ public static TranslogTransferManager buildTranslogTransferManager( FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker tracker, RemoteStorePathStrategy pathStrategy, - RemoteStoreSettings remoteStoreSettings + RemoteStoreSettings remoteStoreSettings, + boolean isTranslogMetadataEnabled ) { assert Objects.nonNull(pathStrategy); String indexUUID = shardId.getIndex().getUUID(); @@ -315,7 +321,16 @@ public static TranslogTransferManager buildTranslogTransferManager( .build(); BlobPath mdPath = pathStrategy.generatePath(mdPathInput); BlobStoreTransferService transferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool); - return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker, remoteStoreSettings); + return new TranslogTransferManager( + shardId, + transferService, + dataPath, + mdPath, + fileTransferTracker, + tracker, + remoteStoreSettings, + isTranslogMetadataEnabled + ); } @Override @@ -592,7 +607,8 @@ public static void cleanup( ShardId shardId, ThreadPool threadPool, RemoteStorePathStrategy pathStrategy, - RemoteStoreSettings remoteStoreSettings + RemoteStoreSettings remoteStoreSettings, + boolean isTranslogMetadataEnabled ) throws IOException { assert repository instanceof BlobStoreRepository : "repository 
should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; @@ -607,7 +623,8 @@ public static void cleanup( fileTransferTracker, remoteTranslogTransferTracker, pathStrategy, - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index bec2d78d9af62..704f6419da60a 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -18,7 +18,7 @@ import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.FetchBlobResult; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; @@ -28,16 +28,20 @@ import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.threadpool.ThreadPool; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.nio.channels.FileChannel; import java.nio.file.StandardOpenOption; +import java.util.Base64; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import static org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC; +import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; /** * Service that handles remote transfer of translog and checkpoint files @@ -104,6 +108,30 @@ public void uploadBlobs( } + // Builds a metadata map containing the Base64-encoded checkpoint file data associated with a translog file. + static Map buildTransferFileMetadata(InputStream metadataInputStream) throws IOException { + Map metadata = new HashMap<>(); + try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { + byte[] buffer = new byte[128]; + int bytesRead; + int totalBytesRead = 0; + + while ((bytesRead = metadataInputStream.read(buffer)) != -1) { + byteArrayOutputStream.write(buffer, 0, bytesRead); + totalBytesRead += bytesRead; + if (totalBytesRead > 1024) { + // We enforce a limit of 1KB on the size of the checkpoint file. 
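// ---------------------------------------------------------------------------
// Editor's sketch (not part of the change): the round trip implied by this diff.
// During upload the checkpoint (.ckp) bytes are Base64-encoded under the
// CHECKPOINT_FILE_DATA_KEY object-metadata key, capped at 1KB; during download
// they are decoded back into a local translog.ckp file. Only the key name and
// the 1KB cap come from the diff; everything else below is assumed for illustration.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;

final class CheckpointMetadataRoundTripSketch {
    static final String CHECKPOINT_FILE_DATA_KEY = "ckp-data";

    // Encode checkpoint bytes into the metadata map attached to translog.tlog.
    static Map<String, String> encode(byte[] ckpBytes) throws IOException {
        if (ckpBytes.length > 1024) {
            throw new IOException("Checkpoint file exceeds 1KB limit");
        }
        Map<String, String> metadata = new HashMap<>();
        metadata.put(CHECKPOINT_FILE_DATA_KEY, Base64.getEncoder().encodeToString(ckpBytes));
        return metadata;
    }

    // Decode the metadata back into a translog.ckp file next to the downloaded translog.tlog.
    static void decodeTo(Map<String, String> metadata, Path ckpPath) throws IOException {
        String encoded = metadata.get(CHECKPOINT_FILE_DATA_KEY);
        if (encoded == null) {
            throw new IllegalStateException("Missing " + CHECKPOINT_FILE_DATA_KEY + " in object metadata");
        }
        Files.write(ckpPath, Base64.getDecoder().decode(encoded));
    }
}
// ---------------------------------------------------------------------------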
+ throw new IOException("Input stream exceeds 1KB limit"); + } + } + + byte[] bytes = byteArrayOutputStream.toByteArray(); + String metadataString = Base64.getEncoder().encodeToString(bytes); + metadata.put(CHECKPOINT_FILE_DATA_KEY, metadataString); + } + return metadata; + } + private void uploadBlob( TransferFileSnapshot fileSnapshot, ActionListener listener, @@ -113,6 +141,11 @@ private void uploadBlob( try { ChannelFactory channelFactory = FileChannel::open; + Map metadata = null; + if (fileSnapshot.getMetadataFileInputStream() != null) { + metadata = buildTransferFileMetadata(fileSnapshot.getMetadataFileInputStream()); + } + long contentLength; try (FileChannel channel = channelFactory.open(fileSnapshot.getPath(), StandardOpenOption.READ)) { contentLength = channel.size(); @@ -130,7 +163,8 @@ private void uploadBlob( writePriority, (size, position) -> new OffsetRangeFileInputStream(fileSnapshot.getPath(), size, position), Objects.requireNonNull(fileSnapshot.getChecksum()), - remoteIntegrityEnabled + remoteIntegrityEnabled, + metadata ); ActionListener completionListener = ActionListener.wrap(resp -> listener.onResponse(fileSnapshot), ex -> { logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", fileSnapshot.getName()), ex); @@ -168,7 +202,8 @@ public InputStream downloadBlob(Iterable path, String fileName) throws I @Override @ExperimentalApi - public FetchBlobResult downloadBlobWithMetadata(Iterable path, String fileName) throws IOException { + public InputStreamWithMetadata downloadBlobWithMetadata(Iterable path, String fileName) throws IOException { + assert blobStore.isBlobMetadataEnabled(); return blobStore.blobContainer((BlobPath) path).readBlobWithMetadata(fileName); } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java index dcec94edd694f..86f042af0584b 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java @@ -108,6 +108,8 @@ public static class TransferFileSnapshot extends FileSnapshot { private final long primaryTerm; private Long checksum; + @Nullable + private InputStream metadataFileInputStream; public TransferFileSnapshot(Path path, long primaryTerm, Long checksum) throws IOException { super(path); @@ -128,6 +130,14 @@ public long getPrimaryTerm() { return primaryTerm; } + public void setMetadataFileInputStream(InputStream inputStream) { + this.metadataFileInputStream = inputStream; + } + + public InputStream getMetadataFileInputStream() { + return metadataFileInputStream; + } + @Override public int hashCode() { return Objects.hash(primaryTerm, super.hashCode()); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java index 0894ebf500ebd..0ff983739438b 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java @@ -11,7 +11,7 @@ import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.FetchBlobResult; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import 
org.opensearch.core.action.ActionListener; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -131,11 +131,11 @@ void uploadBlobs( * * @param path the remote path from where download should be made * @param fileName the name of the file - * @return {@link FetchBlobResult} of the remote file + * @return {@link InputStreamWithMetadata} of the remote file * @throws IOException the exception while reading the data */ @ExperimentalApi - FetchBlobResult downloadBlobWithMetadata(Iterable path, String fileName) throws IOException; + InputStreamWithMetadata downloadBlobWithMetadata(Iterable path, String fileName) throws IOException; void listAllInSortedOrder(Iterable path, String filenamePrefix, int limit, ActionListener> listener); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java index ef34fd31a296b..6dcdc8f8cf44a 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java @@ -12,6 +12,7 @@ import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot; +import java.io.IOException; import java.util.Set; /** @@ -39,4 +40,10 @@ public interface TransferSnapshot { * @return the translog transfer metadata */ TranslogTransferMetadata getTranslogTransferMetadata(); + + /** + * The snapshot of the translog generational files having checkpoint file inputStream as metadata + * @return the set of translog files having checkpoint file inputStream as metadata. + */ + Set getTranslogFileSnapshotWithMetadata() throws IOException; } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index fb78731246a07..ae007c0c33e1e 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -64,6 +64,16 @@ public Set getTranslogFileSnapshots() { return translogCheckpointFileInfoTupleSet.stream().map(Tuple::v1).collect(Collectors.toSet()); } + @Override + public Set getTranslogFileSnapshotWithMetadata() throws IOException { + for (Tuple tuple : translogCheckpointFileInfoTupleSet) { + TransferFileSnapshot translogFileSnapshot = tuple.v1(); + TransferFileSnapshot checkpointFileSnapshot = tuple.v2(); + translogFileSnapshot.setMetadataFileInputStream(checkpointFileSnapshot.inputStream()); + } + return getTranslogFileSnapshots(); + } + @Override public TranslogTransferMetadata getTranslogTransferMetadata() { return new TranslogTransferMetadata( diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 47638f44fd6fc..1cc39cdf442e2 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -16,6 +16,7 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; +import 
org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -36,6 +37,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Base64; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -63,6 +65,9 @@ public class TranslogTransferManager { private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private final RemoteStoreSettings remoteStoreSettings; private static final int METADATA_FILES_TO_FETCH = 10; + // Flag to include checkpoint file data as translog file metadata during upload/download + private final boolean isTranslogMetadataEnabled; + final static String CHECKPOINT_FILE_DATA_KEY = "ckp-data"; private final Logger logger; @@ -79,7 +84,8 @@ public TranslogTransferManager( BlobPath remoteMetadataTransferPath, FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker remoteTranslogTransferTracker, - RemoteStoreSettings remoteStoreSettings + RemoteStoreSettings remoteStoreSettings, + boolean isTranslogMetadataEnabled ) { this.shardId = shardId; this.transferService = transferService; @@ -89,6 +95,7 @@ public TranslogTransferManager( this.logger = Loggers.getLogger(getClass(), shardId); this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; this.remoteStoreSettings = remoteStoreSettings; + this.isTranslogMetadataEnabled = isTranslogMetadataEnabled; } public RemoteTranslogTransferTracker getRemoteTranslogTransferTracker() { @@ -110,8 +117,12 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans long prevUploadTimeInMillis = remoteTranslogTransferTracker.getTotalUploadTimeInMillis(); try { - toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots())); - toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots()))); + if (isTranslogMetadataEnabled) { + toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshotWithMetadata())); + } else { + toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots())); + toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots()))); + } if (toUpload.isEmpty()) { logger.trace("Nothing to upload for transfer"); return true; @@ -236,30 +247,78 @@ public boolean downloadTranslog(String primaryTerm, String generation, Path loca generation, location ); - // Download Checkpoint file from remote to local FS String ckpFileName = Translog.getCommitCheckpointFileName(Long.parseLong(generation)); - downloadToFS(ckpFileName, location, primaryTerm); - // Download translog file from remote to local FS String translogFilename = Translog.getFilename(Long.parseLong(generation)); - downloadToFS(translogFilename, location, primaryTerm); + if (isTranslogMetadataEnabled == false) { + // Download Checkpoint file, translog file from remote to local FS + downloadToFS(ckpFileName, location, primaryTerm, false); + downloadToFS(translogFilename, location, primaryTerm, false); + } else { + // Download translog.tlog file with object metadata from remote to local FS + Map metadata = downloadToFS(translogFilename, location, primaryTerm, true); + try { + assert metadata != null && !metadata.isEmpty() && metadata.containsKey(CHECKPOINT_FILE_DATA_KEY); + 
recoverCkpFileUsingMetadata(metadata, location, generation, translogFilename); + } catch (Exception e) { + throw new IOException("Failed to recover checkpoint file from remote", e); + } + } return true; } - private void downloadToFS(String fileName, Path location, String primaryTerm) throws IOException { + /** + * Process the provided metadata and tries to recover translog.ckp file to the FS. + */ + private void recoverCkpFileUsingMetadata(Map metadata, Path location, String generation, String fileName) + throws IOException { + + String ckpFileName = Translog.getCommitCheckpointFileName(Long.parseLong(generation)); + Path filePath = location.resolve(ckpFileName); + // Here, we always override the existing file if present. + deleteFileIfExists(filePath); + + String ckpDataBase64 = metadata.get(CHECKPOINT_FILE_DATA_KEY); + if (ckpDataBase64 == null) { + logger.error("Error processing metadata for translog file: {}", fileName); + throw new IllegalStateException( + "Checkpoint file data key " + CHECKPOINT_FILE_DATA_KEY + " is expected but not found in metadata for file: " + fileName + ); + } + byte[] ckpFileBytes = Base64.getDecoder().decode(ckpDataBase64); + Files.write(filePath, ckpFileBytes); + } + + private Map downloadToFS(String fileName, Path location, String primaryTerm, boolean withMetadata) throws IOException { Path filePath = location.resolve(fileName); // Here, we always override the existing file if present. // We need to change this logic when we introduce incremental download - if (Files.exists(filePath)) { - Files.delete(filePath); - } + deleteFileIfExists(filePath); + Map metadata = null; boolean downloadStatus = false; long bytesToRead = 0, downloadStartTime = System.nanoTime(); - try (InputStream inputStream = transferService.downloadBlob(remoteDataTransferPath.add(primaryTerm), fileName)) { - // Capture number of bytes for stats before reading - bytesToRead = inputStream.available(); - Files.copy(inputStream, filePath); - downloadStatus = true; + try { + if (withMetadata) { + try ( + InputStreamWithMetadata inputStreamWithMetadata = transferService.downloadBlobWithMetadata( + remoteDataTransferPath.add(primaryTerm), + fileName + ) + ) { + InputStream inputStream = inputStreamWithMetadata.getInputStream(); + metadata = inputStreamWithMetadata.getMetadata(); + + bytesToRead = inputStream.available(); + Files.copy(inputStream, filePath); + downloadStatus = true; + } + } else { + try (InputStream inputStream = transferService.downloadBlob(remoteDataTransferPath.add(primaryTerm), fileName)) { + bytesToRead = inputStream.available(); + Files.copy(inputStream, filePath); + downloadStatus = true; + } + } } finally { remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L); if (downloadStatus) { @@ -269,6 +328,13 @@ private void downloadToFS(String fileName, Path location, String primaryTerm) th // Mark in FileTransferTracker so that the same files are not uploaded at the time of translog sync fileTransferTracker.add(fileName, true); + return metadata; + } + + private void deleteFileIfExists(Path filePath) throws IOException { + if (Files.exists(filePath)) { + Files.delete(filePath); + } } public TranslogTransferMetadata readMetadata() throws IOException { @@ -391,7 +457,11 @@ public void deleteGenerationAsync(long primaryTerm, Set generations, Runna // Add .ckp and .tlog file to translog file list which is located in basePath/ String ckpFileName = Translog.getCommitCheckpointFileName(generation); String translogFileName = 
Translog.getFilename(generation); - translogFiles.addAll(List.of(ckpFileName, translogFileName)); + if (isTranslogMetadataEnabled == false) { + translogFiles.addAll(List.of(ckpFileName, translogFileName)); + } else { + translogFiles.add(translogFileName); + } }); // Delete the translog and checkpoint files asynchronously deleteTranslogFilesAsync(primaryTerm, translogFiles, onCompletion); diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 0bd4c7aedfc03..074186f64a75d 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -80,6 +80,18 @@ public class RemoteStoreSettings { Property.Dynamic ); + /** + * This setting is used to disable uploading translog.ckp file as metadata to translog.tlog. This setting is effective only for + * repositories that supports metadata read and write with metadata and is applicable for only remote store enabled clusters. + */ + @ExperimentalApi + public static final Setting CLUSTER_REMOTE_STORE_TRANSLOG_METADATA = Setting.boolSetting( + "cluster.remote_store.index.translog.translog_metadata", + true, + Property.NodeScope, + Property.Dynamic + ); + /** * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} @@ -105,12 +117,25 @@ public class RemoteStoreSettings { Property.NodeScope ); + /** + * Controls timeout value while uploading segment files to remote segment store + */ + public static final Setting CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.segment.transfer_timeout", + TimeValue.timeValueMinutes(30), + TimeValue.timeValueMinutes(10), + Property.NodeScope, + Property.Dynamic + ); + private volatile TimeValue clusterRemoteTranslogBufferInterval; private volatile int minRemoteSegmentMetadataFiles; private volatile TimeValue clusterRemoteTranslogTransferTimeout; + private volatile TimeValue clusterRemoteSegmentTransferTimeout; private volatile RemoteStoreEnums.PathType pathType; private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm; private volatile int maxRemoteTranslogReaders; + private volatile boolean isTranslogMetadataEnabled; public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); @@ -134,11 +159,20 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { pathType = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setPathType); + isTranslogMetadataEnabled = clusterSettings.get(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, this::setTranslogMetadataEnabled); + pathHashAlgorithm = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setPathHashAlgorithm); maxRemoteTranslogReaders = CLUSTER_REMOTE_MAX_TRANSLOG_READERS.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_MAX_TRANSLOG_READERS, this::setMaxRemoteTranslogReaders); + + 
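// ---------------------------------------------------------------------------
// Editor's sketch (not part of the change): how the new
// cluster.remote_store.segment.transfer_timeout value is meant to be consumed.
// The RemoteStoreRefreshListener hunk earlier in this diff bounds its upload
// latch with this timeout instead of waiting indefinitely; this standalone
// version is illustrative only, with the latch and timeout supplied by the caller.
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class BoundedSegmentUploadWaitSketch {
    static void awaitOrFail(CountDownLatch uploadsCompleted, long timeoutMillis) throws IOException, InterruptedException {
        if (uploadsCompleted.await(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
            // Mirrors the SegmentUploadFailedException introduced in this diff.
            throw new IOException("Timeout while waiting for remote segment transfer to complete");
        }
    }
}
// ---------------------------------------------------------------------------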
clusterRemoteSegmentTransferTimeout = CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING, + this::setClusterRemoteSegmentTransferTimeout + ); } public TimeValue getClusterRemoteTranslogBufferInterval() { @@ -161,10 +195,18 @@ public TimeValue getClusterRemoteTranslogTransferTimeout() { return clusterRemoteTranslogTransferTimeout; } + public TimeValue getClusterRemoteSegmentTransferTimeout() { + return clusterRemoteSegmentTransferTimeout; + } + private void setClusterRemoteTranslogTransferTimeout(TimeValue clusterRemoteTranslogTransferTimeout) { this.clusterRemoteTranslogTransferTimeout = clusterRemoteTranslogTransferTimeout; } + private void setClusterRemoteSegmentTransferTimeout(TimeValue clusterRemoteSegmentTransferTimeout) { + this.clusterRemoteSegmentTransferTimeout = clusterRemoteSegmentTransferTimeout; + } + @ExperimentalApi public RemoteStoreEnums.PathType getPathType() { return pathType; @@ -179,6 +221,14 @@ private void setPathType(RemoteStoreEnums.PathType pathType) { this.pathType = pathType; } + private void setTranslogMetadataEnabled(boolean isTranslogMetadataEnabled) { + this.isTranslogMetadataEnabled = isTranslogMetadataEnabled; + } + + public boolean isTranslogMetadataEnabled() { + return isTranslogMetadataEnabled; + } + private void setPathHashAlgorithm(RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm) { this.pathHashAlgorithm = pathHashAlgorithm; } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 614f39166ea66..9462aeddbd0e4 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -59,6 +59,7 @@ import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -207,6 +208,7 @@ import org.opensearch.plugins.SearchPlugin; import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SystemIndexPlugin; +import org.opensearch.plugins.TelemetryAwarePlugin; import org.opensearch.plugins.TelemetryPlugin; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; import org.opensearch.ratelimitting.admissioncontrol.transport.AdmissionControlTransportInterceptor; @@ -274,6 +276,7 @@ import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -603,23 +606,24 @@ protected Node( getCustomNameResolvers(pluginsService.filterPlugins(DiscoveryPlugin.class)) ); - List clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class); - final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool); - clusterService.addStateApplier(scriptService); - resourcesToClose.add(clusterService); - final Set> consistentSettings = settingsModule.getConsistentSettings(); - if (consistentSettings.isEmpty() == false) { - clusterService.addLocalNodeMasterListener( - new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher() - ); - } - TracerFactory tracerFactory; MetricsRegistryFactory metricsRegistryFactory; if (FeatureFlags.isEnabled(TELEMETRY)) { - final 
TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); + final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, settingsModule.getClusterSettings()); if (telemetrySettings.isTracingFeatureEnabled() || telemetrySettings.isMetricsFeatureEnabled()) { List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); + List telemetryPluginsImplementingTelemetryAware = telemetryPlugins.stream() + .filter(a -> TelemetryAwarePlugin.class.isAssignableFrom(a.getClass())) + .collect(toList()); + if (telemetryPluginsImplementingTelemetryAware.isEmpty() == false) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "Telemetry plugins %s should not implement TelemetryAwarePlugin interface", + telemetryPluginsImplementingTelemetryAware + ) + ); + } TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); if (telemetrySettings.isTracingFeatureEnabled()) { tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); @@ -645,6 +649,24 @@ protected Node( resourcesToClose.add(tracer::close); resourcesToClose.add(metricsRegistry::close); + final ClusterManagerMetrics clusterManagerMetrics = new ClusterManagerMetrics(metricsRegistry); + + List clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class); + final ClusterService clusterService = new ClusterService( + settings, + settingsModule.getClusterSettings(), + threadPool, + clusterManagerMetrics + ); + clusterService.addStateApplier(scriptService); + resourcesToClose.add(clusterService); + final Set> consistentSettings = settingsModule.getConsistentSettings(); + if (consistentSettings.isEmpty() == false) { + clusterService.addLocalNodeMasterListener( + new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher() + ); + } + final ClusterInfoService clusterInfoService = newClusterInfoService(settings, clusterService, threadPool, client); final UsageService usageService = new UsageService(); @@ -672,7 +694,8 @@ protected Node( clusterPlugins, clusterInfoService, snapshotsInfoService, - threadPool.getThreadContext() + threadPool.getThreadContext(), + clusterManagerMetrics ); modules.add(clusterModule); IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)); @@ -875,7 +898,8 @@ protected Node( systemIndices, forbidPrivateIndexSettings, awarenessReplicaBalance, - remoteStoreSettings + remoteStoreSettings, + repositoriesServiceReference::get ); pluginsService.filterPlugins(Plugin.class) .forEach( @@ -909,6 +933,30 @@ protected Node( ) .collect(Collectors.toList()); + Collection telemetryAwarePluginComponents = pluginsService.filterPlugins(TelemetryAwarePlugin.class) + .stream() + .flatMap( + p -> p.createComponents( + client, + clusterService, + threadPool, + resourceWatcherService, + scriptService, + xContentRegistry, + environment, + nodeEnvironment, + namedWriteableRegistry, + clusterModule.getIndexNameExpressionResolver(), + repositoriesServiceReference::get, + tracer, + metricsRegistry + ).stream() + ) + .collect(Collectors.toList()); + + // Add the telemetryAwarePlugin components to the existing pluginComponents collection. 
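// ---------------------------------------------------------------------------
// Editor's sketch (hypothetical, not part of the change): what a plugin opting
// into the new TelemetryAwarePlugin extension point (defined later in this diff)
// might look like. The plugin name, the metric name, and the returned component
// are invented; the MetricsRegistry.createCounter call is assumed to be the
// standard telemetry metrics API.
import java.util.Collection;
import java.util.List;
import java.util.function.Supplier;

import org.opensearch.client.Client;
import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.env.Environment;
import org.opensearch.env.NodeEnvironment;
import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.TelemetryAwarePlugin;
import org.opensearch.repositories.RepositoriesService;
import org.opensearch.script.ScriptService;
import org.opensearch.telemetry.metrics.Counter;
import org.opensearch.telemetry.metrics.MetricsRegistry;
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.threadpool.ThreadPool;
import org.opensearch.watcher.ResourceWatcherService;

public class ExampleTelemetryAwarePlugin extends Plugin implements TelemetryAwarePlugin {
    @Override
    public Collection<Object> createComponents(
        Client client,
        ClusterService clusterService,
        ThreadPool threadPool,
        ResourceWatcherService resourceWatcherService,
        ScriptService scriptService,
        NamedXContentRegistry xContentRegistry,
        Environment environment,
        NodeEnvironment nodeEnvironment,
        NamedWriteableRegistry namedWriteableRegistry,
        IndexNameExpressionResolver indexNameExpressionResolver,
        Supplier<RepositoriesService> repositoriesServiceSupplier,
        Tracer tracer,
        MetricsRegistry metricsRegistry
    ) {
        // Build an instrument from the injected registry; a real plugin would hand
        // it (and the tracer) to its own components before returning them here.
        Counter requestCounter = metricsRegistry.createCounter("example_plugin.requests.count", "Requests handled by the example plugin", "1");
        return List.of(requestCounter);
    }
}
// ---------------------------------------------------------------------------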
+ pluginComponents.addAll(telemetryAwarePluginComponents); + // register all standard SearchRequestOperationsCompositeListenerFactory to the SearchRequestOperationsCompositeListenerFactory final SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory = new SearchRequestOperationsCompositeListenerFactory( diff --git a/server/src/main/java/org/opensearch/plugins/TelemetryAwarePlugin.java b/server/src/main/java/org/opensearch/plugins/TelemetryAwarePlugin.java new file mode 100644 index 0000000000000..42cab326f88bf --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/TelemetryAwarePlugin.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.lifecycle.LifecycleComponent; +import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.script.ScriptService; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.watcher.ResourceWatcherService; + +import java.util.Collection; +import java.util.Collections; +import java.util.function.Supplier; + +/** + * Plugin that provides the telemetry registries to build component with telemetry and also provide a way to + * pass telemetry registries to the implementing plugins for adding instrumentation in the code. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TelemetryAwarePlugin { + + /** + * Returns components added by this plugin. + *
* <p>
+ * Any components returned that implement {@link LifecycleComponent} will have their lifecycle managed. + * Note: To aid in the migration away from guice, all objects returned as components will be bound in guice + * to themselves. + * + * @param client A client to make requests to the system + * @param clusterService A service to allow watching and updating cluster state + * @param threadPool A service to allow retrieving an executor to run an async action + * @param resourceWatcherService A service to watch for changes to node local files + * @param scriptService A service to allow running scripts on the local node + * @param xContentRegistry the registry for extensible xContent parsing + * @param environment the environment for path and setting configurations + * @param nodeEnvironment the node environment used coordinate access to the data paths + * @param namedWriteableRegistry the registry for {@link NamedWriteable} object parsing + * @param indexNameExpressionResolver A service that resolves expression to index and alias names + * @param repositoriesServiceSupplier A supplier for the service that manages snapshot repositories; will return null when this method + * is called, but will return the repositories service once the node is initialized. + * @param tracer the tracer to add tracing instrumentation. + * @param metricsRegistry the registry for metrics instrumentation. + */ + default Collection createComponents( + Client client, + ClusterService clusterService, + ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + Environment environment, + NodeEnvironment nodeEnvironment, + NamedWriteableRegistry namedWriteableRegistry, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier repositoriesServiceSupplier, + Tracer tracer, + MetricsRegistry metricsRegistry + ) { + return Collections.emptyList(); + } +} diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 5883a8a37be71..93aac68eb898c 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -472,7 +472,7 @@ public ClusterState execute(ClusterState currentState) { .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ); - createIndexService.addRemoteStorePathStrategyInCustomData(indexMdBuilder, false); + createIndexService.addRemoteStoreCustomMetadata(indexMdBuilder, false); shardLimitValidator.validateShardLimit( renamedIndexName, snapshotIndexMetadata.getSettings(), diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 50ffd7322544a..3d6a54055d3d5 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -740,7 +740,8 @@ public void testRolloverClusterState() throws Exception { systemIndices, false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( 
clusterService, @@ -879,7 +880,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { systemIndices, false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -1058,7 +1060,8 @@ public void testRolloverClusterStateForDataStreamNoTemplate() throws Exception { new SystemIndices(emptyMap()), false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index cf7080ab2fc06..ff9e41ee7c784 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -55,6 +55,7 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; @@ -153,7 +154,11 @@ private void indicesThatCannotBeCreatedTestCase( null, new IndexingPressureService( Settings.EMPTY, - new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) + ClusterServiceUtils.createClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + null + ) ), null, new SystemIndices(emptyMap()), diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index da9156ccdb71a..a94a5d60b3f5a 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -71,6 +71,7 @@ import org.opensearch.ingest.IngestService; import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; @@ -170,7 +171,11 @@ class TestTransportBulkAction extends TransportBulkAction { ), new IndexingPressureService( SETTINGS, - new ClusterService(SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null) + ClusterServiceUtils.createClusterService( + SETTINGS, + new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + null + ) ), null, new SystemIndices(emptyMap()), diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java index f009988ffae17..91a2552ac3f04 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java +++ 
b/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java @@ -45,6 +45,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -90,7 +91,7 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { LoggerContext context = (LoggerContext) LogManager.getContext(false); SearchPhaseContext searchPhaseContext1 = new MockSearchPhaseContext(1); - ClusterService clusterService1 = new ClusterService( + ClusterService clusterService1 = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null @@ -99,7 +100,7 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { int numberOfLoggersBefore = context.getLoggers().size(); SearchPhaseContext searchPhaseContext2 = new MockSearchPhaseContext(1); - ClusterService clusterService2 = new ClusterService( + ClusterService clusterService2 = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null @@ -124,7 +125,7 @@ public void testOnRequestEnd() throws InterruptedException { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "0ms"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService, logger); final List searchListenersList = new ArrayList<>(List.of(searchRequestSlowLog)); @@ -157,7 +158,7 @@ public void testConcurrentOnRequestEnd() throws InterruptedException { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "-1"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService, logger); final List searchListenersList = new ArrayList<>(List.of(searchRequestSlowLog)); @@ -321,7 +322,7 @@ public void testLevelSettingWarn() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL.getKey(), level); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(level, searchRequestSlowLog.getLevel()); } @@ -332,7 +333,7 @@ public void testLevelSettingDebug() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL.getKey(), level); 
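// ---------------------------------------------------------------------------
// Editor's note (sketch, not part of the change): these test edits all follow one
// pattern. ClusterService now takes a ClusterManagerMetrics argument (see the
// Node.java hunk above), so tests switch to the ClusterServiceUtils helper, which
// is assumed to fill in a no-op metrics registry. Roughly:
//
//   // before
//   ClusterService cs = new ClusterService(settings, clusterSettings, null);
//   // after: helper hides the extra ClusterManagerMetrics argument
//   ClusterService cs = ClusterServiceUtils.createClusterService(settings, clusterSettings, null);
//   // equivalent explicit form
//   ClusterService cs = new ClusterService(settings, clusterSettings, null,
//       new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE));
// ---------------------------------------------------------------------------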
Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(level, searchRequestSlowLog.getLevel().toString()); } @@ -343,7 +344,7 @@ public void testLevelSettingFail() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL.getKey(), level); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); try { new SearchRequestSlowLog(clusterService); @@ -363,7 +364,7 @@ public void testSetThresholds() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "100ms"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(TimeValue.timeValueMillis(400).nanos(), searchRequestSlowLog.getWarnThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), searchRequestSlowLog.getInfoThreshold()); @@ -380,7 +381,7 @@ public void testSetThresholdsUnits() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "100nanos"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(TimeValue.timeValueSeconds(400).nanos(), searchRequestSlowLog.getWarnThreshold()); assertEquals(TimeValue.timeValueMillis(300).nanos(), searchRequestSlowLog.getInfoThreshold()); @@ -395,7 +396,7 @@ public void testSetThresholdsDefaults() { settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING.getKey(), "200ms"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); assertEquals(TimeValue.timeValueMillis(400).nanos(), searchRequestSlowLog.getWarnThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), searchRequestSlowLog.getInfoThreshold()); @@ -409,7 +410,7 @@ public void testSetThresholdsError() { 
settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING.getKey(), "NOT A TIME VALUE"); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); try { new SearchRequestSlowLog(clusterService); diff --git a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java index 557e4dc2ca8c5..ae35d37fe77b2 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java @@ -72,6 +72,8 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.plugins.ClusterPlugin; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.test.gateway.TestShardBatchGatewayAllocator; @@ -92,7 +94,7 @@ public class ClusterModuleTests extends ModuleTestCase { public void setUp() throws Exception { super.setUp(); threadContext = new ThreadContext(Settings.EMPTY); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null @@ -167,7 +169,7 @@ public void testRegisterAllocationDeciderDuplicate() { public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new EnableAllocationDecider(settings, clusterSettings)); } - }), clusterInfoService, null, threadContext) + }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)) ); assertEquals(e.getMessage(), "Cannot specify allocation decider [" + EnableAllocationDecider.class.getName() + "] twice"); } @@ -178,7 +180,7 @@ public void testRegisterAllocationDecider() { public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new FakeAllocationDecider()); } - }), clusterInfoService, null, threadContext); + }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); assertTrue(module.deciderList.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class))); } @@ -188,7 +190,7 @@ private ClusterModule newClusterModuleWithShardsAllocator(Settings settings, Str public Map> getShardsAllocators(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonMap(name, supplier); } - }), clusterInfoService, null, threadContext); + }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); } public void testRegisterShardsAllocator() { @@ -209,7 +211,15 @@ public void testUnknownShardsAllocator() { Settings settings = Settings.builder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), "dne").build(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new ClusterModule(settings, clusterService, Collections.emptyList(), clusterInfoService, null, threadContext) + () -> new ClusterModule( + settings, + clusterService, + 
Collections.emptyList(), + clusterInfoService, + null, + threadContext, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ) ); assertEquals("Unknown ShardsAllocator [dne]", e.getMessage()); } @@ -295,7 +305,8 @@ public void testRejectsReservedExistingShardsAllocatorName() { Collections.singletonList(existingShardsAllocatorPlugin(GatewayAllocator.ALLOCATOR_NAME)), clusterInfoService, null, - threadContext + threadContext, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); expectThrows( IllegalArgumentException.class, @@ -310,7 +321,8 @@ public void testRejectsDuplicateExistingShardsAllocatorName() { Arrays.asList(existingShardsAllocatorPlugin("duplicate"), existingShardsAllocatorPlugin("duplicate")), clusterInfoService, null, - threadContext + threadContext, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); expectThrows( IllegalArgumentException.class, diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index 47dbf85c13b1f..537b2d13ec08a 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -54,6 +54,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpClient; @@ -83,7 +84,13 @@ public void testScheduling() { final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final ThreadPool threadPool = deterministicTaskQueue.getThreadPool(); - final ClusterApplierService clusterApplierService = new ClusterApplierService("test", settings, clusterSettings, threadPool) { + final ClusterApplierService clusterApplierService = new ClusterApplierService( + "test", + settings, + clusterSettings, + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ) { @Override protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { return new MockSinglePrioritizingExecutor("mock-executor", deterministicTaskQueue, threadPool); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index d94f3fb304fe2..10d5dceb74f55 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -33,6 +33,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; @@ -61,6 +62,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; @@ -179,7 +181,8 @@ private void 
setupRealClusterManagerServiceAndCoordinator(long term, ClusterStat ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); AtomicReference clusterStateRef = new AtomicReference<>(initialState); clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index fad98a6609c3b..0072649e4ca72 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -56,6 +56,7 @@ import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; @@ -87,6 +88,8 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; @@ -116,6 +119,7 @@ import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -156,6 +160,7 @@ import static org.opensearch.node.Node.NODE_ATTRIBUTES; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.hamcrest.Matchers.containsString; @@ -176,6 +181,9 @@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { private CreateIndexClusterStateUpdateRequest request; private QueryShardContext queryShardContext; private ClusterSettings clusterSettings; + private IndicesService indicesServices; + private RepositoriesService repositoriesService; + private Supplier repositoriesServiceSupplier; private static final String segmentRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; private static final String translogRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() @@ -188,6 +196,9 @@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { public void setup() throws Exception { super.setUp(); clusterSettings 
= new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + indicesServices = mock(IndicesService.class); + repositoriesServiceSupplier = mock(Supplier.class); + repositoriesService = mock(RepositoriesService.class); } @Before @@ -694,7 +705,7 @@ public void testValidateIndexName() throws Exception { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -705,7 +716,8 @@ public void testValidateIndexName() throws Exception { new SystemIndices(Collections.emptyMap()), false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); validateIndexName( checkerService, @@ -781,7 +793,7 @@ public void testValidateDotIndex() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -792,7 +804,8 @@ public void testValidateDotIndex() { new SystemIndices(Collections.singletonMap("foo", systemIndexDescriptors)), false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); // Check deprecations assertFalse(checkerService.validateDotIndex(".test2", false)); @@ -1207,7 +1220,7 @@ public void testvalidateIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( settings, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1218,7 +1231,8 @@ public void testvalidateIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); @@ -1327,7 +1341,7 @@ public void testClusterForceReplicationTypeInValidateIndexSettings() { final MetadataCreateIndexService checkerService = new MetadataCreateIndexService( forceClusterSettingEnabled, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1338,7 +1352,8 @@ public void testClusterForceReplicationTypeInValidateIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); // Use DOCUMENT replication type setting for index creation final Settings indexSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build(); @@ -1453,7 +1468,7 @@ public void testRemoteStoreDisabledByUserIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1464,7 +1479,8 @@ public void testRemoteStoreDisabledByUserIndexSettings() 
{ new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1488,7 +1504,7 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1499,7 +1515,8 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1528,7 +1545,7 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1539,7 +1556,8 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1746,10 +1764,16 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); ThreadPool threadPool = new TestThreadPool(getTestName()); + BlobStoreRepository repositoryMock = mock(BlobStoreRepository.class); + when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); + when(repositoriesService.repository(getRemoteStoreTranslogRepo(settings))).thenReturn(repositoryMock); + BlobStore blobStoreMock = mock(BlobStore.class); + when(repositoryMock.blobStore()).thenReturn(blobStoreMock); + when(blobStoreMock.isBlobMetadataEnabled()).thenReturn(randomBoolean()); MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( settings, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1760,7 +1784,8 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), - remoteStoreSettings + remoteStoreSettings, + repositoriesServiceSupplier ); CreateIndexClusterStateUpdateRequest request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = Settings.builder() @@ -1873,7 +1898,7 @@ public void testIndexLifecycleNameSetting() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1884,7 +1909,8 @@ public void testIndexLifecycleNameSetting() { new 
SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true, Optional.empty()); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 0b99ffac67ee8..6643d6e13289b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -57,8 +57,10 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndexTemplateMissingException; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexTemplateException; import org.opensearch.indices.SystemIndices; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; @@ -2039,10 +2041,13 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr when(clusterService.state()).thenReturn(clusterState); when(clusterService.getSettings()).thenReturn(settings); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + IndicesService indicesServices = mock(IndicesService.class); + RepositoriesService repositoriesService = mock(RepositoriesService.class); + // when(indicesServices.getRepositoriesServiceSupplier()).thenReturn(() -> repositoriesService); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false), @@ -2053,7 +2058,8 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexTemplateService service = new MetadataIndexTemplateService( clusterService, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java index 64d9c243304d8..cce75105dd33f 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java @@ -33,6 +33,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterInfo; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.EmptyClusterInfoService; @@ -56,6 +57,9 @@ import org.opensearch.common.settings.Settings; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.snapshots.EmptySnapshotsInfoService; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.test.OpenSearchTestCase; import 
org.opensearch.test.gateway.TestGatewayAllocator; @@ -77,6 +81,12 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; +import static org.mockito.ArgumentMatchers.anyDouble; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class AllocationServiceTests extends OpenSearchTestCase { @@ -137,6 +147,16 @@ public void testAssignsPrimariesInPriorityOrderThenReplicas() { .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), Integer.MAX_VALUE) .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + final MetricsRegistry metricsRegistry = mock(MetricsRegistry.class); + final Histogram rerouteHistogram = mock(Histogram.class); + final Histogram mockedHistogram = mock(Histogram.class); + when(metricsRegistry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocationOnMock -> { + String histogramName = (String) invocationOnMock.getArguments()[0]; + if (histogramName.contains("reroute.latency")) { + return rerouteHistogram; + } + return mockedHistogram; + }); final AllocationService allocationService = new AllocationService( new AllocationDeciders( Arrays.asList( @@ -158,7 +178,8 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } }, new EmptyClusterInfoService(), - EmptySnapshotsInfoService.INSTANCE + EmptySnapshotsInfoService.INSTANCE, + new ClusterManagerMetrics(metricsRegistry) ); final String unrealisticAllocatorName = "unrealistic"; @@ -258,10 +279,18 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing assertThat(routingTable3.index("mediumPriority").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4)); assertTrue(routingTable3.index("lowPriority").allPrimaryShardsActive()); assertThat(routingTable3.index("invalid").shardsWithState(ShardRoutingState.STARTED), empty()); + + verify(rerouteHistogram, times(3)).record(anyDouble()); } public void testExplainsNonAllocationOfShardWithUnknownAllocator() { - final AllocationService allocationService = new AllocationService(null, null, null, null); + final AllocationService allocationService = new AllocationService( + null, + null, + null, + null, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ); allocationService.setExistingShardsAllocators( Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, new TestGatewayAllocator()) ); diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index c5ed505e6bbf2..3cbdfb80067d7 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; @@ -51,6 +52,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import 
org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -74,15 +77,30 @@ import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyDouble; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; public class ClusterApplierServiceTests extends OpenSearchTestCase { private static ThreadPool threadPool; private TimedClusterApplierService clusterApplierService; + private static MetricsRegistry metricsRegistry; + private static Histogram applierslatenctHistogram; + private static Histogram listenerslatenctHistogram; @BeforeClass public static void createThreadPool() { threadPool = new TestThreadPool(ClusterApplierServiceTests.class.getName()); + metricsRegistry = mock(MetricsRegistry.class); + applierslatenctHistogram = mock(Histogram.class); + listenerslatenctHistogram = mock(Histogram.class); } @AfterClass @@ -96,6 +114,13 @@ public static void stopThreadPool() { @Before public void setUp() throws Exception { super.setUp(); + when(metricsRegistry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocationOnMock -> { + String histogramName = (String) invocationOnMock.getArguments()[0]; + if (histogramName.contains("appliers.latency")) { + return applierslatenctHistogram; + } + return listenerslatenctHistogram; + }); clusterApplierService = createTimedClusterService(true); } @@ -110,7 +135,8 @@ private TimedClusterApplierService createTimedClusterService(boolean makeCluster TimedClusterApplierService timedClusterApplierService = new TimedClusterApplierService( Settings.builder().put("cluster.name", "ClusterApplierServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(metricsRegistry) ); timedClusterApplierService.setNodeConnectionsService(createNoOpNodeConnectionsService()); timedClusterApplierService.setInitialState( @@ -194,6 +220,8 @@ public void onFailure(String source, Exception e) { }); assertBusy(mockAppender::assertAllExpectationsMatched); } + verifyNoInteractions(applierslatenctHistogram); + verifyNoInteractions(listenerslatenctHistogram); } @TestLogging(value = "org.opensearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level") @@ -291,6 +319,8 @@ public void onFailure(String source, Exception e) { latch.await(); mockAppender.assertAllExpectationsMatched(); } + verifyNoInteractions(applierslatenctHistogram); + verifyNoInteractions(listenerslatenctHistogram); } public void testLocalNodeClusterManagerListenerCallbacks() { @@ -329,6 +359,10 @@ public void offClusterManager() { setState(timedClusterApplierService, state); assertThat(isClusterManager.get(), is(true)); + verify(listenerslatenctHistogram, atLeastOnce()).record(anyDouble(), any()); + clearInvocations(listenerslatenctHistogram); + 
verifyNoInteractions(applierslatenctHistogram); + timedClusterApplierService.close(); } @@ -366,6 +400,10 @@ public void offMaster() { setState(timedClusterApplierService, state); assertThat(isClusterManager.get(), is(false)); + verify(listenerslatenctHistogram, atLeastOnce()).record(anyDouble(), any()); + clearInvocations(listenerslatenctHistogram); + verifyNoInteractions(applierslatenctHistogram); + timedClusterApplierService.close(); } @@ -405,6 +443,10 @@ public void onFailure(String source, Exception e) { latch.await(); assertNull(error.get()); assertTrue(applierCalled.get()); + + verify(applierslatenctHistogram, atLeastOnce()).record(anyDouble(), any()); + clearInvocations(applierslatenctHistogram); + verifyNoInteractions(listenerslatenctHistogram); } public void testClusterStateApplierBubblesUpExceptionsInApplier() throws InterruptedException { @@ -435,6 +477,9 @@ public void onFailure(String source, Exception e) { latch.await(); assertNotNull(error.get()); assertThat(error.get().getMessage(), containsString("dummy exception")); + + verifyNoInteractions(applierslatenctHistogram); + verifyNoInteractions(listenerslatenctHistogram); } public void testClusterStateApplierBubblesUpExceptionsInSettingsApplier() throws InterruptedException { @@ -478,6 +523,9 @@ public void onFailure(String source, Exception e) { latch.await(); assertNotNull(error.get()); assertThat(error.get().getMessage(), containsString("illegal value can't update")); + + verifyNoInteractions(applierslatenctHistogram); + verifyNoInteractions(listenerslatenctHistogram); } public void testClusterStateApplierSwallowsExceptionInListener() throws InterruptedException { @@ -509,6 +557,9 @@ public void onFailure(String source, Exception e) { latch.await(); assertNull(error.get()); assertTrue(applierCalled.get()); + + verifyNoInteractions(applierslatenctHistogram); + verifyNoInteractions(listenerslatenctHistogram); } public void testClusterStateApplierCanCreateAnObserver() throws InterruptedException { @@ -565,6 +616,10 @@ public void onFailure(String source, Exception e) { latch.await(); assertNull(error.get()); assertTrue(applierCalled.get()); + + verify(applierslatenctHistogram, atLeastOnce()).record(anyDouble(), any()); + clearInvocations(applierslatenctHistogram); + verifyNoInteractions(listenerslatenctHistogram); } public void testThreadContext() throws InterruptedException { @@ -609,6 +664,9 @@ public void onFailure(String source, Exception e) { } latch.await(); + + verifyNoInteractions(applierslatenctHistogram); + verifyNoInteractions(listenerslatenctHistogram); } static class TimedClusterApplierService extends ClusterApplierService { @@ -617,8 +675,13 @@ static class TimedClusterApplierService extends ClusterApplierService { volatile Long currentTimeOverride = null; boolean applicationMayFail; - TimedClusterApplierService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { - super("test_node", settings, clusterSettings, threadPool); + TimedClusterApplierService( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics + ) { + super("test_node", settings, clusterSettings, threadPool, clusterManagerMetrics); this.clusterSettings = clusterSettings; } diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java index 4d88683826af7..bd12b09d2b983 100644 --- 
a/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java @@ -10,6 +10,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.junit.After; @@ -26,11 +27,11 @@ public void terminateThreadPool() { public void testDeprecatedGetMasterServiceBWC() { try ( - ClusterService clusterService = new ClusterService( + ClusterService clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool - ) + ); ) { MasterService masterService = clusterService.getMasterService(); ClusterManagerService clusterManagerService = clusterService.getClusterManagerService(); diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 85f6c129944fa..0ff8d9dc4e7a5 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -40,6 +40,7 @@ import org.opensearch.Version; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskConfig; @@ -61,6 +62,9 @@ import org.opensearch.common.util.concurrent.BaseFuture; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -95,6 +99,13 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyDouble; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class MasterServiceTests extends OpenSearchTestCase { @@ -125,6 +136,10 @@ public void randomizeCurrentTime() { } private ClusterManagerService createClusterManagerService(boolean makeClusterManager) { + return createClusterManagerService(makeClusterManager, NoopMetricsRegistry.INSTANCE); + } + + private ClusterManagerService createClusterManagerService(boolean makeClusterManager, MetricsRegistry metricsRegistry) { final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() @@ -132,7 +147,8 @@ private ClusterManagerService createClusterManagerService(boolean makeClusterMan .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, 
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(metricsRegistry) ); final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) .nodes( @@ -154,7 +170,18 @@ private ClusterManagerService createClusterManagerService(boolean makeClusterMan } public void testClusterManagerAwareExecution() throws Exception { - final ClusterManagerService nonClusterManager = createClusterManagerService(false); + final MetricsRegistry metricsRegistry = mock(MetricsRegistry.class); + final Histogram clusterStateComputeHistogram = mock(Histogram.class); + final Histogram clusterStatePublishHistogram = mock(Histogram.class); + when(metricsRegistry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocationOnMock -> { + String histogramName = (String) invocationOnMock.getArguments()[0]; + if (histogramName.contains("cluster.state.new.compute.latency")) { + return clusterStateComputeHistogram; + } + return clusterStatePublishHistogram; + }); + + final ClusterManagerService nonClusterManager = createClusterManagerService(false, metricsRegistry); final boolean[] taskFailed = { false }; final CountDownLatch latch1 = new CountDownLatch(1); @@ -194,6 +221,8 @@ public void onFailure(String source, Exception e) { assertFalse("non-cluster-manager cluster state update task was not executed", taskFailed[0]); nonClusterManager.close(); + + verify(clusterStateComputeHistogram, times(1)).record(anyDouble(), any()); } public void testThreadContext() throws InterruptedException { @@ -1070,7 +1099,8 @@ public void testLongClusterStateUpdateLogging() throws Exception { .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ) ) { @@ -1246,6 +1276,18 @@ public void testAcking() throws InterruptedException { final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + + final MetricsRegistry metricsRegistry = mock(MetricsRegistry.class); + final Histogram clusterStateComputeHistogram = mock(Histogram.class); + final Histogram clusterStatePublishHistogram = mock(Histogram.class); + when(metricsRegistry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocationOnMock -> { + String histogramName = (String) invocationOnMock.getArguments()[0]; + if (histogramName.contains("cluster.state.new.compute.latency")) { + return clusterStateComputeHistogram; + } + return clusterStatePublishHistogram; + }); + try ( ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() @@ -1253,7 +1295,8 @@ public void testAcking() throws InterruptedException { .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(metricsRegistry) ) ) { @@ -1372,6 +1415,9 @@ public void onAckTimeout() { latch.await(); } } + + verify(clusterStateComputeHistogram, times(2)).record(anyDouble(), any()); + verify(clusterStatePublishHistogram, times(1)).record(anyDouble()); } 
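The metrics assertions added in these service tests all follow one shape: stub MetricsRegistry.createHistogram so a matching metric name returns a dedicated Histogram mock, hand the registry to the service via ClusterManagerMetrics, then verify record(...) after driving the code path. A condensed sketch of that shape (not part of the patch; it assumes the Mockito static imports already used above and the metric name fragment these tests match on):

    // Illustrative sketch only: per-metric Histogram mocks selected by metric name.
    MetricsRegistry metricsRegistry = mock(MetricsRegistry.class);
    Histogram computeHistogram = mock(Histogram.class);
    Histogram fallbackHistogram = mock(Histogram.class);
    when(metricsRegistry.createHistogram(anyString(), anyString(), anyString())).thenAnswer(invocation -> {
        String histogramName = (String) invocation.getArguments()[0];
        return histogramName.contains("cluster.state.new.compute.latency") ? computeHistogram : fallbackHistogram;
    });
    // The registry reaches the service under test through ClusterManagerMetrics, e.g.
    //   new ClusterManagerService(settings, clusterSettings, threadPool, new ClusterManagerMetrics(metricsRegistry));
    // and the recorded latency is asserted once the cluster state update has run:
    verify(computeHistogram, times(1)).record(anyDouble(), any());
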
public void testDeprecatedMasterServiceUpdateTaskThreadName() { diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java index 1c43bb565ef69..dd2fb51151a5b 100644 --- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java @@ -55,6 +55,7 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.repositories.IndexId; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; @@ -123,7 +124,7 @@ public String getValue(final String value) { }) ); - final ClusterService clusterService = new ClusterService(Settings.EMPTY, clusterSettings, null); + final ClusterService clusterService = ClusterServiceUtils.createClusterService(Settings.EMPTY, clusterSettings, null); final Metadata.Builder builder = Metadata.builder(); final Settings settings = Settings.builder().put("foo.old", randomAlphaOfLength(8)).build(); applySettingsToBuilder.accept(builder, settings); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java index bb59a5792ec8c..aa31c710c1fbd 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java @@ -18,14 +18,21 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.shard.ShardId; import org.opensearch.snapshots.SnapshotShardSizeInfo; import org.opensearch.test.gateway.TestShardBatchGatewayAllocator; @@ -222,6 +229,21 @@ public void testSafelyRemoveShardFromBothBatch() { assertEquals(0, testShardsBatchGatewayAllocator.getBatchIdToStoreShardBatch().size()); } + public void testDeDuplicationOfReplicaShardsAcrossBatch() { + final ShardId shardId = new ShardId("test", "_na_", 0); + final DiscoveryNode node = newNode("node1"); + // number of replicas is greater than batch size - to ensure shardRouting gets de-duped across batch + createRoutingWithDifferentUnAssignedInfo(shardId, node, 50); + testShardsBatchGatewayAllocator = new TestShardBatchGatewayAllocator(10); + + // only replica shard should be in the batch + Set replicaBatches = testShardsBatchGatewayAllocator.createAndUpdateBatches(testAllocation, false); + assertEquals(1, replicaBatches.size()); + ShardsBatchGatewayAllocator.ShardsBatch shardsBatch = 
testShardsBatchGatewayAllocator.getBatchIdToStoreShardBatch() + .get(replicaBatches.iterator().next()); + assertEquals(1, shardsBatch.getBatchedShards().size()); + } + public void testGetBatchIdExisting() { createIndexAndUpdateClusterState(2, 1020, 1); // get all shardsRoutings for test index @@ -345,6 +367,59 @@ private void createIndexAndUpdateClusterState(int count, int numberOfShards, int ); } + private void createRoutingWithDifferentUnAssignedInfo(ShardId primaryShardId, DiscoveryNode node, int numberOfReplicas) { + + ShardRouting primaryShard = TestShardRouting.newShardRouting(primaryShardId, node.getId(), true, ShardRoutingState.STARTED); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(primaryShardId.getIndexName()) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(numberOfReplicas) + .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())) + ) + .build(); + + IndexRoutingTable.Builder isd = IndexRoutingTable.builder(primaryShardId.getIndex()) + .addIndexShard(new IndexShardRoutingTable.Builder(primaryShardId).addShard(primaryShard).build()); + + for (int i = 0; i < numberOfReplicas; i++) { + isd.addShard( + ShardRouting.newUnassigned( + primaryShardId, + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo( + UnassignedInfo.Reason.REPLICA_ADDED, + "message for replica-copy " + i, + null, + 0, + System.nanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + Collections.emptySet() + ) + ) + ); + } + + RoutingTable routingTable = RoutingTable.builder().add(isd).build(); + clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .build(); + testAllocation = new RoutingAllocation( + new AllocationDeciders(Collections.emptyList()), + new RoutingNodes(clusterState, false), + clusterState, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + + } + // call this after index creation and update cluster state private Tuple, Set> createBatchesAndAssert(int expectedBatchSize) { Set primaryBatches = testShardsBatchGatewayAllocator.createAndUpdateBatches(testAllocation, true); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java index c448c4b07e03b..59fb7df5428e2 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java @@ -53,6 +53,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.snapshots.EmptySnapshotsInfoService; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; import org.hamcrest.Matchers; @@ -68,7 +69,7 @@ public class GatewayServiceTests extends OpenSearchTestCase { private GatewayService createService(final Settings.Builder settings) { - final ClusterService clusterService = new ClusterService( + final ClusterService clusterService = ClusterServiceUtils.createClusterService( Settings.builder().put("cluster.name", "GatewayServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null diff --git a/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java 
b/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java index 0b657c1c9745f..8db7c58bf9503 100644 --- a/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java @@ -24,6 +24,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.stats.IndexingPressurePerShardStats; import org.opensearch.index.stats.IndexingPressureStats; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -44,7 +45,7 @@ public class IndexingPressureServiceTests extends OpenSearchTestCase { @Before public void beforeTest() { clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - clusterService = new ClusterService(settings, clusterSettings, null); + clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); } public void testCoordinatingOperationForShardIndexingPressure() { diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java index ce719a18898f8..c5ad1370ac75a 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java @@ -19,6 +19,7 @@ import org.opensearch.index.stats.IndexingPressurePerShardStats; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matcher; import org.hamcrest.MatcherAssert; @@ -42,7 +43,7 @@ public class ShardIndexingPressureConcurrentExecutionTests extends OpenSearchTes .build(); private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - private final ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + private final ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); public enum OperationType { COORDINATING, diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureMemoryManagerTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureMemoryManagerTests.java index 023063c7d6e03..31ecad7c8d701 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureMemoryManagerTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureMemoryManagerTests.java @@ -8,11 +8,11 @@ package org.opensearch.index; -import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import java.util.concurrent.TimeUnit; @@ -27,7 +27,7 @@ public class ShardIndexingPressureMemoryManagerTests extends OpenSearchTestCase .build(); private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); private final ShardIndexingPressureSettings shardIndexingPressureSettings = new ShardIndexingPressureSettings( - new 
ClusterService(settings, clusterSettings, null), + ClusterServiceUtils.createClusterService(settings, clusterSettings, null), settings, IndexingPressure.MAX_INDEXING_BYTES.get(settings).getBytes() ); diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureSettingsTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureSettingsTests.java index c555d8f9c489d..5e84a76b2250a 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureSettingsTests.java @@ -11,6 +11,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; public class ShardIndexingPressureSettingsTests extends OpenSearchTestCase { @@ -24,7 +25,7 @@ public class ShardIndexingPressureSettingsTests extends OpenSearchTestCase { .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - final ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + final ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); public void testFromSettings() { ShardIndexingPressureSettings shardIndexingPressureSettings = new ShardIndexingPressureSettings( diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java index 46f9801035ac3..d97eec4cc001d 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java @@ -8,10 +8,10 @@ package org.opensearch.index; -import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -22,7 +22,7 @@ public class ShardIndexingPressureStoreTests extends OpenSearchTestCase { private final Settings settings = Settings.builder().put(ShardIndexingPressureStore.MAX_COLD_STORE_SIZE.getKey(), 200).build(); private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); private final ShardIndexingPressureSettings shardIndexingPressureSettings = new ShardIndexingPressureSettings( - new ClusterService(settings, clusterSettings, null), + ClusterServiceUtils.createClusterService(settings, clusterSettings, null), settings, IndexingPressure.MAX_INDEXING_BYTES.get(settings).getBytes() ); diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureTests.java index e7600b1d4c41a..ddc3592511de4 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureTests.java @@ -17,6 +17,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.stats.IndexingPressurePerShardStats; import org.opensearch.index.stats.IndexingPressureStats; +import org.opensearch.test.ClusterServiceUtils; import 
org.opensearch.test.OpenSearchTestCase; public class ShardIndexingPressureTests extends OpenSearchTestCase { @@ -30,7 +31,7 @@ public class ShardIndexingPressureTests extends OpenSearchTestCase { .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - final ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + final ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); public void testMemoryBytesMarkedAndReleased() { ShardIndexingPressure shardIndexingPressure = new ShardIndexingPressure(settings, clusterService); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java index d8220c93e4eeb..a2cd3f6b803e0 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java @@ -163,7 +163,7 @@ public void testMaybeUpdateRemoteStorePathStrategyExecutes() { .build(), logger ); - migrationIndexMetadataUpdater.maybeUpdateRemoteStorePathStrategy(builder, indexName); + migrationIndexMetadataUpdater.maybeUpdateRemoteStoreCustomMetadata(builder, indexName); assertCustomPathMetadataIsPresent(builder.build()); } @@ -186,7 +186,7 @@ public void testMaybeUpdateRemoteStorePathStrategyDoesNotExecute() { logger ); - migrationIndexMetadataUpdater.maybeUpdateRemoteStorePathStrategy(builder, indexName); + migrationIndexMetadataUpdater.maybeUpdateRemoteStoreCustomMetadata(builder, indexName); assertCustomPathMetadataIsPresent(builder.build()); } @@ -298,7 +298,14 @@ public static Metadata createIndexMetadataWithRemoteStoreSettings(String indexNa ) .putCustom( REMOTE_STORE_CUSTOM_KEY, - Map.of(RemoteStoreEnums.PathType.NAME, "dummy", RemoteStoreEnums.PathHashAlgorithm.NAME, "dummy") + Map.of( + RemoteStoreEnums.PathType.NAME, + "dummy", + RemoteStoreEnums.PathHashAlgorithm.NAME, + "dummy", + IndexMetadata.TRANSLOG_METADATA_KEY, + "dummy" + ) ) .build(); return Metadata.builder().put(indexMetadata).build(); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java index ccdd1fe4ab609..280598c516c3c 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -15,6 +15,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.DirectoryFileTransferTracker; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -42,7 +43,7 @@ public class RemoteSegmentTransferTrackerTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git 
a/server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java new file mode 100644 index 0000000000000..7e702ad3773e8 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java @@ -0,0 +1,223 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteStoreCustomMetadataResolverTests extends OpenSearchTestCase { + + RepositoriesService repositoriesService = mock(RepositoriesService.class); + Settings settings = Settings.EMPTY; + + public void testGetPathStrategyMinVersionOlder() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_13_0, + () -> repositoriesService, + settings + ); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + assertNull(resolver.getPathStrategy().getHashAlgorithm()); + } + + public void testGetPathStrategyMinVersionNewer() { + PathType pathType = randomFrom(PathType.values()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertEquals(pathType, resolver.getPathStrategy().getType()); + if (pathType.requiresHashAlgorithm()) { + assertNotNull(resolver.getPathStrategy().getHashAlgorithm()); + } else { + assertNull(resolver.getPathStrategy().getHashAlgorithm()); + } + } + + public void testGetPathStrategyStrategy() { + // FIXED type + Settings settings = 
Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + + // FIXED type with hash algorithm + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, 
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + } + + public void testGetPathStrategyStrategyWithDynamicUpdate() { + + // Default value + Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + assertNull(resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_PREFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_INFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + } + + public void testTranslogMetadataAllowedTrueWithMinVersionNewer() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), true).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + BlobStoreRepository repositoryMock = mock(BlobStoreRepository.class); + when(repositoriesService.repository(getRemoteStoreTranslogRepo(settings))).thenReturn(repositoryMock); + BlobStore blobStoreMock = mock(BlobStore.class); + when(repositoryMock.blobStore()).thenReturn(blobStoreMock); + when(blobStoreMock.isBlobMetadataEnabled()).thenReturn(true); + 
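For context on the three mocks just set up, here is a minimal sketch of the condition this test appears to arrange (a hypothetical helper inferred only from the assertions in this test class, not from the resolver's production code; the exact minimum supported version constant is not shown in this diff, only that V_2_14_0 is too old and Version.CURRENT is new enough):

// Hypothetical summary of the inputs these tests vary, not production code.
static boolean translogMetadataUsable(boolean settingEnabled, Version minNodeVersion, Version minSupportedVersion, BlobStore translogBlobStore) {
    // All three must line up: the CLUSTER_REMOTE_STORE_TRANSLOG_METADATA setting, a new enough
    // minimum node version, and a translog repository blob store that supports object metadata.
    return settingEnabled && minNodeVersion.onOrAfter(minSupportedVersion) && translogBlobStore.isBlobMetadataEnabled();
}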
RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.CURRENT, + () -> repositoriesService, + settings + ); + assertTrue(resolver.isTranslogMetadataEnabled()); + } + + public void testTranslogMetadataAllowedFalseWithMinVersionNewer() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), false).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.CURRENT, + () -> repositoriesService, + settings + ); + assertFalse(resolver.isTranslogMetadataEnabled()); + } + + public void testTranslogMetadataAllowedMinVersionOlder() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertFalse(resolver.isTranslogMetadataEnabled()); + } + +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java deleted file mode 100644 index de61c902bf13e..0000000000000 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.remote; - -import org.opensearch.Version; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; -import org.opensearch.index.remote.RemoteStoreEnums.PathType; -import org.opensearch.indices.RemoteStoreSettings; -import org.opensearch.test.OpenSearchTestCase; - -import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; -import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; - -public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { - - public void testGetMinVersionOlder() { - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_13_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - assertNull(resolver.get().getHashAlgorithm()); - } - - public void testGetMinVersionNewer() { - PathType pathType = randomFrom(PathType.values()); - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(pathType, resolver.get().getType()); - if (pathType.requiresHashAlgorithm()) { - assertNotNull(resolver.get().getHashAlgorithm()); - } else { - assertNull(resolver.get().getHashAlgorithm()); - } - } - - public void testGetStrategy() { - // FIXED type - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - - // FIXED type with hash algorithm - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - - // HASHED_PREFIX type with FNV_1A_COMPOSITE - settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, 
clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_COMPOSITE - settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_BASE64 - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_BASE64 - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - } - - public void testGetStrategyWithDynamicUpdate() { - - // Default value - Settings settings = Settings.builder().build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - assertNull(resolver.get().getHashAlgorithm()); - - // Set HASHED_PREFIX with default hash algorithm - clusterSettings.applySettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() - ); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm - clusterSettings.applySettings( - Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build() - ); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - 
assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - - // Set HASHED_INFIX with default hash algorithm - clusterSettings.applySettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() - ); - assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm - clusterSettings.applySettings( - Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build() - ); - assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - } -} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java index 9d00cf9f2be46..18d18f2dc30b1 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java @@ -14,6 +14,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShard; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -45,7 +46,7 @@ public class RemoteStorePressureServiceTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java index 064c6c10eba02..7c1ef0de91887 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java @@ -11,6 +11,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -27,7 +28,7 @@ public class RemoteStorePressureSettingsTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java index 
c300f316ac633..2bc4792a9c31c 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java @@ -13,6 +13,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShard; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -39,7 +40,11 @@ public void setUp() throws Exception { RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE ) .build(); - clusterService = new ClusterService(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool); + clusterService = ClusterServiceUtils.createClusterService( + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); } @@ -85,7 +90,11 @@ public void testInvalidMovingAverageWindowSize() { "Failed to parse value", IllegalArgumentException.class, () -> new RemoteStoreStatsTrackerFactory( - new ClusterService(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool), + ClusterServiceUtils.createClusterService( + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ), settings ) ); @@ -107,7 +116,11 @@ public void testUpdateAfterGetConfiguredSettings() { public void testGetDefaultSettings() { remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory( - new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool), + ClusterServiceUtils.createClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ), Settings.EMPTY ); // Check moving average window size updated diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index c1fc0cdaa0d3b..59c3d3dccdd0f 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -8,9 +8,13 @@ package org.opensearch.index.remote; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; @@ -26,7 +30,9 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.cluster.metadata.IndexMetadata.REMOTE_STORE_CUSTOM_KEY; import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET; +import static org.opensearch.index.remote.RemoteStoreUtils.determineTranslogMetadataEnabled; import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; import static 
org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong; @@ -40,6 +46,7 @@ public class RemoteStoreUtilsTests extends OpenSearchTestCase { private static Map BASE64_CHARSET_IDX_MAP; + private static String index = "test-index"; static { Map charToIndexMap = new HashMap<>(); @@ -341,4 +348,54 @@ static long compositeUrlBase64BinaryEncodingToLong(String encodedValue) { String binaryString = base64PartBinary + encodedValue.substring(1); return new BigInteger(binaryString, 2).longValue(); } + + public void testDeterdetermineTranslogMetadataEnabledWhenTrue() { + Metadata metadata = createIndexMetadataWithRemoteStoreSettings(index, 1); + IndexMetadata indexMetadata = metadata.index(index); + assertTrue(determineTranslogMetadataEnabled(indexMetadata)); + } + + public void testDeterdetermineTranslogMetadataEnabledWhenFalse() { + Metadata metadata = createIndexMetadataWithRemoteStoreSettings(index, 0); + IndexMetadata indexMetadata = metadata.index(index); + assertFalse(determineTranslogMetadataEnabled(indexMetadata)); + } + + public void testDeterdetermineTranslogMetadataEnabledWhenKeyNotFound() { + Metadata metadata = createIndexMetadataWithRemoteStoreSettings(index, 2); + IndexMetadata indexMetadata = metadata.index(index); + assertThrows(AssertionError.class, () -> determineTranslogMetadataEnabled(indexMetadata)); + } + + private static Metadata createIndexMetadataWithRemoteStoreSettings(String indexName, int option) { + IndexMetadata.Builder indexMetadata = IndexMetadata.builder(indexName); + indexMetadata.settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "dummy-tlog-repo") + .put(IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "dummy-segment-repo") + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), "SEGMENT") + .build() + ).putCustom(REMOTE_STORE_CUSTOM_KEY, getCustomDataMap(option)).build(); + return Metadata.builder().put(indexMetadata).build(); + } + + private static Map getCustomDataMap(int option) { + if (option > 1) { + return Map.of(); + } + String value = (option == 1) ? 
"true" : "false"; + return Map.of( + RemoteStoreEnums.PathType.NAME, + "dummy", + RemoteStoreEnums.PathHashAlgorithm.NAME, + "dummy", + IndexMetadata.TRANSLOG_METADATA_KEY, + value + ); + } + } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 33f6c67b94b3d..bb0776e0ced25 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -23,6 +23,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.InternalEngineFactory; @@ -34,9 +35,11 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -82,7 +85,7 @@ public void setup(boolean primary, int numberOfDocs) throws IOException { indexShard.refresh("test"); } - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool @@ -90,7 +93,12 @@ public void setup(boolean primary, int numberOfDocs) throws IOException { remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); RemoteSegmentTransferTracker tracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); - remoteStoreRefreshListener = new RemoteStoreRefreshListener(indexShard, SegmentReplicationCheckpointPublisher.EMPTY, tracker); + remoteStoreRefreshListener = new RemoteStoreRefreshListener( + indexShard, + SegmentReplicationCheckpointPublisher.EMPTY, + tracker, + DefaultRemoteStoreSettings.INSTANCE + ); } private void indexDocs(int startDocId, int numberOfDocs) throws IOException { @@ -175,7 +183,12 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); // Since the thrown IOException is caught in the constructor, ctor should be invoked successfully. - new RemoteStoreRefreshListener(shard, SegmentReplicationCheckpointPublisher.EMPTY, mock(RemoteSegmentTransferTracker.class)); + new RemoteStoreRefreshListener( + shard, + SegmentReplicationCheckpointPublisher.EMPTY, + mock(RemoteSegmentTransferTracker.class), + DefaultRemoteStoreSettings.INSTANCE + ); // Validate that the stream of metadata file of remoteMetadataDirectory has been opened only once and the // listFilesByPrefixInLexicographicOrder has been called twice. 
@@ -370,6 +383,33 @@ public void testRefreshSuccessOnSecondAttempt() throws Exception { assertNoLagAndTotalUploadsFailed(segmentTracker, 1); } + public void testSegmentUploadTimeout() throws Exception { + // This covers the case where segment upload fails due to timeout + int succeedOnAttempt = 1; + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running remote time as per the expectation. + CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); + CountDownLatch successLatch = new CountDownLatch(2); + Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh( + succeedOnAttempt, + refreshCountLatch, + successLatch, + 1, + new CountDownLatch(0), + true, + true + ); + assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); + assertBusy(() -> assertEquals(1, successLatch.getCount())); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); + assertBusy(() -> { + assertTrue(segmentTracker.getTotalUploadsFailed() > 1); + assertTrue(segmentTracker.getTotalUploadsSucceeded() < 2); + }); + // shutdown threadpool for avoid leaking threads + indexShard.getThreadPool().shutdownNow(); + } + /** * Tests retry flow after snapshot and metadata files have been uploaded to remote store in the failed attempt. * Snapshot and metadata files created in failed attempt should not break retry. @@ -469,6 +509,7 @@ public void testRefreshFailedDueToPrimaryTermMisMatch() throws Exception { successLatch, checkpointPublishSucceedOnAttempt, reachedCheckpointPublishLatch, + false, false ); @@ -520,7 +561,8 @@ private Tuple mockIn successLatch, succeedCheckpointPublishOnAttempt, reachedCheckpointPublishLatch, - true + true, + false ); } @@ -530,7 +572,8 @@ private Tuple mockIn CountDownLatch successLatch, int succeedCheckpointPublishOnAttempt, CountDownLatch reachedCheckpointPublishLatch, - boolean mockPrimaryTerm + boolean mockPrimaryTerm, + boolean testUploadTimeout ) throws IOException { // Create index shard that we will be using to mock different methods in IndexShard for the unit test indexShard = newStartedShard( @@ -564,9 +607,22 @@ private Tuple mockIn // Mock (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) Store remoteStore = mock(Store.class); when(shard.remoteStore()).thenReturn(remoteStore); - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = - (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()) - .getDelegate(); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory; + RemoteDirectory remoteDirectory = mock(RemoteDirectory.class); + + if (testUploadTimeout) { + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDirectory, + mock(RemoteDirectory.class), + mock(RemoteStoreLockManager.class), + indexShard.getThreadPool(), + indexShard.shardId + ); + } else { + remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore() + .directory()).getDelegate()).getDelegate(); + } + FilterDirectory remoteStoreFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(remoteSegmentStoreDirectory)); when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); @@ -627,7 +683,7 @@ private Tuple mockIn return null; }).when(emptyCheckpointPublisher).publish(any(), any()); - clusterService = new ClusterService( + 
clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool @@ -638,7 +694,28 @@ private Tuple mockIn RemoteStoreSettings remoteStoreSettings = mock(RemoteStoreSettings.class); when(remoteStoreSettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); when(shard.getRemoteStoreSettings()).thenReturn(remoteStoreSettings); - RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); + if (testUploadTimeout) { + when(remoteStoreSettings.getClusterRemoteSegmentTransferTimeout()).thenReturn(TimeValue.timeValueMillis(10)); + doAnswer(invocation -> { + ActionListener actionListener = invocation.getArgument(5); + indexShard.getThreadPool().executor(ThreadPool.Names.GENERIC).execute(() -> { + try { + Thread.sleep(30000); + } catch (InterruptedException e) { + logger.warn("copyFrom thread interrupted during sleep"); + } + actionListener.onResponse(null); + }); + return true; + }).when(remoteDirectory).copyFrom(any(), any(), any(), any(), any(), any(ActionListener.class), any(Boolean.class)); + } + + RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener( + shard, + emptyCheckpointPublisher, + tracker, + remoteStoreSettings + ); refreshListener.afterRefresh(true); return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); } diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java index e4f5a454b15f6..3ed90c0837663 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java @@ -26,14 +26,21 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.Base64; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; + public class BlobStoreTransferServiceTests extends OpenSearchTestCase { private ThreadPool threadPool; @@ -144,4 +151,37 @@ private Environment createEnvironment() { .build() ); } + + public void testBuildTransferFileMetadata_EmptyInputStream() throws IOException { + InputStream emptyInputStream = new ByteArrayInputStream(new byte[0]); + Map metadata = BlobStoreTransferService.buildTransferFileMetadata(emptyInputStream); + assertTrue(metadata.containsKey(CHECKPOINT_FILE_DATA_KEY)); + assertEquals("", metadata.get(CHECKPOINT_FILE_DATA_KEY)); + } + + public void testBuildTransferFileMetadata_NonEmptyInputStream() throws IOException { + String inputData = "This is a test input stream."; + InputStream inputStream = new ByteArrayInputStream(inputData.getBytes(StandardCharsets.UTF_8)); + Map metadata = BlobStoreTransferService.buildTransferFileMetadata(inputStream); + assertTrue(metadata.containsKey(CHECKPOINT_FILE_DATA_KEY)); + String expectedBase64String = 
Base64.getEncoder().encodeToString(inputData.getBytes(StandardCharsets.UTF_8)); + assertEquals(expectedBase64String, metadata.get(CHECKPOINT_FILE_DATA_KEY)); + } + + public void testBuildTransferFileMetadata_InputStreamExceedsLimit() { + byte[] largeData = new byte[1025]; // 1025 bytes, exceeding the 1KB limit + InputStream largeInputStream = new ByteArrayInputStream(largeData); + IOException exception = assertThrows(IOException.class, () -> BlobStoreTransferService.buildTransferFileMetadata(largeInputStream)); + assertEquals(exception.getMessage(), "Input stream exceeds 1KB limit"); + } + + public void testBuildTransferFileMetadata_SmallInputStreamOptimization() throws IOException { + String inputData = "Small input"; + InputStream inputStream = new ByteArrayInputStream(inputData.getBytes(StandardCharsets.UTF_8)); + Map metadata = BlobStoreTransferService.buildTransferFileMetadata(inputStream); + assertTrue(metadata.containsKey(CHECKPOINT_FILE_DATA_KEY)); + String expectedBase64String = Base64.getEncoder().encodeToString(inputData.getBytes(StandardCharsets.UTF_8)); + assertEquals(expectedBase64String, metadata.get(CHECKPOINT_FILE_DATA_KEY)); + } + } diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 8b3fc6651a505..c6f9838ad2d52 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -15,6 +15,7 @@ import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.collect.Tuple; @@ -41,9 +42,12 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.UUID; @@ -53,15 +57,17 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.anySet; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -81,6 +87,7 @@ public class TranslogTransferManagerTests extends OpenSearchTestCase { FileTransferTracker tracker; TranslogTransferManager translogTransferManager; long delayForBlobDownload; + boolean isTranslogMetadataEnabled; @Override public 
void setUp() throws Exception { @@ -97,6 +104,7 @@ public void setUp() throws Exception { tlogBytes = "Hello Translog".getBytes(StandardCharsets.UTF_8); ckpBytes = "Hello Checkpoint".getBytes(StandardCharsets.UTF_8); tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0), remoteTranslogTransferTracker); + isTranslogMetadataEnabled = false; translogTransferManager = new TranslogTransferManager( shardId, transferService, @@ -104,7 +112,8 @@ public void setUp() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); delayForBlobDownload = 1; @@ -170,7 +179,8 @@ public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -222,7 +232,8 @@ public void testTransferSnapshotOnUploadTimeout() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker, - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); SetOnce exception = new SetOnce<>(); translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -265,7 +276,8 @@ public void testTransferSnapshotOnThreadInterrupt() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); SetOnce exception = new SetOnce<>(); @@ -297,64 +309,66 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { uploadThread.get().interrupt(); } - private TransferSnapshot createTransferSnapshot() { - return new TransferSnapshot() { - @Override - public Set getCheckpointFileSnapshots() { - try { - return Set.of( - new CheckpointFileSnapshot( - primaryTerm, - generation, - minTranslogGeneration, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.CHECKPOINT_SUFFIX), - null - ), - new CheckpointFileSnapshot( - primaryTerm, - generation, - minTranslogGeneration, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.CHECKPOINT_SUFFIX), - null - ) - ); - } catch (IOException e) { - throw new AssertionError("Failed to create temp file", e); + private TransferSnapshot createTransferSnapshot() throws IOException { + try { + CheckpointFileSnapshot checkpointFileSnapshot1 = new CheckpointFileSnapshot( + primaryTerm, + generation, + minTranslogGeneration, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.CHECKPOINT_SUFFIX), + null + ); + CheckpointFileSnapshot checkpointFileSnapshot2 = new CheckpointFileSnapshot( + primaryTerm, + generation, + minTranslogGeneration, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.CHECKPOINT_SUFFIX), + null + ); + TranslogFileSnapshot translogFileSnapshot1 = new TranslogFileSnapshot( + primaryTerm, + generation, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.TRANSLOG_FILE_SUFFIX), + null + ); + TranslogFileSnapshot translogFileSnapshot2 = new TranslogFileSnapshot( + primaryTerm, + generation - 1, + 
createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.TRANSLOG_FILE_SUFFIX), + null + ); + + return new TransferSnapshot() { + @Override + public Set getCheckpointFileSnapshots() { + return Set.of(checkpointFileSnapshot1, checkpointFileSnapshot2); } - } - @Override - public Set getTranslogFileSnapshots() { - try { - return Set.of( - new TranslogFileSnapshot( - primaryTerm, - generation, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.TRANSLOG_FILE_SUFFIX), - null - ), - new TranslogFileSnapshot( - primaryTerm, - generation - 1, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.TRANSLOG_FILE_SUFFIX), - null - ) - ); - } catch (IOException e) { - throw new AssertionError("Failed to create temp file", e); + @Override + public Set getTranslogFileSnapshots() { + return Set.of(translogFileSnapshot1, translogFileSnapshot2); } - } - @Override - public TranslogTransferMetadata getTranslogTransferMetadata() { - return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); - } + @Override + public TranslogTransferMetadata getTranslogTransferMetadata() { + return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); + } - @Override - public String toString() { - return "test-to-string"; - } - }; + @Override + public Set getTranslogFileSnapshotWithMetadata() throws IOException { + translogFileSnapshot1.setMetadataFileInputStream(checkpointFileSnapshot1.inputStream()); + translogFileSnapshot2.setMetadataFileInputStream(checkpointFileSnapshot2.inputStream()); + return Set.of(translogFileSnapshot1, translogFileSnapshot2); + } + + @Override + public String toString() { + return "test-to-string"; + } + }; + } catch (Exception e) { + throw new IOException("Failed to create transfer snapshot"); + } } public void testReadMetadataNoFile() throws IOException { @@ -502,7 +516,8 @@ public void testDeleteTranslogSuccess() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -567,7 +582,8 @@ public void testDeleteTranslogFailure() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -624,4 +640,117 @@ public void testMetadataConflict() throws InterruptedException { assertThrows(RuntimeException.class, translogTransferManager::readMetadata); } + + // tests for cases when ckp is stored as translog metadata. 
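Before these new tests, a short orientation on the mechanism they exercise: when translog metadata is enabled, the checkpoint (.ckp) content travels as object metadata on the .tlog blob, keyed by CHECKPOINT_FILE_DATA_KEY, so a single downloadBlobWithMetadata call restores both files. A minimal round-trip sketch using only names already visible in this diff (the concrete file name and target path are illustrative):

// Upload side: Base64-encode the checkpoint bytes and attach them as blob metadata.
String encodedCkp = Base64.getEncoder().encodeToString(ckpBytes);
Map<String, String> blobMetadata = Map.of(CHECKPOINT_FILE_DATA_KEY, encodedCkp);
// Download side: decode the same metadata entry and reconstruct the .ckp file next to the .tlog.
byte[] restoredCkp = Base64.getDecoder().decode(blobMetadata.get(CHECKPOINT_FILE_DATA_KEY));
Files.write(location.resolve("translog-23.ckp"), restoredCkp);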
+ public void testTransferSnapshotWithTranslogMetadata() throws Exception { + AtomicInteger fileTransferSucceeded = new AtomicInteger(); + AtomicInteger fileTransferFailed = new AtomicInteger(); + AtomicInteger translogTransferSucceeded = new AtomicInteger(); + AtomicInteger translogTransferFailed = new AtomicInteger(); + + isTranslogMetadataEnabled = true; + + doNothing().when(transferService) + .uploadBlob( + any(TransferFileSnapshot.class), + Mockito.eq(remoteBaseTransferPath.add(String.valueOf(primaryTerm))), + any(WritePriority.class) + ); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + Set transferFileSnapshots = (Set) invocationOnMock.getArguments()[0]; + transferFileSnapshots.forEach(transferFileSnapshot -> { + assertNotNull(transferFileSnapshot.getMetadataFileInputStream()); + listener.onResponse(transferFileSnapshot); + }); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ) { + @Override + public void onSuccess(TransferFileSnapshot fileSnapshot) { + fileTransferSucceeded.incrementAndGet(); + super.onSuccess(fileSnapshot); + } + + @Override + public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { + fileTransferFailed.incrementAndGet(); + super.onFailure(fileSnapshot, e); + } + + }; + + translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), + fileTransferTracker, + remoteTranslogTransferTracker, + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled + ); + + assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) { + translogTransferSucceeded.incrementAndGet(); + } + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + translogTransferFailed.incrementAndGet(); + } + })); + assertEquals(2, fileTransferSucceeded.get()); + assertEquals(0, fileTransferFailed.get()); + assertEquals(1, translogTransferSucceeded.get()); + assertEquals(0, translogTransferFailed.get()); + assertEquals(2, fileTransferTracker.allUploaded().size()); + } + + public void testDownloadTranslogWithMetadata() throws IOException { + isTranslogMetadataEnabled = true; + translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), + tracker, + remoteTranslogTransferTracker, + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled + ); + Path location = createTempDir(); + assertFalse(Files.exists(location.resolve("translog-23.tlog"))); + assertFalse(Files.exists(location.resolve("translog-23.ckp"))); + mockDownloadBlobWithMetadataResponse(); + translogTransferManager.downloadTranslog("12", "23", location); + verify(transferService, times(0)).downloadBlob(any(BlobPath.class), eq("translog-23.tlog")); + verify(transferService, times(0)).downloadBlob(any(BlobPath.class), eq("translog-23.ckp")); + verify(transferService, times(1)).downloadBlobWithMetadata(any(BlobPath.class), eq("translog-23.tlog")); + assertTrue(Files.exists(location.resolve("translog-23.tlog"))); + 
assertTrue(Files.exists(location.resolve("translog-23.ckp"))); + assertTlogCkpDownloadStatsWithMetadata(); + } + + private void mockDownloadBlobWithMetadataResponse() throws IOException { + Map metadata = new HashMap<>(); + String ckpDataString = Base64.getEncoder().encodeToString(ckpBytes); + metadata.put(CHECKPOINT_FILE_DATA_KEY, ckpDataString); + when(transferService.downloadBlobWithMetadata(any(BlobPath.class), eq("translog-23.tlog"))).thenAnswer(invocation -> { + Thread.sleep(delayForBlobDownload); + return new InputStreamWithMetadata(new ByteArrayInputStream(tlogBytes), metadata); + }); + } + + private void assertTlogCkpDownloadStatsWithMetadata() { + assertEquals(tlogBytes.length, remoteTranslogTransferTracker.getDownloadBytesSucceeded()); + // Expect delay for both tlog and ckp file + assertTrue(remoteTranslogTransferTracker.getTotalDownloadTimeInMillis() >= delayForBlobDownload); + } } diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 17bd821ed0c8c..8f1d58cf201e9 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -314,7 +314,8 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m systemIndices, true, awarenessReplicaBalance, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); transportCloseIndexAction = new TransportCloseIndexAction( diff --git a/server/src/test/java/org/opensearch/node/NodeTests.java b/server/src/test/java/org/opensearch/node/NodeTests.java index d91dc696eb30b..f44cc352cd330 100644 --- a/server/src/test/java/org/opensearch/node/NodeTests.java +++ b/server/src/test/java/org/opensearch/node/NodeTests.java @@ -34,17 +34,23 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.bootstrap.BootstrapCheck; import org.opensearch.bootstrap.BootstrapContext; +import org.opensearch.client.Client; import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; @@ -56,22 +62,35 @@ import org.opensearch.monitor.fs.FsProbe; import org.opensearch.plugins.CircuitBreakerPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.TelemetryAwarePlugin; +import org.opensearch.plugins.TelemetryPlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.script.ScriptService; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import 
org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockHttpTransport; import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.watcher.ResourceWatcherService; import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -404,6 +423,81 @@ public void testCreateWithFileCache() throws Exception { } } + public void testTelemetryAwarePlugins() throws IOException { + Settings.Builder settings = baseSettings(); + List> plugins = basePlugins(); + plugins.add(MockTelemetryAwarePlugin.class); + try (Node node = new MockNode(settings.build(), plugins)) { + MockTelemetryAwareComponent mockTelemetryAwareComponent = node.injector().getInstance(MockTelemetryAwareComponent.class); + assertNotNull(mockTelemetryAwareComponent.getTracer()); + assertNotNull(mockTelemetryAwareComponent.getMetricsRegistry()); + TelemetryAwarePlugin telemetryAwarePlugin = node.getPluginsService().filterPlugins(TelemetryAwarePlugin.class).get(0); + assertTrue(telemetryAwarePlugin instanceof MockTelemetryAwarePlugin); + } + } + + public void testTelemetryPluginShouldNOTImplementTelemetryAwarePlugin() throws IOException { + Settings.Builder settings = baseSettings(); + List> plugins = basePlugins(); + plugins.add(MockTelemetryPlugin.class); + FeatureFlagSetter.set(FeatureFlags.TELEMETRY); + settings.put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true); + assertThrows(IllegalStateException.class, () -> new MockNode(settings.build(), plugins)); + } + + private static class MockTelemetryAwareComponent { + private final Tracer tracer; + private final MetricsRegistry metricsRegistry; + + public MockTelemetryAwareComponent(Tracer tracer, MetricsRegistry metricsRegistry) { + this.tracer = tracer; + this.metricsRegistry = metricsRegistry; + } + + public Tracer getTracer() { + return tracer; + } + + public MetricsRegistry getMetricsRegistry() { + return metricsRegistry; + } + } + + public static class MockTelemetryAwarePlugin extends Plugin implements TelemetryAwarePlugin { + @Override + public Collection createComponents( + Client client, + ClusterService clusterService, + ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + Environment environment, + NodeEnvironment nodeEnvironment, + NamedWriteableRegistry namedWriteableRegistry, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier repositoriesServiceSupplier, + Tracer tracer, + MetricsRegistry metricsRegistry + ) { + return List.of(new MockTelemetryAwareComponent(tracer, metricsRegistry)); + } + + } + + public static class MockTelemetryPlugin extends Plugin implements TelemetryPlugin, TelemetryAwarePlugin { + + @Override + public Optional 
getTelemetry(TelemetrySettings telemetrySettings) { + return Optional.empty(); + } + + @Override + public String getName() { + return null; + } + } + public static class MockCircuitBreakerPlugin extends Plugin implements CircuitBreakerPlugin { private SetOnce myCircuitBreaker = new SetOnce<>(); diff --git a/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java index 7ca1f1e864b99..7567224f16ac3 100644 --- a/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java +++ b/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -63,7 +64,7 @@ public class ResponseCollectorServiceTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadpool = new TestThreadPool("response_collector_tests"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadpool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java index 4f615290f1805..fbb083a3ae419 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java @@ -17,6 +17,7 @@ import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -33,7 +34,7 @@ public class AdmissionControlServiceTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java index c11ee1cc608f6..fbadcad804b31 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java @@ -13,6 +13,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.ClusterServiceUtils; import 
org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -28,7 +29,7 @@ public class AdmissionControlSettingsTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java index e72c0cd58ed64..f2cb45a033460 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java @@ -15,6 +15,7 @@ import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -31,7 +32,7 @@ public class CpuBasedAdmissionControllerTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java index c5a2208f49ce6..54cb438e14ce6 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java @@ -15,6 +15,7 @@ import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -31,7 +32,7 @@ public class IoBasedAdmissionControllerTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git 
a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java index 6836ecb3d615f..f5686f33e7f50 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java @@ -13,6 +13,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -28,7 +29,7 @@ public class CPUBasedAdmissionControllerSettingsTests extends OpenSearchTestCase public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java index c462f9700264d..3f157531f6c9a 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java @@ -21,6 +21,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -36,7 +37,7 @@ public class IoBasedAdmissionControllerSettingsTests extends OpenSearchTestCase public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool("io_based_admission_controller_settings_test"); - clusterService = new ClusterService( + clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java index 7b4db5f787d6e..da57ef9f06a1a 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java @@ -20,6 +20,7 @@ import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import 
org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -46,7 +47,7 @@ public void setUp() throws Exception { ) .build(); threadPool = new TestThreadPool("admission_controller_settings_test"); - ClusterService clusterService = new ClusterService( + ClusterService clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java index fe0399e79a5f4..0ef9aa61bb827 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java @@ -20,6 +20,7 @@ import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -43,7 +44,7 @@ public void setUp() throws Exception { ) .build(); threadPool = new TestThreadPool("admission_controller_settings_test"); - ClusterService clusterService = new ClusterService( + ClusterService clusterService = ClusterServiceUtils.createClusterService( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsActionTests.java index 6883eccbf7427..818a2fa18d751 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsActionTests.java @@ -130,10 +130,12 @@ public void testUnrecognizedIndexMetricDidYouMean() { ); } - public void testIndexMetricsRequestWithoutIndicesMetric() throws IOException { + public void testIndexMetricsRequestWithoutIndicesAndCachesMetrics() throws IOException { final HashMap params = new HashMap<>(); final Set metrics = new HashSet<>(RestNodesStatsAction.METRICS.keySet()); metrics.remove("indices"); + // caches stats is handled separately + metrics.remove("caches"); params.put("metric", randomSubsetOf(1, metrics).get(0)); final String indexMetric = randomSubsetOf(1, RestNodesStatsAction.FLAGS.keySet()).get(0); params.put("index_metric", indexMetric); @@ -150,6 +152,19 @@ public void testIndexMetricsRequestWithoutIndicesMetric() throws IOException { ); } + public void testCacheStatsRequestWithInvalidCacheType() throws IOException { + final HashMap params = new HashMap<>(); + params.put("metric", "caches"); + final String cacheType = randomAlphaOfLength(64); + params.put("index_metric", cacheType); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_nodes/stats").withParams(params).build(); + final IllegalArgumentException e = expectThrows( + 
IllegalArgumentException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request [/_nodes/stats] contains unrecognized cache type: [" + cacheType + "]"))); + } + public void testIndexMetricsRequestOnAllRequest() throws IOException { final HashMap params = new HashMap<>(); params.put("metric", "_all"); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 6d105c27a692f..d97cfdf003600 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -353,33 +353,40 @@ private void testSimple( newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) ) ) { + List documents = new ArrayList<>(); Document document = new Document(); addFieldConsumer.apply(document, "string", "a"); addFieldConsumer.apply(document, "string", "b"); - indexWriter.addDocument(document); + documents.add(document); + document = new Document(); addFieldConsumer.apply(document, "string", ""); addFieldConsumer.apply(document, "string", "c"); addFieldConsumer.apply(document, "string", "a"); - indexWriter.addDocument(document); + documents.add(document); + document = new Document(); addFieldConsumer.apply(document, "string", "b"); addFieldConsumer.apply(document, "string", "d"); - indexWriter.addDocument(document); + documents.add(document); + document = new Document(); addFieldConsumer.apply(document, "string", ""); if (includeDocCountField) { // Adding _doc_count to one document document.add(new NumericDocValuesField("_doc_count", 10)); } - indexWriter.addDocument(document); + documents.add(document); if (includeDeletedDocumentsInSegment) { document = new Document(); ADD_SORTED_SET_FIELD_INDEXED.apply(document, "string", "e"); - indexWriter.addDocument(document); + documents.add(document); + indexWriter.addDocuments(documents); indexWriter.deleteDocuments(new Term("string", "e")); assertEquals(5, indexWriter.getDocStats().maxDoc); // deleted document still in segment + } else { + indexWriter.addDocuments(documents); } try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 95a343f3b4025..460aaa08a224d 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -112,6 +112,7 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterInfo; import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -1922,7 +1923,13 @@ private final class TestClusterNode { settings, clusterSettings, clusterManagerService, - new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { + new ClusterApplierService( + node.getName(), + settings, + clusterSettings, + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + ) { @Override protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { return new 
MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue, threadPool); @@ -2164,7 +2171,8 @@ public void onFailure(final Exception e) { systemIndices, false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); actions.put( CreateIndexAction.INSTANCE, diff --git a/settings.gradle b/settings.gradle index 065a8dc3d0a8a..f58a8b35b10eb 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.17.3" + id "com.gradle.enterprise" version "3.17.4" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 33635f2053a21..d8d5d95651b5e 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -71,7 +71,7 @@ dependencies { api 'org.apache.zookeeper:zookeeper:3.9.2' api "org.apache.commons:commons-text:1.12.0" api "commons-net:commons-net:3.10.0" - api "ch.qos.logback:logback-core:1.5.3" + api "ch.qos.logback:logback-core:1.5.6" api "ch.qos.logback:logback-classic:1.2.13" api 'org.apache.kerby:kerb-admin:2.0.3' runtimeOnly "com.google.guava:guava:${versions.guava}" diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 0754cc1793dc8..3ae737bf63923 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -39,6 +39,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskListener; @@ -89,6 +90,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; @@ -1144,7 +1146,8 @@ protected Optional getDisruptableMockTransport(Transpo settings, clusterSettings, deterministicTaskQueue, - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService); clusterService.setNodeConnectionsService( @@ -1594,9 +1597,10 @@ static class DisruptableClusterApplierService extends ClusterApplierService { Settings settings, ClusterSettings clusterSettings, DeterministicTaskQueue deterministicTaskQueue, - ThreadPool threadPool + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics ) { - super(nodeName, settings, clusterSettings, threadPool); + super(nodeName, settings, clusterSettings, threadPool, clusterManagerMetrics); this.nodeName = nodeName; this.deterministicTaskQueue = deterministicTaskQueue; addStateApplier(event -> { diff --git 
a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java index 3ca938c99b5fd..53ef595c7931e 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.ClusterStatePublisher.AckListener; import org.opensearch.common.UUIDs; @@ -45,6 +46,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -74,7 +76,8 @@ public FakeThreadPoolClusterManagerService( super( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); this.name = serviceName; this.onTaskAvailableToRun = onTaskAvailableToRun; diff --git a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java index 8f4f510da5ec3..f0c0e9bc2d589 100644 --- a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.core.util.Throwables; import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -52,6 +53,8 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.threadpool.ThreadPool; import java.util.Collections; @@ -66,7 +69,8 @@ public static ClusterManagerService createClusterManagerService(ThreadPool threa ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_cluster_manager_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { @@ -169,8 +173,22 @@ public static ClusterService createClusterService(ThreadPool threadPool, Discove } public static ClusterService createClusterService(ThreadPool threadPool, DiscoveryNode localNode, ClusterSettings clusterSettings) { + return createClusterService(threadPool, localNode, clusterSettings, NoopMetricsRegistry.INSTANCE); + } + + public static ClusterService 
createClusterService( + ThreadPool threadPool, + DiscoveryNode localNode, + ClusterSettings clusterSettings, + MetricsRegistry metricsRegistry + ) { Settings settings = Settings.builder().put("node.name", "test").put("cluster.name", "ClusterServiceTests").build(); - ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool); + ClusterService clusterService = new ClusterService( + settings, + clusterSettings, + threadPool, + new ClusterManagerMetrics(metricsRegistry) + ); clusterService.setNodeConnectionsService(createNoOpNodeConnectionsService()); ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).clusterManagerNodeId(localNode.getId())) @@ -184,6 +202,10 @@ public static ClusterService createClusterService(ThreadPool threadPool, Discove return clusterService; } + public static ClusterService createClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + return new ClusterService(settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); + } + public static NodeConnectionsService createNoOpNodeConnectionsService() { return new NodeConnectionsService(Settings.EMPTY, null, null) { @Override diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index a9f6fdc86155d..0eca08a7678ae 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2640,6 +2640,7 @@ private static Settings buildRemoteStoreNodeAttributes( .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()); return settings.build(); } diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java index 53a4e90adb976..fbb39c284f0ff 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java @@ -31,6 +31,14 @@ public class TestShardBatchGatewayAllocator extends ShardsBatchGatewayAllocator { + public TestShardBatchGatewayAllocator() { + + } + + public TestShardBatchGatewayAllocator(long maxBatchSize) { + super(maxBatchSize); + } + Map> knownAllocations = new HashMap<>(); DiscoveryNodes currentNodes = DiscoveryNodes.EMPTY_NODES; Map shardIdNodeToReplicationCheckPointMap = new HashMap<>();
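Note on the TelemetryAwarePlugin extension point exercised by the NodeTests changes above: the sketch below shows roughly how a downstream plugin could pick up the injected Tracer and MetricsRegistry without reaching into Node internals. It is a minimal illustration, not part of this patch; the plugin, component, and metric names are hypothetical, the createComponents signature mirrors the one shown in the diff, and the import paths plus the MetricsRegistry.createCounter / Counter.add calls are best-effort assumptions about the telemetry API.

    import java.util.Collection;
    import java.util.List;
    import java.util.function.Supplier;
    import org.opensearch.client.Client;
    import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
    import org.opensearch.cluster.service.ClusterService;
    import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
    import org.opensearch.core.xcontent.NamedXContentRegistry;
    import org.opensearch.env.Environment;
    import org.opensearch.env.NodeEnvironment;
    import org.opensearch.plugins.Plugin;
    import org.opensearch.plugins.TelemetryAwarePlugin;
    import org.opensearch.repositories.RepositoriesService;
    import org.opensearch.script.ScriptService;
    import org.opensearch.telemetry.metrics.Counter;
    import org.opensearch.telemetry.metrics.MetricsRegistry;
    import org.opensearch.telemetry.tracing.Tracer;
    import org.opensearch.threadpool.ThreadPool;
    import org.opensearch.watcher.ResourceWatcherService;

    // Hypothetical plugin: consumes the Tracer and MetricsRegistry handed in by the node.
    public class MyTelemetryAwarePlugin extends Plugin implements TelemetryAwarePlugin {

        @Override
        public Collection<Object> createComponents(
            Client client,
            ClusterService clusterService,
            ThreadPool threadPool,
            ResourceWatcherService resourceWatcherService,
            ScriptService scriptService,
            NamedXContentRegistry xContentRegistry,
            Environment environment,
            NodeEnvironment nodeEnvironment,
            NamedWriteableRegistry namedWriteableRegistry,
            IndexNameExpressionResolver indexNameExpressionResolver,
            Supplier<RepositoriesService> repositoriesServiceSupplier,
            Tracer tracer,
            MetricsRegistry metricsRegistry
        ) {
            // "my_plugin.requests.total" and the unit "1" are illustrative names, not taken from the patch.
            Counter requests = metricsRegistry.createCounter("my_plugin.requests.total", "Requests handled by MyTelemetryAwarePlugin", "1");
            // Components returned here are bound by the node injector, same as the mock component in NodeTests.
            return List.of(new RequestTracker(tracer, requests));
        }

        // Simple holder so other parts of the plugin can record against the counter.
        public static final class RequestTracker {
            private final Tracer tracer;
            private final Counter requests;

            public RequestTracker(Tracer tracer, Counter requests) {
                this.tracer = tracer;
                this.requests = requests;
            }

            public void onRequest() {
                requests.add(1);
            }
        }
    }

Compared with TelemetryPlugin, which provides a Telemetry implementation, TelemetryAwarePlugin only consumes telemetry; that split is why the second NodeTests case expects an IllegalStateException when one plugin tries to implement both.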
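On the TermsAggregatorTests change: documents are now collected into a list and indexed with a single IndexWriter.addDocuments call instead of one addDocument per document, so the batch lands together and the deleted-document bookkeeping (maxDoc of 5 with the deleted "e" document still in the segment) stays deterministic. A stripped-down sketch of the same pattern against plain Lucene, with illustrative field values:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.NoMergePolicy;
    import org.apache.lucene.store.ByteBuffersDirectory;
    import org.apache.lucene.store.Directory;

    public final class AddDocumentsBatchExample {
        public static void main(String[] args) throws Exception {
            try (
                Directory dir = new ByteBuffersDirectory();
                IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))
            ) {
                List<Document> documents = new ArrayList<>();
                for (String value : new String[] { "a", "b", "c" }) {
                    Document document = new Document();
                    document.add(new StringField("string", value, Field.Store.NO));
                    documents.add(document);
                }
                // One addDocuments call indexes the whole batch together, instead of three
                // separate addDocument calls that could be spread across flushes.
                writer.addDocuments(documents);
                writer.commit();
                System.out.println("maxDoc=" + writer.getDocStats().maxDoc); // prints maxDoc=3
            }
        }
    }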
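The repeated test change from new ClusterService(...) to ClusterServiceUtils.createClusterService(...) leans on the overloads added to ClusterServiceUtils in this patch, which supply a ClusterManagerMetrics backed by NoopMetricsRegistry so individual tests do not have to track the widened ClusterService constructor. A minimal sketch of the intended usage in a test class; the class name and thread pool name are placeholders:

    import java.util.concurrent.TimeUnit;
    import org.opensearch.cluster.service.ClusterService;
    import org.opensearch.common.settings.ClusterSettings;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.test.ClusterServiceUtils;
    import org.opensearch.test.OpenSearchTestCase;
    import org.opensearch.threadpool.TestThreadPool;
    import org.opensearch.threadpool.ThreadPool;

    public class ExampleSettingsTests extends OpenSearchTestCase {
        private ThreadPool threadPool;
        private ClusterService clusterService;

        @Override
        public void setUp() throws Exception {
            super.setUp();
            threadPool = new TestThreadPool("example_settings_test");
            // The helper wires in ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) internally.
            clusterService = ClusterServiceUtils.createClusterService(
                Settings.EMPTY,
                new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
                threadPool
            );
        }

        @Override
        public void tearDown() throws Exception {
            super.tearDown();
            clusterService.close();
            ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
        }
    }

The no-op registry is only the test-framework default; production call sites construct ClusterService with a real MetricsRegistry so the new cluster-manager latency metrics are actually recorded.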