diff --git a/.github/workflows/standard-its.yml b/.github/workflows/standard-its.yml index b10351986468..a0d4f856e62a 100644 --- a/.github/workflows/standard-its.yml +++ b/.github/workflows/standard-its.yml @@ -77,7 +77,7 @@ jobs: strategy: fail-fast: false matrix: - testing_group: [query, query-retry, query-error, security, high-availability, centralized-table-schema] + testing_group: [query, query-retry, query-error, security, high-availability, centralized-datasource-schema] uses: ./.github/workflows/reusable-standard-its.yml if: ${{ needs.changes.outputs.core == 'true' || needs.changes.outputs.common-extensions == 'true' }} with: @@ -195,6 +195,6 @@ jobs: with: build_jdk: 8 runtime_jdk: 8 - testing_groups: -DexcludedGroups=batch-index,input-format,input-source,perfect-rollup-parallel-batch-index,kafka-index,query,query-retry,query-error,realtime-index,security,ldap-security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion,kinesis-index,kinesis-data-format,kafka-transactional-index,kafka-index-slow,kafka-transactional-index-slow,kafka-data-format,hadoop-s3-to-s3-deep-storage,hadoop-s3-to-hdfs-deep-storage,hadoop-azure-to-azure-deep-storage,hadoop-azure-to-hdfs-deep-storage,hadoop-gcs-to-gcs-deep-storage,hadoop-gcs-to-hdfs-deep-storage,aliyun-oss-deep-storage,append-ingestion,compaction,high-availability,upgrade,shuffle-deep-store,custom-coordinator-duties,centralized-table-schema + testing_groups: 
-DexcludedGroups=batch-index,input-format,input-source,perfect-rollup-parallel-batch-index,kafka-index,query,query-retry,query-error,realtime-index,security,ldap-security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion,kinesis-index,kinesis-data-format,kafka-transactional-index,kafka-index-slow,kafka-transactional-index-slow,kafka-data-format,hadoop-s3-to-s3-deep-storage,hadoop-s3-to-hdfs-deep-storage,hadoop-azure-to-azure-deep-storage,hadoop-azure-to-hdfs-deep-storage,hadoop-gcs-to-gcs-deep-storage,hadoop-gcs-to-hdfs-deep-storage,aliyun-oss-deep-storage,append-ingestion,compaction,high-availability,upgrade,shuffle-deep-store,custom-coordinator-duties,centralized-datasource-schema use_indexer: ${{ matrix.indexer }} group: other diff --git a/docs/api-reference/legacy-metadata-api.md b/docs/api-reference/legacy-metadata-api.md index baf9418f5caf..6eb54c279b31 100644 --- a/docs/api-reference/legacy-metadata-api.md +++ b/docs/api-reference/legacy-metadata-api.md @@ -118,7 +118,7 @@ Returns a list of all segments for each datasource with the full segment metadat `GET /druid/coordinator/v1/metadata/segments?includeOvershadowedStatus&includeRealtimeSegments` -Returns a list of all published and realtime segments for each datasource with the full segment metadata and extra fields `overshadowed`,`realtime` & `numRows`. Realtime segments are returned only when `druid.coordinator.centralizedTableSchema.enabled` is set on the Coordinator. +Returns a list of all published and realtime segments for each datasource with the full segment metadata and extra fields `overshadowed`,`realtime` & `numRows`. Realtime segments are returned only when `druid.centralizedDatasourceSchema.enabled` is set on the Coordinator. 
`GET /druid/coordinator/v1/metadata/segments?includeOvershadowedStatus&datasources={dataSourceName1}&datasources={dataSourceName2}` @@ -126,7 +126,7 @@ Returns a list of all segments for one or more specific datasources with the ful `GET /druid/coordinator/v1/metadata/segments?includeOvershadowedStatus&includeRealtimeSegments&datasources={dataSourceName1}&datasources={dataSourceName2}` -Returns a list of all published and realtime segments for the specified datasources with the full segment metadata and extra fields `overshadwed`,`realtime` & `numRows`. Realtime segments are returned only when `druid.coordinator.centralizedTableSchema.enabled` is set on the Coordinator. +Returns a list of all published and realtime segments for the specified datasources with the full segment metadata and extra fields `overshadowed`,`realtime` & `numRows`. Realtime segments are returned only when `druid.centralizedDatasourceSchema.enabled` is set on the Coordinator. `GET /druid/coordinator/v1/metadata/datasources` diff --git a/docs/configuration/index.md b/docs/configuration/index.md index c806bccb0ab9..c40af8ca8417 100644 --- a/docs/configuration/index.md +++ b/docs/configuration/index.md @@ -876,7 +876,7 @@ These Coordinator static configurations can be defined in the `coordinator/runti |`druid.coordinator.loadqueuepeon.repeatDelay`|The start and repeat delay for the `loadqueuepeon`, which manages the load and drop of segments.|`PT0.050S` (50 ms)| |`druid.coordinator.asOverlord.enabled`|Boolean value for whether this Coordinator service should act like an Overlord as well. This configuration allows users to simplify a Druid cluster by not having to deploy any standalone Overlord services. If set to true, then Overlord console is available at `http://coordinator-host:port/console.html` and be sure to set `druid.coordinator.asOverlord.overlordService` also.|false| |`druid.coordinator.asOverlord.overlordService`| Required, if `druid.coordinator.asOverlord.enabled` is `true`. 
This must be same value as `druid.service` on standalone Overlord services and `druid.selectors.indexing.serviceName` on Middle Managers.|NULL| -|`druid.coordinator.centralizedTableSchema.enabled`|Boolean flag for enabling table schema building on the Coordinator.|false| +|`druid.centralizedDatasourceSchema.enabled`|Boolean flag for enabling datasource schema building on the Coordinator.|false| ##### Metadata management diff --git a/integration-tests/docker/docker-compose.centralized-table-schema.yml b/integration-tests/docker/docker-compose.centralized-datasource-schema.yml similarity index 98% rename from integration-tests/docker/docker-compose.centralized-table-schema.yml rename to integration-tests/docker/docker-compose.centralized-datasource-schema.yml index 58f0f622c48a..abfe377c6572 100644 --- a/integration-tests/docker/docker-compose.centralized-table-schema.yml +++ b/integration-tests/docker/docker-compose.centralized-datasource-schema.yml @@ -35,7 +35,7 @@ services: service: druid-coordinator environment: - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_coordinator_centralizedTableSchema_enabled=true + - druid_centralizedDatasourceSchema_enabled=true - druid_coordinator_segmentMetadata_metadataRefreshPeriod=PT15S depends_on: - druid-overlord diff --git a/integration-tests/docker/druid.sh b/integration-tests/docker/druid.sh index f009197ed494..5aac15512f84 100755 --- a/integration-tests/docker/druid.sh +++ b/integration-tests/docker/druid.sh @@ -85,7 +85,7 @@ setupData() # The "query" and "security" test groups require data to be setup before running the tests. # In particular, they requires segments to be download from a pre-existing s3 bucket. # This is done by using the loadSpec put into metadatastore and s3 credientials set below. 
- if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "upgrade" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "centralized-table-schema" ]; then + if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "upgrade" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "centralized-datasource-schema" ]; then # touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72. 
find /var/lib/mysql -type f -exec touch {} \; && service mysql start \ && cat /test-data/${DRUID_INTEGRATION_TEST_GROUP}-sample-data.sql | mysql -u root druid \ diff --git a/integration-tests/script/docker_compose_args.sh b/integration-tests/script/docker_compose_args.sh index f2f98cd6c03c..b746a530a7d8 100644 --- a/integration-tests/script/docker_compose_args.sh +++ b/integration-tests/script/docker_compose_args.sh @@ -71,10 +71,10 @@ getComposeArgs() then # default + with override config + schema registry container echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml" - elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "centralized-table-schema" ] + elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "centralized-datasource-schema" ] then # cluster with overriden properties for broker and coordinator - echo "-f ${DOCKERDIR}/docker-compose.centralized-table-schema.yml" + echo "-f ${DOCKERDIR}/docker-compose.centralized-datasource-schema.yml" else # default echo "-f ${DOCKERDIR}/docker-compose.yml" diff --git a/integration-tests/src/test/java/org/apache/druid/tests/TestNGGroup.java b/integration-tests/src/test/java/org/apache/druid/tests/TestNGGroup.java index e3703ec71c9d..a54d22ef0216 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/TestNGGroup.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/TestNGGroup.java @@ -162,5 +162,5 @@ public class TestNGGroup public static final String HTTP_ENDPOINT = "http-endpoint"; - public static final String CENTRALIZED_TABLE_SCHEMA = "centralized-table-schema"; + public static final String CENTRALIZED_DATASOURCE_SCHEMA = "centralized-datasource-schema"; } diff --git a/integration-tests/src/test/java/org/apache/druid/tests/query/ITBroadcastJoinQueryTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITBroadcastJoinQueryTest.java index 09c2b6c49bcc..a4930a249985 100644 --- 
a/integration-tests/src/test/java/org/apache/druid/tests/query/ITBroadcastJoinQueryTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITBroadcastJoinQueryTest.java @@ -39,7 +39,7 @@ import org.testng.annotations.Guice; import org.testng.annotations.Test; -@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_TABLE_SCHEMA}) +@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_DATASOURCE_SCHEMA}) @Guice(moduleFactory = DruidTestModuleFactory.class) public class ITBroadcastJoinQueryTest extends AbstractIndexerTest { diff --git a/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java index e942fc6c6296..cbf7a78d3aa2 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java @@ -47,7 +47,7 @@ import java.util.Properties; import java.util.Set; -@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_TABLE_SCHEMA}) +@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_DATASOURCE_SCHEMA}) @Guice(moduleFactory = DruidTestModuleFactory.class) public class ITJdbcQueryTest { diff --git a/integration-tests/src/test/java/org/apache/druid/tests/query/ITSqlCancelTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITSqlCancelTest.java index dcbae71c37f9..4245df030bd2 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/query/ITSqlCancelTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITSqlCancelTest.java @@ -44,7 +44,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_TABLE_SCHEMA}) +@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_DATASOURCE_SCHEMA}) @Guice(moduleFactory = DruidTestModuleFactory.class) public class ITSqlCancelTest { diff --git 
a/integration-tests/src/test/java/org/apache/druid/tests/query/ITSystemTableQueryTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITSystemTableQueryTest.java index 9bcb47f1912d..d417737c58c8 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/query/ITSystemTableQueryTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITSystemTableQueryTest.java @@ -29,7 +29,7 @@ import org.testng.annotations.Guice; import org.testng.annotations.Test; -@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_TABLE_SCHEMA}) +@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_DATASOURCE_SCHEMA}) @Guice(moduleFactory = DruidTestModuleFactory.class) public class ITSystemTableQueryTest { diff --git a/integration-tests/src/test/java/org/apache/druid/tests/query/ITTwitterQueryTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITTwitterQueryTest.java index 41486902add3..8d6c3c38a081 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/query/ITTwitterQueryTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITTwitterQueryTest.java @@ -29,7 +29,7 @@ import org.testng.annotations.Guice; import org.testng.annotations.Test; -@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_TABLE_SCHEMA}) +@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_DATASOURCE_SCHEMA}) @Guice(moduleFactory = DruidTestModuleFactory.class) public class ITTwitterQueryTest { diff --git a/integration-tests/src/test/java/org/apache/druid/tests/query/ITUnionQueryTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITUnionQueryTest.java index 2e826609d729..7eedcbb73173 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/query/ITUnionQueryTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITUnionQueryTest.java @@ -58,7 +58,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; -@Test(groups = 
{TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_TABLE_SCHEMA}) +@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_DATASOURCE_SCHEMA}) @Guice(moduleFactory = DruidTestModuleFactory.class) public class ITUnionQueryTest extends AbstractIndexerTest { diff --git a/integration-tests/src/test/java/org/apache/druid/tests/query/ITWikipediaQueryTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITWikipediaQueryTest.java index ccbf73e4776e..f9c149e8ed05 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/query/ITWikipediaQueryTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITWikipediaQueryTest.java @@ -47,7 +47,7 @@ import java.util.UUID; import java.util.concurrent.Future; -@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_TABLE_SCHEMA}) +@Test(groups = {TestNGGroup.QUERY, TestNGGroup.CENTRALIZED_DATASOURCE_SCHEMA}) @Guice(moduleFactory = DruidTestModuleFactory.class) public class ITWikipediaQueryTest { diff --git a/server/src/main/java/org/apache/druid/server/http/MetadataResource.java b/server/src/main/java/org/apache/druid/server/http/MetadataResource.java index bd12e99dc2b5..3fc13469723e 100644 --- a/server/src/main/java/org/apache/druid/server/http/MetadataResource.java +++ b/server/src/main/java/org/apache/druid/server/http/MetadataResource.java @@ -156,7 +156,7 @@ public Response getAllUsedSegments( } if (includeOvershadowedStatus != null) { - // note that realtime segments are returned only when druid.coordinator.centralizedTableSchema.enabled is set on the Coordinator + // note that realtime segments are returned only when druid.centralizedDatasourceSchema.enabled is set on the Coordinator // when the feature is disabled we do not want to increase the payload size polled by the Brokers, since they already have this information return getAllUsedSegmentsWithAdditionalDetails(req, dataSources, includeRealtimeSegments); } diff --git 
a/services/src/main/java/org/apache/druid/cli/CliCoordinator.java b/services/src/main/java/org/apache/druid/cli/CliCoordinator.java index db7c3bfdfd48..d885b8748df0 100644 --- a/services/src/main/java/org/apache/druid/cli/CliCoordinator.java +++ b/services/src/main/java/org/apache/druid/cli/CliCoordinator.java @@ -155,7 +155,7 @@ public class CliCoordinator extends ServerRunnable { private static final Logger log = new Logger(CliCoordinator.class); private static final String AS_OVERLORD_PROPERTY = "druid.coordinator.asOverlord.enabled"; - private static final String CENTRALIZED_SCHEMA_MANAGEMENT_ENABLED = "druid.coordinator.centralizedTableSchema.enabled"; + private static final String CENTRALIZED_SCHEMA_MANAGEMENT_ENABLED = "druid.centralizedDatasourceSchema.enabled"; private Properties properties; private boolean beOverlord; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/MetadataSegmentView.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/MetadataSegmentView.java index 9bbe1491a324..89280252bc66 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/MetadataSegmentView.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/MetadataSegmentView.java @@ -194,7 +194,7 @@ private JsonParserIterator getMetadataSegments( ) { // includeRealtimeSegments flag would additionally request realtime segments - // note that realtime segments are returned only when druid.coordinator.centralizedTableSchema.enabled is set on the Coordinator + // note that realtime segments are returned only when druid.centralizedDatasourceSchema.enabled is set on the Coordinator StringBuilder queryBuilder = new StringBuilder("/druid/coordinator/v1/metadata/segments?includeOvershadowedStatus&includeRealtimeSegments"); if (watchedDataSources != null && !watchedDataSources.isEmpty()) { log.debug( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java 
b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index ff242d58e601..1706620f79cf 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -311,7 +311,7 @@ public Enumerable scan(DataContext root) numRows = partialSegmentData.getNumRows(); } - // If druid.coordinator.centralizedTableSchema.enabled is set on the Coordinator, SegmentMetadataCache on the + // If druid.centralizedDatasourceSchema.enabled is set on the Coordinator, SegmentMetadataCache on the // broker might have outdated or no information regarding numRows and rowSignature for a segment. // In that case, we should use {@code numRows} from the segment polled from the coordinator. if (null != val.getNumRows()) { @@ -358,7 +358,7 @@ public Enumerable scan(DataContext root) } }); - // If druid.coordinator.centralizedTableSchema.enabled is set on the Coordinator, all the segments in this loop + // If druid.centralizedDatasourceSchema.enabled is set on the Coordinator, all the segments in this loop // would be covered in the previous iteration since Coordinator would return realtime segments as well. final FluentIterable availableSegments = FluentIterable .from(() -> getAuthorizedAvailableSegments(