diff --git a/docs/querying/query-context.md b/docs/querying/query-context.md
index 1ac3af1127c4..2299c98ab070 100644
--- a/docs/querying/query-context.md
+++ b/docs/querying/query-context.md
@@ -67,6 +67,7 @@ Unless otherwise noted, the following parameters apply to all query types.
 |`setProcessingThreadNames`|`true`| Whether processing thread names will be set to `queryType_dataSource_intervals` while processing a query. This aids in interpreting thread dumps, and is on by default. Query overhead can be reduced slightly by setting this to `false`. This has a tiny effect in most scenarios, but can be meaningful in high-QPS, low-per-segment-processing-time scenarios. |
 |`maxNumericInFilters`|`-1`|Max limit for the amount of numeric values that can be compared for a string type dimension when the entire SQL WHERE clause of a query translates only to an [OR](../querying/filters.md#or) of [Bound filter](../querying/filters.md#bound-filter). By default, Druid does not restrict the amount of of numeric Bound Filters on String columns, although this situation may block other queries from running. Set this parameter to a smaller value to prevent Druid from running queries that have prohibitively long segment processing times. The optimal limit requires some trial and error; we recommend starting with 100. Users who submit a query that exceeds the limit of `maxNumericInFilters` should instead rewrite their queries to use strings in the `WHERE` clause instead of numbers. For example, `WHERE someString IN (‘123’, ‘456’)`. This value cannot exceed the set system configuration `druid.sql.planner.maxNumericInFilters`. This value is ignored if `druid.sql.planner.maxNumericInFilters` is not set explicitly.|
 |`inSubQueryThreshold`|`2147483647`| Threshold for minimum number of values in an IN clause to convert the query to a JOIN operation on an inlined table rather than a predicate. A threshold of 0 forces usage of an inline table in all cases; a threshold of [Integer.MAX_VALUE] forces usage of OR in all cases. |
+|`federatedClusterBrokers`|`null`| Comma-separated list of Broker or Router connection strings, one for each Druid cluster that should participate in a federated query. When set, the query is also forwarded to each listed endpoint and the results are merged with the local cluster's results. |
 
 ## Druid SQL parameters
 
diff --git a/processing/src/main/java/org/apache/druid/query/QueryContexts.java b/processing/src/main/java/org/apache/druid/query/QueryContexts.java
index bf8003658c17..f108a3514065 100644
--- a/processing/src/main/java/org/apache/druid/query/QueryContexts.java
+++ b/processing/src/main/java/org/apache/druid/query/QueryContexts.java
@@ -85,7 +85,7 @@ public class QueryContexts
   public static final String SERIALIZE_DATE_TIME_AS_LONG_INNER_KEY = "serializeDateTimeAsLongInner";
   public static final String UNCOVERED_INTERVALS_LIMIT_KEY = "uncoveredIntervalsLimit";
   public static final String MIN_TOP_N_THRESHOLD = "minTopNThreshold";
-  public static final String FEDERATED_CLUSSTER_BROKERS = "federatedClusterBrokers";
+  public static final String FEDERATED_CLUSTER_BROKERS = "federatedClusterBrokers";
 
   // SQL query context keys
   public static final String CTX_SQL_QUERY_ID = BaseQuery.SQL_QUERY_ID;
diff --git a/server/src/main/java/org/apache/druid/client/CachingClusteredClient.java b/server/src/main/java/org/apache/druid/client/CachingClusteredClient.java
index aa776f8f3936..2629b4a6ce0f 100644
--- a/server/src/main/java/org/apache/druid/client/CachingClusteredClient.java
+++ b/server/src/main/java/org/apache/druid/client/CachingClusteredClient.java
@@ -24,6 +24,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Iterators;
@@ -680,8 +681,9 @@ private void addSequencesFromFederatedCluster(
       final List<Sequence<T>> listOfSequences
   )
   {
-    if (query.context().containsKey(QueryContexts.FEDERATED_CLUSSTER_BROKERS)) {
-      String[] brokers = query.context().getString(QueryContexts.FEDERATED_CLUSSTER_BROKERS).split(",");
+    String federatedClusterBrokersStr = query.context().getString(QueryContexts.FEDERATED_CLUSTER_BROKERS);
+    if (!Strings.isNullOrEmpty(federatedClusterBrokersStr)) {
+      String[] brokers = federatedClusterBrokersStr.split(",");
       for (String hostName : brokers) {
         if (hostName.length() > 0) {
           final QueryRunner serverRunner = serverView.getAndAddServer(hostName).getQueryRunner();
@@ -690,7 +692,7 @@ private void addSequencesFromFederatedCluster(
           final Sequence serverResults = serverRunner.run(
               queryPlus.withQuery(queryPlus.getQuery()
                   .withOverriddenContext(ImmutableMap.of(
-                      QueryContexts.FEDERATED_CLUSSTER_BROKERS,
+                      QueryContexts.FEDERATED_CLUSTER_BROKERS,
                       ""
                   )))
                   .withMaxQueuedBytes(maxQueuedBytes),
diff --git a/server/src/test/java/org/apache/druid/client/CachingClusteredClientTest.java b/server/src/test/java/org/apache/druid/client/CachingClusteredClientTest.java
index bdb0a3035600..64078955748b 100644
--- a/server/src/test/java/org/apache/druid/client/CachingClusteredClientTest.java
+++ b/server/src/test/java/org/apache/druid/client/CachingClusteredClientTest.java
@@ -3150,7 +3150,7 @@
   public void testAddSequencesFromFederatedCluster()
   {
     Map<String, Object> context = new HashMap<>();
-    context.put(QueryContexts.FEDERATED_CLUSSTER_BROKERS, "test1");
+    context.put(QueryContexts.FEDERATED_CLUSTER_BROKERS, "test1");
     context.putAll(CONTEXT);
 
     final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
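For reviewers, a minimal sketch (not part of the patch) of how a client could set the new `federatedClusterBrokers` context key on a native query, mirroring the usage in `CachingClusteredClientTest`. The data source, interval, and broker endpoints (`broker-cluster2.example.com:8082`, `broker-cluster3.example.com:8082`) are hypothetical placeholders.

```java
// Illustrative only; endpoints and data source are assumed, not part of this diff.
import com.google.common.collect.ImmutableMap;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.Druids;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.timeseries.TimeseriesQuery;

public class FederatedQueryExample
{
  public static TimeseriesQuery buildFederatedQuery()
  {
    return Druids.newTimeseriesQueryBuilder()
                 .dataSource("wikipedia")                 // hypothetical data source
                 .intervals("2024-01-01/2024-01-02")      // hypothetical interval
                 .granularity(Granularities.ALL)
                 // Fan the query out to the Brokers of two other clusters in addition to the
                 // local cluster. The patched CachingClusteredClient overrides this key with an
                 // empty string before forwarding, so the remote Brokers do not fan out again.
                 .context(ImmutableMap.of(
                     QueryContexts.FEDERATED_CLUSTER_BROKERS,
                     "broker-cluster2.example.com:8082,broker-cluster3.example.com:8082"
                 ))
                 .build();
  }
}
```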