Add relevant documentation and modify variable names
599166320 committed Oct 19, 2023
1 parent 1f615df commit 16e0383
Showing 4 changed files with 8 additions and 5 deletions.
1 change: 1 addition & 0 deletions docs/querying/query-context.md
@@ -67,6 +67,7 @@ Unless otherwise noted, the following parameters apply to all query types.
|`setProcessingThreadNames`|`true`| Whether processing thread names will be set to `queryType_dataSource_intervals` while processing a query. This aids in interpreting thread dumps, and is on by default. Query overhead can be reduced slightly by setting this to `false`. This has a tiny effect in most scenarios, but can be meaningful in high-QPS, low-per-segment-processing-time scenarios. |
|`maxNumericInFilters`|`-1`|Maximum number of numeric values that can be compared for a string type dimension when the entire SQL WHERE clause of a query translates only to an [OR](../querying/filters.md#or) of [Bound filter](../querying/filters.md#bound-filter). By default, Druid does not restrict the number of numeric Bound Filters on String columns, although such queries may block other queries from running. Set this parameter to a smaller value to prevent Druid from running queries that have prohibitively long segment processing times. The optimal limit requires some trial and error; we recommend starting with 100. If a query exceeds the `maxNumericInFilters` limit, rewrite it to use strings in the `WHERE` clause instead of numbers. For example, `WHERE someString IN ('123', '456')`. This value cannot exceed the system configuration `druid.sql.planner.maxNumericInFilters`, and it is ignored if `druid.sql.planner.maxNumericInFilters` is not set explicitly.|
|`inSubQueryThreshold`|`2147483647`| Minimum number of values in an IN clause at which the query is converted to a JOIN on an inlined table rather than a predicate. A threshold of 0 forces use of an inline table in all cases; a threshold of `Integer.MAX_VALUE` forces use of OR in all cases. |
+|`federatedClusterBrokers`|`null`| If federated queries need to span multiple Druid clusters, designate one Broker or Router in each participating cluster and list their connection information here, separated by commas (see the example below). |

## Druid SQL parameters

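To make the new option concrete, here is a hedged sketch of how a client might pass `federatedClusterBrokers` through a query context, mirroring the pattern the test further below uses. The data source, interval, and broker `host:port` values are placeholders; the assumption that plain comma-separated `host:port` strings are the expected connection format is based only on the description above, and `QueryContexts.FEDERATED_CLUSTER_BROKERS` only exists once this commit is applied.

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.Druids;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.timeseries.TimeseriesQuery;

public class FederatedContextExample
{
  public static void main(String[] args)
  {
    // One Broker (or Router) per remote cluster, comma-separated.
    // These host:port values are placeholders, not real endpoints.
    final Map<String, Object> context = new HashMap<>();
    context.put(
        QueryContexts.FEDERATED_CLUSTER_BROKERS,
        "broker-a.example.com:8082,broker-b.example.com:8082"
    );

    // An ordinary native query; the federation hint simply rides along in its context.
    final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                        .dataSource("wikipedia")
                                        .intervals("2023-01-01/2023-01-02")
                                        .granularity(Granularities.ALL)
                                        .context(context)
                                        .build();

    System.out.println(query.context().getString(QueryContexts.FEDERATED_CLUSTER_BROKERS));
  }
}
```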
@@ -85,7 +85,7 @@ public class QueryContexts
public static final String SERIALIZE_DATE_TIME_AS_LONG_INNER_KEY = "serializeDateTimeAsLongInner";
public static final String UNCOVERED_INTERVALS_LIMIT_KEY = "uncoveredIntervalsLimit";
public static final String MIN_TOP_N_THRESHOLD = "minTopNThreshold";
-public static final String FEDERATED_CLUSSTER_BROKERS = "federatedClusterBrokers";
+public static final String FEDERATED_CLUSTER_BROKERS = "federatedClusterBrokers";

// SQL query context keys
public static final String CTX_SQL_QUERY_ID = BaseQuery.SQL_QUERY_ID;
@@ -24,6 +24,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
@@ -680,8 +681,9 @@ private void addSequencesFromFederatedCluster(
final List<Sequence<T>> listOfSequences
)
{
-if (query.context().containsKey(QueryContexts.FEDERATED_CLUSSTER_BROKERS)) {
-String[] brokers = query.context().getString(QueryContexts.FEDERATED_CLUSSTER_BROKERS).split(",");
+String federatedClusterBrokersStr = query.context().getString(QueryContexts.FEDERATED_CLUSTER_BROKERS);
+if (!Strings.isNullOrEmpty(federatedClusterBrokersStr)) {
+String[] brokers = federatedClusterBrokersStr.split(",");
for (String hostName : brokers) {
if (hostName.length() > 0) {
final QueryRunner serverRunner = serverView.getAndAddServer(hostName).getQueryRunner();
@@ -690,7 +692,7 @@ private void addSequencesFromFederatedCluster(
final Sequence<T> serverResults = serverRunner.run(
queryPlus.withQuery(queryPlus.getQuery()
.withOverriddenContext(ImmutableMap.of(
-QueryContexts.FEDERATED_CLUSSTER_BROKERS,
+QueryContexts.FEDERATED_CLUSTER_BROKERS,
""
)))
.withMaxQueuedBytes(maxQueuedBytes),
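Read together, the two hunks above do three things: guard on a null-or-empty `federatedClusterBrokers` value with `Strings.isNullOrEmpty`, split the value on commas and dispatch the query to each non-empty host, and blank the key out in the forwarded context, presumably so the receiving Broker does not fan the query out a second time. The following is a minimal, self-contained sketch of that pattern, not the patch itself: the `runOnBroker` dispatch hook and the generic result type are hypothetical stand-ins for Druid's real `QueryRunner` and server-view plumbing.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiFunction;

import com.google.common.base.Strings;

public class FederatedFanOutSketch
{
  /**
   * Sketch of the fan-out in the diff: split the comma-separated broker list,
   * run the query on each non-empty host, and clear the context key on the
   * forwarded copy so the receiving Broker does not federate again.
   */
  public static <T> List<T> fanOut(
      final Map<String, Object> context,
      final BiFunction<String, Map<String, Object>, T> runOnBroker // hypothetical dispatch hook
  )
  {
    final List<T> results = new ArrayList<>();
    final String brokersStr = (String) context.get("federatedClusterBrokers");
    if (Strings.isNullOrEmpty(brokersStr)) {
      return results; // nothing to federate
    }
    for (String hostName : brokersStr.split(",")) {
      if (!hostName.isEmpty()) {
        // Blank out the key on the forwarded query to avoid re-federation downstream.
        final Map<String, Object> forwarded = new HashMap<>(context);
        forwarded.put("federatedClusterBrokers", "");
        results.add(runOnBroker.apply(hostName, forwarded));
      }
    }
    return results;
  }
}
```

In the actual patch the dispatch goes through `serverView.getAndAddServer(hostName).getQueryRunner()` and the cleared key is applied with `withOverriddenContext`, as shown in the hunks above.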
@@ -3150,7 +3150,7 @@ public void testAddSequencesFromFederatedCluster()
{

Map<String, Object> context = new HashMap<>();
-context.put(QueryContexts.FEDERATED_CLUSSTER_BROKERS, "test1");
+context.put(QueryContexts.FEDERATED_CLUSTER_BROKERS, "test1");
context.putAll(CONTEXT);

final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
