Rename segment load wait parameter (apache#15251)
adarshsanjeev authored and LakshSingla committed Oct 25, 2023
1 parent 0760e8b commit 4e2383b
Showing 7 changed files with 22 additions and 21 deletions.
3 changes: 2 additions & 1 deletion docs/multi-stage-query/reference.md
@@ -246,9 +246,10 @@ The following table lists the context parameters for the MSQ task engine:
| `durableShuffleStorage` | SELECT, INSERT, REPLACE <br /><br />Whether to use durable storage for shuffle mesh. To use this feature, configure the durable storage at the server level using `druid.msq.intermediate.storage.enable=true`). If these properties are not configured, any query with the context variable `durableShuffleStorage=true` fails with a configuration error. <br /><br /> | `false` |
| `faultTolerance` | SELECT, INSERT, REPLACE<br /><br /> Whether to turn on fault tolerance mode or not. Failed workers are retried based on [Limits](#limits). Cannot be used when `durableShuffleStorage` is explicitly set to false. | `false` |
| `selectDestination` | SELECT<br /><br /> Controls where the final result of the select query is written. <br />Use `taskReport`(the default) to write select results to the task report. <b> This is not scalable since task reports size explodes for large results </b> <br/>Use `durableStorage` to write results to durable storage location. <b>For large results sets, its recommended to use `durableStorage` </b>. To configure durable storage see [`this`](#durable-storage) section. | `taskReport` |
-| `waitTillSegmentsLoad` | INSERT, REPLACE<br /><br /> If set, the ingest query waits for the generated segment to be loaded before exiting, else the ingest query exits without waiting. The task and live reports contain the information about the status of loading segments if this flag is set. This will ensure that any future queries made after the ingestion exits will include results from the ingestion. The drawback is that the controller task will stall till the segments are loaded. | `false` |
+| `waitUntilSegmentsLoad` | INSERT, REPLACE<br /><br /> If set, the ingest query waits for the generated segment to be loaded before exiting, else the ingest query exits without waiting. The task and live reports contain the information about the status of loading segments if this flag is set. This will ensure that any future queries made after the ingestion exits will include results from the ingestion. The drawback is that the controller task will stall until the segments are loaded. | `false` |
| `includeSegmentSource` | SELECT, INSERT, REPLACE<br /><br /> Controls the sources, which will be queried for results in addition to the segments present on deep storage. Can be `NONE` or `REALTIME`. If this value is `NONE`, only non-realtime (published and used) segments will be downloaded from deep storage. If this value is `REALTIME`, results will also be included from realtime tasks. | `NONE` |
| `rowsPerPage` | SELECT<br /><br />The number of rows per page to target. The actual number of rows per page may be somewhat higher or lower than this number. In most cases, use the default.<br /> This property comes into effect only when `selectDestination` is set to `durableStorage` | 100000 |

## Joins

Joins in multi-stage queries use one of two algorithms based on what you set the [context parameter](#context-parameters) `sqlJoinAlgorithm` to:
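For readers coming from the docs table above, here is a minimal sketch (not part of the commit) of passing the renamed flag to an MSQ ingest query. The `/druid/v2/sql/task` endpoint and the `context` field follow the Druid SQL task API; the host, port, table names, and `PARTITIONED BY ALL` clause are placeholders for illustration.

```ts
// Sketch: submit an MSQ ingest query that waits for the generated segments
// to load before the controller task exits. Host and query are placeholders.
async function submitIngestWithSegmentWait(): Promise<void> {
  const response = await fetch('http://localhost:8888/druid/v2/sql/task', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      query: 'INSERT INTO wiki2 SELECT * FROM wikipedia PARTITIONED BY ALL',
      context: {
        waitUntilSegmentsLoad: true, // renamed in this commit (was waitTillSegmentsLoad)
      },
    }),
  });
  console.log(await response.json());
}
```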
@@ -114,7 +114,7 @@ public class MultiStageQueryContext

public static final String CTX_FAULT_TOLERANCE = "faultTolerance";
public static final boolean DEFAULT_FAULT_TOLERANCE = false;
-  public static final String CTX_SEGMENT_LOAD_WAIT = "waitTillSegmentsLoad";
+  public static final String CTX_SEGMENT_LOAD_WAIT = "waitUntilSegmentsLoad";
public static final boolean DEFAULT_SEGMENT_LOAD_WAIT = false;
public static final String CTX_MAX_INPUT_BYTES_PER_WORKER = "maxInputBytesPerWorker";

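The constants above only define the context key and its default; a sketch of how such a key is typically resolved against a caller-supplied context, written in TypeScript for consistency with the rest of the diff and using an illustrative function name:

```ts
// Sketch (not from the repo): resolve the renamed flag with a fallback to the
// default, mirroring the CTX_SEGMENT_LOAD_WAIT / DEFAULT_SEGMENT_LOAD_WAIT pair.
const CTX_SEGMENT_LOAD_WAIT = 'waitUntilSegmentsLoad';
const DEFAULT_SEGMENT_LOAD_WAIT = false;

function shouldWaitForSegmentLoad(context: Record<string, unknown>): boolean {
  const value = context[CTX_SEGMENT_LOAD_WAIT];
  return typeof value === 'boolean' ? value : DEFAULT_SEGMENT_LOAD_WAIT;
}

shouldWaitForSegmentLoad({});                              // false (falls back to the default)
shouldWaitForSegmentLoad({ waitUntilSegmentsLoad: true }); // true
```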
18 changes: 9 additions & 9 deletions web-console/src/druid-models/query-context/query-context.tsx
@@ -162,20 +162,20 @@ export function changeFinalizeAggregations(
: deepDelete(context, 'finalizeAggregations');
}

-// waitTillSegmentsLoad
+// waitUntilSegmentsLoad

-export function getWaitTillSegmentsLoad(context: QueryContext): boolean | undefined {
-  const { waitTillSegmentsLoad } = context;
-  return typeof waitTillSegmentsLoad === 'boolean' ? waitTillSegmentsLoad : undefined;
+export function getWaitUntilSegmentsLoad(context: QueryContext): boolean | undefined {
+  const { waitUntilSegmentsLoad } = context;
+  return typeof waitUntilSegmentsLoad === 'boolean' ? waitUntilSegmentsLoad : undefined;
}

-export function changeWaitTillSegmentsLoad(
+export function changeWaitUntilSegmentsLoad(
   context: QueryContext,
-  waitTillSegmentsLoad: boolean | undefined,
+  waitUntilSegmentsLoad: boolean | undefined,
 ): QueryContext {
-  return typeof waitTillSegmentsLoad === 'boolean'
-    ? deepSet(context, 'waitTillSegmentsLoad', waitTillSegmentsLoad)
-    : deepDelete(context, 'waitTillSegmentsLoad');
+  return typeof waitUntilSegmentsLoad === 'boolean'
+    ? deepSet(context, 'waitUntilSegmentsLoad', waitUntilSegmentsLoad)
+    : deepDelete(context, 'waitUntilSegmentsLoad');
}

// groupByEnableMultiValueUnnesting
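A usage sketch of the renamed getter/setter pair, assuming a simplified `QueryContext` shape. `deepSet`/`deepDelete` are the web console's own utilities; a plain object spread and key removal stand in for them here.

```ts
// Sketch (not from the repo): toggling the flag on a query context object.
interface QueryContext {
  waitUntilSegmentsLoad?: boolean;
  [key: string]: unknown;
}

function changeWaitUntilSegmentsLoad(
  context: QueryContext,
  waitUntilSegmentsLoad: boolean | undefined,
): QueryContext {
  if (typeof waitUntilSegmentsLoad === 'boolean') {
    return { ...context, waitUntilSegmentsLoad }; // set or overwrite the key
  }
  const { waitUntilSegmentsLoad: _removed, ...rest } = context; // drop the key entirely
  return rest;
}

let ctx: QueryContext = { useCache: false };
ctx = changeWaitUntilSegmentsLoad(ctx, true);      // { useCache: false, waitUntilSegmentsLoad: true }
ctx = changeWaitUntilSegmentsLoad(ctx, undefined); // { useCache: false }
```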
@@ -423,7 +423,7 @@ describe('WorkbenchQuery', () => {
finalizeAggregations: false,
groupByEnableMultiValueUnnesting: false,
useCache: false,
-waitTillSegmentsLoad: true,
+waitUntilSegmentsLoad: true,
},
header: true,
query: 'INSERT INTO wiki2 SELECT * FROM wikipedia',
@@ -552,7 +552,7 @@ export class WorkbenchQuery {
apiQuery.context.executionMode ??= 'async';
apiQuery.context.finalizeAggregations ??= !ingestQuery;
apiQuery.context.groupByEnableMultiValueUnnesting ??= !ingestQuery;
-apiQuery.context.waitTillSegmentsLoad ??= true;
+apiQuery.context.waitUntilSegmentsLoad ??= true;
}

if (Array.isArray(queryParameters) && queryParameters.length) {
6 changes: 3 additions & 3 deletions web-console/src/helpers/execution/sql-task-execution.ts
@@ -59,9 +59,9 @@ export async function submitTaskQuery(
): Promise<Execution | IntermediateQueryState<Execution>> {
const { query, prefixLines, cancelToken, preserveOnTermination, onSubmitted } = options;

-// setting waitTillSegmentsLoad to true by default
+// setting waitUntilSegmentsLoad to true by default
const context = {
-waitTillSegmentsLoad: true,
+waitUntilSegmentsLoad: true,
...(options.context || {}),
};

@@ -268,7 +268,7 @@ export async function updateExecutionWithDatasourceLoadedIfNeeded(
}

// This means we don't have to perform the SQL query to check if the segments are loaded
-if (execution.queryContext?.waitTillSegmentsLoad === true) {
+if (execution.queryContext?.waitUntilSegmentsLoad === true) {
return execution.markDestinationDatasourceLoaded();
}

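The change above keeps the default-then-spread pattern in `submitTaskQuery`: the console sets `waitUntilSegmentsLoad: true` first, so a caller-supplied context can still override it. A small sketch of that precedence, with illustrative names:

```ts
// Sketch (not from the repo): the explicit default comes first in the object
// literal, so anything in options.context, including waitUntilSegmentsLoad: false,
// wins over it.
interface SubmitOptions {
  context?: Record<string, unknown>;
}

function buildContext(options: SubmitOptions): Record<string, unknown> {
  return {
    waitUntilSegmentsLoad: true, // console default
    ...(options.context || {}),  // caller-supplied context overrides
  };
}

buildContext({});                                            // { waitUntilSegmentsLoad: true }
buildContext({ context: { waitUntilSegmentsLoad: false } }); // { waitUntilSegmentsLoad: false }
```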
10 changes: 5 additions & 5 deletions web-console/src/views/workbench-view/run-panel/run-panel.tsx
@@ -45,7 +45,7 @@ import {
changeUseApproximateCountDistinct,
changeUseApproximateTopN,
changeUseCache,
-changeWaitTillSegmentsLoad,
+changeWaitUntilSegmentsLoad,
getDurableShuffleStorage,
getFinalizeAggregations,
getGroupByEnableMultiValueUnnesting,
@@ -54,7 +54,7 @@
getUseApproximateCountDistinct,
getUseApproximateTopN,
getUseCache,
-getWaitTillSegmentsLoad,
+getWaitUntilSegmentsLoad,
summarizeIndexSpec,
} from '../../../druid-models';
import { deepGet, deepSet, pluralIfNeeded, tickIcon } from '../../../utils';
@@ -112,7 +112,7 @@ export const RunPanel = React.memo(function RunPanel(props: RunPanelProps) {

const maxParseExceptions = getMaxParseExceptions(queryContext);
const finalizeAggregations = getFinalizeAggregations(queryContext);
-const waitTillSegmentsLoad = getWaitTillSegmentsLoad(queryContext);
+const waitUntilSegmentsLoad = getWaitUntilSegmentsLoad(queryContext);
const groupByEnableMultiValueUnnesting = getGroupByEnableMultiValueUnnesting(queryContext);
const sqlJoinAlgorithm = queryContext.sqlJoinAlgorithm ?? 'broadcast';
const selectDestination = queryContext.selectDestination ?? 'taskReport';
@@ -317,10 +317,10 @@ export const RunPanel = React.memo(function RunPanel(props: RunPanelProps) {
<MenuTristate
icon={IconNames.STOPWATCH}
text="Wait until segments have loaded"
-value={waitTillSegmentsLoad}
+value={waitUntilSegmentsLoad}
undefinedEffectiveValue /* ={true} */
onValueChange={v =>
-changeQueryContext(changeWaitTillSegmentsLoad(queryContext, v))
+changeQueryContext(changeWaitUntilSegmentsLoad(queryContext, v))
}
/>
<MenuTristate
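The menu item above is tristate: the flag can be `true`, `false`, or absent from the query context, and the `undefinedEffectiveValue` comment indicates that an absent flag behaves as `true`. A sketch of that fallback, with an illustrative helper name:

```ts
// Sketch (not from the repo): how the tristate menu value maps to behavior.
// `undefined` means the flag is not set in the query context, which the
// console treats as true (the effective default noted above).
type Tristate = boolean | undefined;

function effectiveWaitUntilSegmentsLoad(value: Tristate): boolean {
  return value ?? true; // undefined falls back to the effective default
}

effectiveWaitUntilSegmentsLoad(undefined); // true (flag omitted from context)
effectiveWaitUntilSegmentsLoad(false);     // false (user explicitly opted out)
```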
