Add table sampling with fixed shard #15128

Closed · wants to merge 4 commits
File: DataSource.java
@@ -36,6 +36,7 @@
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type", defaultImpl = TableDataSource.class)
@JsonSubTypes({
    @JsonSubTypes.Type(value = TableDataSource.class, name = "table"),
    @JsonSubTypes.Type(value = SampledTableDataSource.class, name = "sampled_table"),
    @JsonSubTypes.Type(value = QueryDataSource.class, name = "query"),
    @JsonSubTypes.Type(value = UnionDataSource.class, name = "union"),
    @JsonSubTypes.Type(value = JoinDataSource.class, name = "join"),
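For context: the name = "sampled_table" entry above is what routes the JSON type discriminator to the new class added later in this diff. A minimal round-trip sketch (not part of the PR), assuming a plain Jackson ObjectMapper and a placeholder datasource name:

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.query.DataSource;
import org.apache.druid.query.SampledTableDataSource;

public class SampledTableRoundTripSketch
{
  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new ObjectMapper();
    // "wikipedia" is a placeholder name; samplingType uses the enum's lower-case @JsonValue form.
    String json = "{\"type\":\"sampled_table\",\"name\":\"wikipedia\","
                  + "\"samplingType\":\"fixed_shard\",\"samplingPercentage\":10}";
    DataSource ds = mapper.readValue(json, DataSource.class);
    System.out.println(ds instanceof SampledTableDataSource);  // true
    System.out.println(mapper.writeValueAsString(ds));         // serializes back to the same shape
  }
}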
File: SampledTableDataSource.java (new file)
@@ -0,0 +1,122 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.query;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeName;
import com.fasterxml.jackson.annotation.JsonValue;
import org.apache.druid.java.util.common.Cacheable;
import org.apache.druid.java.util.common.StringUtils;

@JsonTypeName("sampled_table")
public class SampledTableDataSource extends TableDataSource
{
  private final SamplingType samplingType;
  private final int samplingPercentage;

  public enum SamplingType implements Cacheable
  {
    FIXED_SHARD;

    @JsonValue
    @Override
    public String toString()
    {
      return StringUtils.toLowerCase(this.name());
    }

    @JsonCreator
    public static SamplingType fromString(String name)
    {
      return valueOf(StringUtils.toUpperCase(name));
    }

    @Override
    public byte[] getCacheKey()
    {
      return new byte[]{(byte) this.ordinal()};
    }
  }

  public SampledTableDataSource(
      @JsonProperty("name") String name,
      @JsonProperty("samplingType") SamplingType samplingType,
      @JsonProperty("samplingPercentage") int samplingPercentage
  )
  {
    super(name);
    this.samplingType = samplingType;
    this.samplingPercentage = samplingPercentage;
  }

  // Single Jackson creator: annotating both this factory and the constructor above with
  // @JsonCreator would make Jackson reject the class with conflicting property-based creators.
  @JsonCreator
  public static SampledTableDataSource create(
      @JsonProperty("name") final String name,
      @JsonProperty("samplingType") final String samplingType,
      @JsonProperty("samplingPercentage") final int samplingPercentage
  )
  {
    return new SampledTableDataSource(name, SamplingType.fromString(samplingType), samplingPercentage);
  }

  @JsonProperty
  public SamplingType getSamplingType()
  {
    return samplingType;
  }

  @JsonProperty
  public int getSamplingPercentage()  // int, matching the stored field and the JSON property
  {
    return samplingPercentage;
  }

  @Override
  public boolean equals(Object o)
  {
    if (this == o) {
      return true;
    }
    if (!(o instanceof SampledTableDataSource)) {
      return false;
    }
    if (!super.equals(o)) {
      return false;
    }

    SampledTableDataSource that = (SampledTableDataSource) o;

    if (samplingPercentage != that.samplingPercentage) {
      return false;
    }
    return samplingType == that.samplingType;
  }

  @Override
  public int hashCode()
  {
    int result = super.hashCode();
    result = 31 * result + (samplingType != null ? samplingType.hashCode() : 0);
    result = 31 * result + samplingPercentage;
    return result;
  }
}
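Two serialization surfaces of the nested enum are worth noting: JSON uses the lower-cased constant name (@JsonValue on toString, @JsonCreator on fromString), while cache keys use the ordinal byte, so reordering or inserting enum constants would silently change cache keys. A quick sketch, not from the PR:

import org.apache.druid.query.SampledTableDataSource.SamplingType;

public class SamplingTypeSurfaces
{
  public static void main(String[] args)
  {
    SamplingType t = SamplingType.fromString("fixed_shard");
    System.out.println(t);                   // "fixed_shard", the form used in JSON
    System.out.println(t.getCacheKey()[0]);  // 0, the ordinal byte used in cache keys
  }
}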
File: ResponseContext.java
@@ -453,6 +453,12 @@ public Object mergeValues(Object oldValue, Object newValue)
        false
    );

    public static final Key SAMPLING_COMPOSITION = new StringKey(
        "samplingComposition",
        true,
        false
    );

    /**
     * Indicates if a {@link ResponseContext} was truncated during serialization.
     */
@@ -488,6 +494,7 @@ public Object mergeValues(Object oldValue, Object newValue)
            TIMEOUT_AT,
            NUM_SCANNED_ROWS,
            CPU_CONSUMED_NANOS,
            SAMPLING_COMPOSITION,
            TRUNCATED,
        }
    );
@@ -738,6 +745,12 @@ public void addCpuNanos(long ns)
    addValue(Keys.CPU_CONSUMED_NANOS, ns);
  }

  /**
   * Records the sampling composition reported by the broker: "kept/total" candidate segment counts.
   */
  public void addSamplingComposition(String samplingComposition)
  {
    addValue(Keys.SAMPLING_COMPOSITION, samplingComposition);
  }

  private Object addValue(Key key, Object value)
  {
    return getDelegate().merge(key, value, key::mergeValues);
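A consumption sketch, not from the diff: Druid exposes selected response-context entries to HTTP clients through the X-Druid-Response-Context header, so assuming the key's flags mark it for the header, a sampled query is expected to surface the value written by CachingClusteredClient below as a plain "kept/total" string:

public class SamplingCompositionFormat
{
  public static void main(String[] args)
  {
    // Assumed inputs mirroring the broker code: 3 of 8 candidate segments survived pruning.
    int keptSegments = 3;
    int totalSegments = 8;
    String samplingComposition = keptSegments + "/" + totalSegments;
    System.out.println(samplingComposition);  // "3/8", e.g. {"samplingComposition":"3/8"} in the header
  }
}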
File: DataSourceAnalysis.java
@@ -24,6 +24,7 @@
import org.apache.druid.query.DataSource;
import org.apache.druid.query.JoinDataSource;
import org.apache.druid.query.Query;
import org.apache.druid.query.SampledTableDataSource;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.UnionDataSource;
import org.apache.druid.query.UnnestDataSource;
@@ -117,6 +118,8 @@
  {
    if (baseDataSource instanceof TableDataSource) {
      return Optional.of((TableDataSource) baseDataSource);
    } else if (baseDataSource instanceof SampledTableDataSource) {
      return Optional.of((SampledTableDataSource) baseDataSource);
    } else {
      return Optional.empty();
    }

[Check failure] Code scanning / CodeQL: Contradictory type checks (Error)
This access of baseDataSource cannot be of type SampledTableDataSource, since this expression ensures that it is not of type TableDataSource. (Reported twice, on the instanceof test and on the cast.)
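The finding is valid: instanceof also matches subclasses, and SampledTableDataSource extends TableDataSource, so the first branch swallows every sampled table and the else-if is unreachable. A sketch of the conventional fix (not what the PR does; the method name here is hypothetical), checking the more specific type first:

  // Order the checks most-specific-first so the sampled branch is reachable.
  private Optional<TableDataSource> getBaseTableDataSourceChecked(DataSource baseDataSource)
  {
    if (baseDataSource instanceof SampledTableDataSource) {
      return Optional.of((SampledTableDataSource) baseDataSource);
    } else if (baseDataSource instanceof TableDataSource) {
      return Optional.of((TableDataSource) baseDataSource);
    } else {
      return Optional.empty();
    }
  }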
@@ -216,6 +219,7 @@
  public boolean isTableBased()
  {
    return (baseDataSource instanceof TableDataSource
            || baseDataSource instanceof SampledTableDataSource
            || (baseDataSource instanceof UnionDataSource &&
                baseDataSource.getChildren()
                              .stream()

[Check failure] Code scanning / CodeQL: Contradictory type checks (Error)
This access of baseDataSource cannot be of type SampledTableDataSource, since this expression ensures that it is not of type TableDataSource. (The added disjunct is redundant for the same reason as above: any SampledTableDataSource already satisfies the first instanceof.)
File: CachingClusteredClient.java
@@ -69,6 +69,8 @@
import org.apache.druid.query.QueryToolChest;
import org.apache.druid.query.QueryToolChestWarehouse;
import org.apache.druid.query.Result;
import org.apache.druid.query.SampledTableDataSource;
import org.apache.druid.query.SampledTableDataSource.SamplingType;
import org.apache.druid.query.SegmentDescriptor;
import org.apache.druid.query.aggregation.MetricManipulatorFns;
import org.apache.druid.query.context.ResponseContext;
@@ -344,6 +346,10 @@ ClusterQueryResult<T> run(
      }

      final Set<SegmentServerSelector> segmentServers = computeSegmentsToQuery(timeline, specificSegments);
      // Drop segments above the sampled shard cutoff, then report kept/total in the response context.
      Pair<Integer, Integer> ratio = pruneSegmentsForShardSampling(segmentServers);
      if (ratio != null) {
        responseContext.addSamplingComposition(ratio.lhs + "/" + ratio.rhs);
      }
      @Nullable
      final byte[] queryCacheKey = cacheKeyManager.computeSegmentLevelQueryCacheKey();
      if (query.getContext().get(QueryResource.HEADER_IF_NONE_MATCH) != null) {
@@ -460,6 +466,7 @@ private Set<SegmentServerSelector> computeSegmentsToQuery(
          segments.add(new SegmentServerSelector(server, segment));
        }
      }

      return segments;
    }

@@ -503,6 +510,36 @@ private void computeUncoveredIntervals(TimelineLookup<String, ServerSelector> ti
      }
    }

    private Pair<Integer, Integer> pruneSegmentsForShardSampling(final Set<SegmentServerSelector> segments)
    {
      if (query.getDataSource() instanceof SampledTableDataSource) {
        if (((SampledTableDataSource) query.getDataSource()).getSamplingType() == SamplingType.FIXED_SHARD) {
          final int allSegmentsSize = segments.size();
          // Highest partition number among the candidate segments; 0 if the set is empty.
          final int allShards = segments.stream()
              .mapToInt(s -> s.getSegmentDescriptor().getPartitionNumber())
              .max()
              .orElse(0);
          // Scale the cutoff by the sampling percentage, dividing before rounding so that,
          // e.g., 50% of 7 shards rounds to 4 rather than truncating to 3.
          final int targetShards = Math.round(
              allShards * ((SampledTableDataSource) query.getDataSource()).getSamplingPercentage() / 100f
          );
          final Iterator<SegmentServerSelector> iterator = segments.iterator();
          int removedSegments = 0;
          while (iterator.hasNext()) {
            SegmentServerSelector segmentServerSelector = iterator.next();
            SegmentDescriptor segmentDescriptor = segmentServerSelector.getSegmentDescriptor();
            int shard = segmentDescriptor.getPartitionNumber();
            // Keep segments whose partition number is at or below the cutoff; drop the rest.
            if (targetShards < shard) {
              removedSegments++;
              iterator.remove();
            }
          }
          return Pair.of(allSegmentsSize - removedSegments, allSegmentsSize);
        } else {
          throw new UnsupportedOperationException("Only FIXED_SHARD sampling is implemented");
        }
      }
      return null;
    }
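A worked example of the cutoff arithmetic above, as a standalone sketch with assumed inputs (not code from the PR). Note that the percentage scales the maximum partition number rather than the segment count, so the kept fraction can differ slightly from the nominal percentage:

public class FixedShardCutoffExample
{
  public static void main(String[] args)
  {
    // Assume 8 single-shard segments with partition numbers 0..7, sampled at 50%.
    int allShards = 7;  // the max partition number observed across the segments
    int samplingPercentage = 50;
    int targetShards = Math.round(allShards * samplingPercentage / 100f);
    System.out.println(targetShards);  // 4: partitions 0..4 survive, giving a "5/8" composition
  }
}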


    private List<Pair<Interval, byte[]>> pruneSegmentsWithCachedResults(
        final byte[] queryCacheKey,
        final Set<SegmentServerSelector> segments
@@ -541,6 +578,7 @@ private Map<SegmentServerSelector, Cache.NamedKey> computePerSegmentCacheKeys(
        byte[] queryCacheKey
    )
    {

      // cacheKeys map must preserve segment ordering, in order for shards to always be combined in the same order
      Map<SegmentServerSelector, Cache.NamedKey> cacheKeys = Maps.newLinkedHashMap();
      for (SegmentServerSelector segmentServer : segments) {