style: fix nightly clippy (#17178)
Signed-off-by: xxchan <[email protected]>
xxchan authored Jun 10, 2024
1 parent 3d4bd82 commit b569d78
Showing 52 changed files with 128 additions and 161 deletions.
4 changes: 2 additions & 2 deletions src/batch/src/executor/join/hash_join.rs
@@ -54,7 +54,7 @@ use crate::task::{BatchTaskContext, ShutdownToken};
/// 2. Iterate over the probe side (i.e. left table) and compute the hash value of each row.
/// Then find the matched build side row for each probe side row in the hash map.
/// 3. Concatenate the matched pair of probe side row and build side row into a single row and push
/// it into the data chunk builder.
/// it into the data chunk builder.
/// 4. Yield chunks from the builder.
pub struct HashJoinExecutor<K> {
/// Join type e.g. inner, left outer, ...
@@ -1669,7 +1669,7 @@ impl<K: HashKey> HashJoinExecutor<K> {
/// | 4 | 3 | 3 | - |
///
/// 3. Remove duplicate rows with NULL build side. This is done by setting the visibility bitmap
/// of the chunk.
/// of the chunk.
///
/// | offset | v1 | v2 | v3 |
/// |---|---|---|---|
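Most of the doc-comment churn in this commit (here and in many of the files below) looks like the fix for nightly clippy's `doc_lazy_continuation` lint, which wants continuation lines of a Markdown list item inside a doc comment to be indented under that item. A minimal, self-contained sketch of the pattern, with illustrative doc text rather than the real comment:

```rust
/// Sketch of the indentation fix (illustrative doc text, not RisingWave's).
///
/// 1. Iterate over the probe side and compute the hash value of each row.
///    Then find the matched build-side row in the hash map.
/// 2. Yield chunks from the builder.
///
/// If the continuation under item 1 were flush-left (`/// Then find ...`),
/// nightly clippy would flag it as a lazy Markdown continuation.
pub struct DocIndentExample;

fn main() {
    // Nothing to execute; the point is the doc-comment layout above.
    let _ = DocIndentExample;
}
```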
4 changes: 2 additions & 2 deletions src/batch/src/executor/test_utils.rs
@@ -75,7 +75,7 @@ pub fn gen_sorted_data(
let mut array_builder = DataType::Int64.create_array_builder(batch_size);

for _ in 0..batch_size {
array_builder.append(&data_gen.generate_datum(0));
array_builder.append(data_gen.generate_datum(0));
}

let array = array_builder.finish();
@@ -102,7 +102,7 @@ pub fn gen_projected_data(
let mut array_builder = DataType::Int64.create_array_builder(batch_size);

for j in 0..batch_size {
array_builder.append(&data_gen.generate_datum(((i + 1) * (j + 1)) as u64));
array_builder.append(data_gen.generate_datum(((i + 1) * (j + 1)) as u64));
}

let chunk = DataChunk::new(vec![array_builder.finish().into()], batch_size);
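The `&` removals in this file (and in `data_chunk.rs`, `chunk_coalesce.rs` and `memcmp_encoding.rs` below) are presumably driven by clippy's `needless_borrows_for_generic_args` lint: when a function takes a generic argument that is satisfied by both a value and a reference to it, the extra borrow is redundant. A hedged sketch with a made-up builder API, not RisingWave's real `ArrayBuilder` signature:

```rust
// Hypothetical trait standing in for something like `ToDatumRef`.
trait ToDatum {
    fn to_datum(&self) -> Option<i64>;
}

impl ToDatum for Option<i64> {
    fn to_datum(&self) -> Option<i64> {
        *self
    }
}

// Blanket impl for references, so both `datum` and `&datum` satisfy the bound.
impl<T: ToDatum> ToDatum for &T {
    fn to_datum(&self) -> Option<i64> {
        (**self).to_datum()
    }
}

struct Builder {
    values: Vec<Option<i64>>,
}

impl Builder {
    // Generic argument: passing `&datum` also compiles, but the borrow is
    // needless, which is what the lint points out.
    fn append(&mut self, datum: impl ToDatum) {
        self.values.push(datum.to_datum());
    }
}

fn main() {
    let mut builder = Builder { values: Vec::new() };
    let datum = Some(42_i64);
    builder.append(datum); // preferred form after the fix: no `&`
    assert_eq!(builder.values, vec![Some(42)]);
}
```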
5 changes: 3 additions & 2 deletions src/common/metrics/src/relabeled_metric.rs
@@ -24,8 +24,9 @@ use crate::{
/// - when `metric_level` <= `relabel_threshold`, they behave exactly the same as their inner
/// metric.
/// - when `metric_level` > `relabel_threshold`, all their input label values are rewritten to "" when
/// calling `with_label_values`. That means the metric vec is aggregated into a single metric.
/// calling `with_label_values`. That means the metric vec is aggregated into a single metric.
///
///
/// These wrapper classes add a `metric_level` field to the corresponding metric.
/// We could have used one single struct to represent all `MetricVec<T: MetricVecBuilder>`, rather
/// than specializing them one by one. However, that's not doable because the prometheus crate doesn't
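The behavior described in this doc comment (label values rewritten to "" above the relabel threshold, so the metric vec collapses into one aggregated series) can be illustrated with a toy counter-vec wrapper. This is a sketch under assumptions, not the real `RelabeledMetricVec` in this crate:

```rust
use std::collections::HashMap;

// Toy stand-in for a prometheus MetricVec: joined label values -> count.
struct RelabeledCounterVec {
    metric_level: u8,
    relabel_threshold: u8,
    counters: HashMap<String, u64>,
}

impl RelabeledCounterVec {
    fn with_label_values(&mut self, labels: &[&str]) -> &mut u64 {
        let key = if self.metric_level > self.relabel_threshold {
            // Rewrite all label values to "" => one aggregated metric.
            String::new()
        } else {
            labels.join(",")
        };
        self.counters.entry(key).or_insert(0)
    }
}

fn main() {
    let mut metric = RelabeledCounterVec {
        metric_level: 2,
        relabel_threshold: 1,
        counters: HashMap::new(),
    };
    *metric.with_label_values(&["actor_1"]) += 1;
    *metric.with_label_values(&["actor_2"]) += 1;
    // Both increments land on the same "" key: a single aggregated counter.
    assert_eq!(metric.counters.len(), 1);
}
```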
2 changes: 1 addition & 1 deletion src/common/src/array/data_chunk.rs
@@ -804,7 +804,7 @@ impl DataChunkTestExt for DataChunk {
let arr = col;
let mut builder = arr.create_builder(n * 2);
for v in arr.iter() {
builder.append(&v.to_owned_datum());
builder.append(v.to_owned_datum());
builder.append_null();
}

1 change: 1 addition & 0 deletions src/common/src/cache.rs
@@ -1033,6 +1033,7 @@ mod tests {

pub struct Block {
pub offset: u64,
#[allow(dead_code)]
pub sst: u64,
}

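The `#[allow(dead_code)]` additions in this commit (here, in `schema_registry/util.rs`, in `bind_context.rs`, and on the `Completed(StageId)` variant further down) appear to silence a nightly `dead_code` lint that now flags struct and variant fields that are never read. A minimal standalone illustration reusing the field names from this hunk:

```rust
// Sketch only: the `main` exists just to make the example runnable.
pub struct Block {
    pub offset: u64,
    // Never read directly, so the nightly lint would warn without the allow.
    #[allow(dead_code)]
    pub sst: u64,
}

fn main() {
    let block = Block { offset: 0, sst: 1 };
    println!("offset = {}", block.offset);
}
```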
8 changes: 4 additions & 4 deletions src/common/src/session_config/search_path.rs
@@ -23,12 +23,12 @@ pub const USER_NAME_WILD_CARD: &str = "\"$user\"";
/// see <https://www.postgresql.org/docs/14/runtime-config-client.html#GUC-SEARCH-PATH>
///
/// 1. when we `select` or `drop` an object and don't give a specified schema, it will search the
/// object from the valid items in schema `rw_catalog`, `pg_catalog` and `search_path`. If schema
/// `rw_catalog` and `pg_catalog` are not in `search_path`, we will search them first. If they're
/// in `search_path`, we will follow the order in `search_path`.
/// object from the valid items in schema `rw_catalog`, `pg_catalog` and `search_path`. If schema
/// `rw_catalog` and `pg_catalog` are not in `search_path`, we will search them first. If they're
/// in `search_path`, we will follow the order in `search_path`.
///
/// 2. when we `create` a `source` or `mv` and don't give a specified schema, it will use the first
/// valid schema in `search_path`.
/// valid schema in `search_path`.
///
/// 3. when we `create` an `index` or `sink`, it will use the schema of the associated table.
#[derive(Clone, Debug, PartialEq)]
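A tiny sketch of the lookup order described in this doc comment, under the assumption that the two system schemas are simply searched first when they are absent from `search_path` (the function name and shape are made up, not the binder's real code):

```rust
// Returns the effective schema search order for the rules above.
fn effective_search_order(search_path: &[&str]) -> Vec<String> {
    let mut order = Vec::new();
    for implicit in ["rw_catalog", "pg_catalog"] {
        // Searched first only if the user didn't place it in search_path.
        if !search_path.contains(&implicit) {
            order.push(implicit.to_string());
        }
    }
    order.extend(search_path.iter().map(|s| s.to_string()));
    order
}

fn main() {
    // pg_catalog is listed explicitly, so it keeps its user-given position.
    let order = effective_search_order(&["public", "pg_catalog"]);
    assert_eq!(order, ["rw_catalog", "public", "pg_catalog"]);
}
```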
4 changes: 2 additions & 2 deletions src/common/src/util/chunk_coalesce.rs
@@ -459,13 +459,13 @@ mod tests {

let mut left_array_builder = DataType::Int32.create_array_builder(5);
for v in [1, 2, 3, 4, 5] {
left_array_builder.append(&Some(ScalarImpl::Int32(v)));
left_array_builder.append(Some(ScalarImpl::Int32(v)));
}
let left_arrays = [left_array_builder.finish()];

let mut right_array_builder = DataType::Int64.create_array_builder(5);
for v in [5, 4, 3, 2, 1] {
right_array_builder.append(&Some(ScalarImpl::Int64(v)));
right_array_builder.append(Some(ScalarImpl::Int64(v)));
}
let right_arrays = [right_array_builder.finish()];

2 changes: 1 addition & 1 deletion src/common/src/util/memcmp_encoding.rs
@@ -543,7 +543,7 @@ mod tests {
use rand::seq::SliceRandom;

fn serialize(f: F32) -> MemcmpEncoded {
encode_value(&Some(ScalarImpl::from(f)), OrderType::default()).unwrap()
encode_value(Some(ScalarImpl::from(f)), OrderType::default()).unwrap()
}

fn deserialize(data: MemcmpEncoded) -> F32 {
1 change: 1 addition & 0 deletions src/connector/src/schema/schema_registry/util.rs
@@ -150,6 +150,7 @@ pub struct Subject {
#[derive(Debug, Deserialize)]
pub struct SchemaReference {
/// The name of the reference.
#[allow(dead_code)]
pub name: String,
/// The subject that the referenced schema belongs to
pub subject: String,
1 change: 0 additions & 1 deletion src/connector/src/sink/dynamodb.rs
@@ -13,7 +13,6 @@
// limitations under the License.

use std::collections::{BTreeMap, HashMap, HashSet};
use std::usize;

use anyhow::{anyhow, Context};
use aws_sdk_dynamodb as dynamodb;
2 changes: 1 addition & 1 deletion src/connector/src/sink/encoder/mod.rs
@@ -58,7 +58,7 @@ pub trait RowEncoder {
/// * a json object
/// * a protobuf message
/// * an avro record
/// into
/// into
/// * string (required by kinesis key)
/// * bytes
///
1 change: 1 addition & 0 deletions src/connector/src/sink/encoder/proto.rs
@@ -205,6 +205,7 @@ impl MaybeData for () {
/// * Top level is always a message.
/// * All message fields can be omitted in proto3.
/// * All repeated elements must have a value.
///
/// So we handle [`ScalarRefImpl`] rather than [`DatumRef`] here.
impl MaybeData for ScalarRefImpl<'_> {
type Out = Value;
4 changes: 2 additions & 2 deletions src/connector/src/sink/formatter/mod.rs
@@ -45,9 +45,9 @@ pub trait SinkFormatter {
type V;

/// * Key may be None so that messages are partitioned using round-robin.
/// For example append-only without `primary_key` (aka `downstream_pk`) set.
/// For example append-only without `primary_key` (aka `downstream_pk`) set.
/// * Value may be None so that messages with same key are removed during log compaction.
/// For example debezium tombstone event.
/// For example debezium tombstone event.
fn format_chunk(
&self,
chunk: &StreamChunk,
1 change: 0 additions & 1 deletion src/connector/src/sink/google_pubsub.rs
@@ -13,7 +13,6 @@
// limitations under the License.

use std::collections::BTreeMap;
use std::usize;

use anyhow::{anyhow, Context};
use google_cloud_gax::conn::Environment;
1 change: 1 addition & 0 deletions src/connector/src/source/pulsar/topic.rs
@@ -108,6 +108,7 @@ pub fn get_partition_index(topic: &str) -> Result<Option<i32>> {
/// The short topic name can be:
/// - `<topic>`
/// - `<tenant>/<namespace>/<topic>`
///
/// The fully qualified topic name can be:
/// `<domain>://<tenant>/<namespace>/<topic>`
pub fn parse_topic(topic: &str) -> Result<Topic> {
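A rough sketch of the two accepted topic spellings described above. The `persistent` domain and the `public/default` tenant/namespace defaults are assumptions for illustration; the connector's `parse_topic` returns a structured `Topic` rather than a string:

```rust
fn qualify_topic(topic: &str) -> String {
    match topic.split('/').count() {
        // `<topic>` => assumed default tenant/namespace.
        1 => format!("persistent://public/default/{topic}"),
        // `<tenant>/<namespace>/<topic>` => assumed default domain.
        3 => format!("persistent://{topic}"),
        // Already fully qualified (or a form we don't recognize).
        _ => topic.to_string(),
    }
}

fn main() {
    assert_eq!(
        qualify_topic("my-topic"),
        "persistent://public/default/my-topic"
    );
    assert_eq!(
        qualify_topic("my-tenant/my-ns/my-topic"),
        "persistent://my-tenant/my-ns/my-topic"
    );
}
```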
1 change: 1 addition & 0 deletions src/frontend/src/binder/bind_context.rs
@@ -107,6 +107,7 @@ pub enum BindingCteState {
pub struct RecursiveUnion {
/// currently this *must* be true,
/// otherwise binding will fail.
#[allow(dead_code)]
pub all: bool,
/// lhs part of the `UNION ALL` operator
pub base: Box<BoundSetExpr>,
14 changes: 7 additions & 7 deletions src/frontend/src/binder/mod.rs
@@ -261,17 +261,17 @@ impl UdfContext {
/// following the rules:
/// 1. At the beginning, it contains the user-specified parameter types.
/// 2. When the binder encounters a parameter, it will record it as unknown (call `record_new_param`)
/// if it didn't exist in `ParameterTypes`.
/// if it didn't exist in `ParameterTypes`.
/// 3. When the binder encounters a cast on a parameter, if it's an unknown type, the cast function
/// will record the target type as the infer type for that parameter (call `record_infer_type`). If the
/// parameter has been inferred, the cast function will act as a normal cast.
/// will record the target type as the infer type for that parameter (call `record_infer_type`). If the
/// parameter has been inferred, the cast function will act as a normal cast.
/// 4. After binding finishes:
/// (a) parameter not in `ParameterTypes` means that the user didn't specify it and it didn't
/// occur in the query. `export` will return an error if there is such a
/// parameter. This rule is compatible with PostgreSQL
/// occur in the query. `export` will return an error if there is such a
/// parameter. This rule is compatible with PostgreSQL
/// (b) parameter is None means that it's an unknown type. The user didn't specify it
/// and we can't infer it in the query. We will finally treat it as VARCHAR type. This rule is
/// compatible with PostgreSQL.
/// and we can't infer it in the query. We will finally treat it as VARCHAR type. This rule is
/// compatible with PostgreSQL.
/// (c) parameter is Some means that it's a known type.
#[derive(Clone, Debug)]
pub struct ParameterTypes(Arc<RwLock<HashMap<u64, Option<DataType>>>>);
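The inference rules in this doc comment can be sketched with a toy map from parameter index to an optional type (None = seen but unknown). This is an assumption-laden sketch, not the real `ParameterTypes`:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct ParamTypes {
    types: HashMap<u64, Option<String>>,
}

impl ParamTypes {
    fn record_new_param(&mut self, index: u64) {
        // Rule 2: first encounter without a type => unknown.
        self.types.entry(index).or_insert(None);
    }

    fn record_infer_type(&mut self, index: u64, ty: &str) {
        // Rule 3: a cast on an unknown parameter pins its type.
        self.types.insert(index, Some(ty.to_string()));
    }

    fn export(&self) -> Vec<(u64, String)> {
        // Rule 4(b): still-unknown parameters fall back to VARCHAR.
        self.types
            .iter()
            .map(|(i, ty)| (*i, ty.clone().unwrap_or_else(|| "VARCHAR".into())))
            .collect()
    }
}

fn main() {
    let mut params = ParamTypes::default();
    params.record_new_param(1);
    params.record_infer_type(1, "INT4"); // e.g. `$1::int`
    params.record_new_param(2); // never cast => VARCHAR on export
    let mut exported = params.export();
    exported.sort();
    assert_eq!(
        exported,
        vec![(1, "INT4".to_string()), (2, "VARCHAR".to_string())]
    );
}
```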
2 changes: 1 addition & 1 deletion src/frontend/src/catalog/table_catalog.rs
@@ -60,7 +60,7 @@ use crate::user::UserId;
/// - **Order Key**: the primary key for storage, used to sort and access data.
///
/// For an MV, the columns in `ORDER BY` clause will be put at the beginning of the order key. And
/// the remaining columns in pk will follow behind.
/// the remaining columns in pk will follow behind.
///
/// If there's no `ORDER BY` clause, the order key will be the same as pk.
///
6 changes: 3 additions & 3 deletions src/frontend/src/optimizer/plan_node/logical_multi_join.rs
@@ -406,7 +406,7 @@ impl LogicalMultiJoin {
/// a. a filter with the non eq conditions
/// b. a projection which reorders the output column ordering to agree with the
/// original ordering of the joins.
/// The filter will then be pushed down by another filter pushdown pass.
/// The filter will then be pushed down by another filter pushdown pass.
pub(crate) fn heuristic_ordering(&self) -> Result<Vec<usize>> {
let mut labeller = ConnectedComponentLabeller::new(self.inputs.len());

@@ -494,9 +494,9 @@ impl LogicalMultiJoin {
/// 2. Second, for every isolated node, we will create a connection to every other node.
/// 3. Third, select and merge one node per iteration, and use a BFS policy to decide which node the
/// selected node is merged with.
/// i. The selected node mentioned above is the node with the least number of relations and the
/// i. The selected node mentioned above is the node with the least number of relations and the
/// lowest join tree.
/// ii. nodes with a join tree higher than the current optimal join tree will be pruned.
/// ii. nodes with a join tree higher than the current optimal join tree will be pruned.
pub fn as_bushy_tree_join(&self) -> Result<PlanRef> {
let (nodes, condition) = self.get_join_graph()?;

4 changes: 2 additions & 2 deletions src/frontend/src/optimizer/plan_node/predicate_pushdown.rs
@@ -29,14 +29,14 @@ pub trait PredicatePushdown {
/// There are three kinds of predicates:
///
/// 1. those can't be pushed down. We just create a `LogicalFilter` for them above the current
/// `PlanNode`. i.e.,
/// `PlanNode`. i.e.,
///
/// ```ignore
/// LogicalFilter::create(self.clone().into(), predicate)
/// ```
///
/// 2. those can be merged with current `PlanNode` (e.g., `LogicalJoin`). We just merge
/// the predicates with the `Condition` of it.
/// the predicates with the `Condition` of it.
///
/// 3. those can be pushed down. We pass them to current `PlanNode`'s input.
fn predicate_pushdown(
1 change: 1 addition & 0 deletions src/frontend/src/optimizer/plan_node/stream_table_scan.rs
@@ -147,6 +147,7 @@ impl StreamTableScan {
/// | 1002 | Int64(1) | t | 10 |
/// | 1003 | Int64(1) | t | 10 |
/// | 1003 | Int64(1) | t | 10 |
///
/// Eventually we should track progress per vnode, to support scaling with both mview and
/// the corresponding `no_shuffle_backfill`.
/// However this is not high priority, since we are working on supporting arrangement backfill,
@@ -33,6 +33,7 @@ impl Rule for LogicalFilterExpressionSimplifyRule {
/// The pattern we aim to optimize, e.g.,
/// 1. (NOT (e)) OR (e) => True
/// 2. (NOT (e)) AND (e) => False
///
/// NOTE: `e` should only contain at most a single column
/// otherwise we will not conduct the optimization
fn apply(&self, plan: PlanRef) -> Option<PlanRef> {
@@ -153,6 +154,7 @@ fn check_optimizable_pattern(e1: ExprImpl, e2: ExprImpl) -> (bool, Option<ExprIm

/// 1. True or (...) | (...) or True => True
/// 2. False and (...) | (...) and False => False
///
/// NOTE: the `True` and `False` here not only represent a single `ExprImpl::Literal`
/// but represent every `ExprImpl` that can be *evaluated* to `ScalarImpl::Bool`
/// during optimization phase as well
2 changes: 1 addition & 1 deletion src/frontend/src/scheduler/distributed/stage.rs
@@ -94,7 +94,7 @@ pub enum StageEvent {
reason: SchedulerError,
},
/// All tasks in stage finished.
Completed(StageId),
Completed(#[allow(dead_code)] StageId),
}

#[derive(Clone)]
2 changes: 1 addition & 1 deletion src/meta/src/controller/rename.rs
@@ -170,7 +170,7 @@ impl QueryRewriter<'_> {
///
/// So that we DON'T have to:
/// 1. rewrite the select and expr part like `schema.table.column`, `table.column`,
/// `alias.column` etc.
/// `alias.column` etc.
/// 2. handle the case that the old name is used as alias.
/// 3. handle the case that the new name is used as alias.
fn visit_table_factor(&self, table_factor: &mut TableFactor) {
15 changes: 0 additions & 15 deletions src/meta/src/hummock/compaction/picker/mod.rs
@@ -97,18 +97,3 @@ pub trait CompactionPicker {
stats: &mut LocalPickerStatistic,
) -> Option<CompactionInput>;
}

#[derive(Default, Clone, Debug)]
pub struct PartitionLevelInfo {
pub level_id: u32,
pub sub_level_id: u64,
pub left_idx: usize,
pub right_idx: usize,
pub total_file_size: u64,
}

#[derive(Default, Clone, Debug)]
pub struct LevelPartition {
pub sub_levels: Vec<PartitionLevelInfo>,
pub total_file_size: u64,
}
12 changes: 6 additions & 6 deletions src/meta/src/hummock/compactor_manager.rs
@@ -111,12 +111,12 @@ impl Compactor {
/// `CompactTaskAssignment`.
///
/// A compact task can be in one of these states:
/// - 1. Success: an assigned task is reported as success via `CompactStatus::report_compact_task`.
/// It's the final state.
/// - 2. Failed: a task is reported as failed via `CompactStatus::report_compact_task`.
/// It's the final state.
/// - 3. Cancelled: a task is reported as cancelled via `CompactStatus::report_compact_task`. It's
/// the final state.
/// 1. Success: an assigned task is reported as success via `CompactStatus::report_compact_task`.
/// It's the final state.
/// 2. Failed: a task is reported as failed via `CompactStatus::report_compact_task`.
/// It's the final state.
/// 3. Cancelled: a task is reported as cancelled via `CompactStatus::report_compact_task`. It's
/// the final state.
pub struct CompactorManagerInner {
pub task_expired_seconds: u64,
pub heartbeat_expired_seconds: u64,
2 changes: 1 addition & 1 deletion src/meta/src/hummock/manager/gc.rs
@@ -173,7 +173,7 @@ impl HummockManager {
/// Starts a full GC.
/// 1. Meta node sends a `FullScanTask` to a compactor in this method.
/// 2. The compactor returns scan result of object store to meta node. See
/// `HummockManager::full_scan_inner` in storage crate.
/// `HummockManager::full_scan_inner` in storage crate.
/// 3. Meta node decides which SSTs to delete. See `HummockManager::complete_full_gc`.
///
/// Returns Ok(false) if there is no worker available.
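A toy sketch of step 3 above (deciding which objects to delete once the compactor has returned its full scan). The set-difference logic and the names are assumptions, not `HummockManager::complete_full_gc` itself:

```rust
use std::collections::HashSet;

// Objects found in the object store that no tracked SST references are
// candidates for deletion.
fn complete_full_gc(scanned_object_ids: &[u64], tracked_sst_ids: &HashSet<u64>) -> Vec<u64> {
    scanned_object_ids
        .iter()
        .copied()
        .filter(|id| !tracked_sst_ids.contains(id))
        .collect()
}

fn main() {
    let tracked: HashSet<u64> = [1, 2, 3].into_iter().collect();
    assert_eq!(complete_full_gc(&[1, 2, 3, 4, 5], &tracked), vec![4, 5]);
}
```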
2 changes: 2 additions & 0 deletions src/meta/src/manager/catalog/mod.rs
@@ -187,6 +187,7 @@ impl CatalogManager {
/// We identify a 'legacy' source based on two conditions:
/// 1. The `format_encode_options` in `source_info` is empty.
/// 2. Keys with certain prefixes belonging to `format_encode_options` exist in `with_properties` instead.
///
/// And if the source is identified as 'legacy', we copy the misplaced keys from `with_properties` to `format_encode_options`.
async fn source_backward_compat_check(&self) -> MetaResult<()> {
let core = &mut *self.core.lock().await;
@@ -897,6 +898,7 @@ impl CatalogManager {
/// with:
/// 1. `stream_job_status` = CREATING
/// 2. Not belonging to a background stream job.
///
/// Clean up these hanging tables by the id.
pub async fn clean_dirty_tables(&self, fragment_manager: FragmentManagerRef) -> MetaResult<()> {
let core = &mut *self.core.lock().await;
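The "legacy source" compatibility check described in the first hunk of this file can be sketched roughly as below. The `schema.registry.` prefix and the map-based shape are illustrative assumptions; the real `source_backward_compat_check` walks catalog protos and a known set of prefixes:

```rust
use std::collections::BTreeMap;

fn backfill_format_encode_options(
    with_properties: &BTreeMap<String, String>,
    format_encode_options: &mut BTreeMap<String, String>,
) {
    // "Legacy" source: nothing was ever parsed into format_encode_options.
    if !format_encode_options.is_empty() {
        return;
    }
    for (key, value) in with_properties {
        // Hypothetical prefix; the real code checks several known prefixes.
        if key.starts_with("schema.registry.") {
            format_encode_options.insert(key.clone(), value.clone());
        }
    }
}

fn main() {
    let with_properties: BTreeMap<_, _> = [(
        "schema.registry.url".to_string(),
        "http://localhost:8081".to_string(),
    )]
    .into_iter()
    .collect();
    let mut format_encode_options = BTreeMap::new();
    backfill_format_encode_options(&with_properties, &mut format_encode_options);
    assert_eq!(format_encode_options.len(), 1);
}
```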
4 changes: 2 additions & 2 deletions src/meta/src/manager/env.rs
@@ -192,7 +192,7 @@ pub struct MetaOpts {
/// The Dashboard service uses this in the following ways:
/// 1. Query Prometheus for relevant metrics to find Stream Graph Bottleneck, and display it.
/// 2. Provide cluster diagnostics at `/api/monitor/diagnose` to troubleshoot the cluster.
/// These are just examples which show how the Meta Dashboard Service queries Prometheus.
/// These are just examples which show how the Meta Dashboard Service queries Prometheus.
pub prometheus_endpoint: Option<String>,

/// The additional selector used when querying Prometheus.
@@ -251,7 +251,7 @@ pub struct MetaOpts {
/// When `hybrid_partition_vnode_count` > 0, in the hybrid compaction group
/// - Tables with high write throughput will be split at vnode granularity
/// - Tables with large size will be split by table granularity
/// When `hybrid_partition_vnode_count` = 0, no special alignment operations are applied to the hybrid compaction group
/// When `hybrid_partition_vnode_count` = 0, no special alignment operations are applied to the hybrid compaction group
pub hybrid_partition_node_count: u32,

pub event_log_enabled: bool,
4 changes: 2 additions & 2 deletions src/meta/src/storage/transaction.rs
@@ -17,9 +17,9 @@ use crate::storage::{ColumnFamily, Key, Value};
/// A `Transaction` executes several writes (aka operations) to the meta store atomically with optional
/// preconditions checked. It executes as follows:
/// 1. If all `preconditions` are valid, all `operations` are executed; Otherwise no operation
/// is executed.
/// is executed.
/// 2. Upon `commit` the transaction, the `TransactionAbort` error will be returned if
/// any precondition was not met in previous step.
/// any precondition was not met in previous step.
#[derive(Default)]
pub struct Transaction {
preconditions: Vec<Precondition>,
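A toy sketch of the all-or-nothing semantics described above, using a plain in-memory map. `KvTxn` and the equality-style precondition are illustrative assumptions, not the meta store's real `Transaction`/`Precondition` types:

```rust
use std::collections::HashMap;

struct KvTxn {
    // Each precondition: the key must currently equal this value (None = absent).
    preconditions: Vec<(String, Option<String>)>,
    // Puts applied only if every precondition holds.
    operations: Vec<(String, String)>,
}

impl KvTxn {
    fn commit(self, store: &mut HashMap<String, String>) -> Result<(), &'static str> {
        for (key, expected) in &self.preconditions {
            if store.get(key) != expected.as_ref() {
                return Err("TransactionAbort: precondition not met");
            }
        }
        for (key, value) in self.operations {
            store.insert(key, value);
        }
        Ok(())
    }
}

fn main() {
    let mut store = HashMap::new();
    let txn = KvTxn {
        preconditions: vec![("leader".into(), None)], // key must not exist yet
        operations: vec![("leader".into(), "node-1".into())],
    };
    assert!(txn.commit(&mut store).is_ok());
    assert_eq!(store.get("leader"), Some(&"node-1".to_string()));
}
```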
