From ce2149c51d77baab0f4ac373ec87246628b712e4 Mon Sep 17 00:00:00 2001
From: xxchan
Date: Sat, 9 Sep 2023 23:12:43 +0800
Subject: [PATCH] build: bump toolchain to 2023-09-09 (#11809)

Signed-off-by: TennyZhuang
Signed-off-by: MrCroxx
Co-authored-by: TennyZhuang
Co-authored-by: MrCroxx
Co-authored-by: William Wen
Co-authored-by: xxchan
---
 .cargo/config.toml | 23 ++----
 Cargo.lock | 12 ++--
 Cargo.toml | 70 ++++++++++++++---
 ci/build-ci-image.sh | 2 +-
 ci/docker-compose.yml | 10 +--
 ci/rust-toolchain | 2 +-
 ci/scripts/check.sh | 2 +-
 ci/scripts/gen-flamegraph.sh | 4 +-
 .../feature-store/server/Cargo.toml | 3 +
 .../feature-store/simulator/Cargo.toml | 3 +
 rustfmt.toml | 5 +-
 scripts/source/prepare_ci_pubsub/Cargo.toml | 3 +
 src/batch/Cargo.toml | 3 +
 src/batch/benches/expand.rs | 2 +-
 src/batch/benches/filter.rs | 2 +-
 src/batch/benches/hash_agg.rs | 2 +-
 src/batch/benches/limit.rs | 2 +-
 src/batch/benches/sort.rs | 2 +-
 src/batch/benches/top_n.rs | 2 +-
 src/batch/benches/utils/mod.rs | 2 +-
 src/batch/src/executor/group_top_n.rs | 6 +-
 src/batch/src/executor/merge_sort_exchange.rs | 2 +-
 src/batch/src/executor/order_by.rs | 6 +-
 src/batch/src/executor/sort_over_window.rs | 2 +-
 src/batch/src/executor/source.rs | 2 +-
 src/batch/src/executor/top_n.rs | 8 +--
 src/batch/src/monitor/stats.rs | 2 +-
 src/batch/src/task/task_manager.rs | 2 +-
 src/bench/Cargo.toml | 3 +
 src/cmd/Cargo.toml | 3 +
 src/cmd_all/Cargo.toml | 3 +
 src/common/Cargo.toml | 3 +
 src/common/common_service/Cargo.toml | 3 +
 src/common/common_service/src/tracing.rs | 23 +++--
 src/common/proc_macro/Cargo.toml | 4 +-
 src/common/proc_macro/src/estimate_size.rs | 12 ++--
 src/common/proc_macro/src/lib.rs | 6 +-
 src/common/src/array/bool_array.rs | 2 +-
 src/common/src/array/data_chunk.rs | 16 ++---
 src/common/src/array/decimal_array.rs | 2 +-
 src/common/src/array/list_array.rs | 26 ++++---
 src/common/src/array/mod.rs | 6 +-
 src/common/src/array/stream_chunk.rs | 2 +-
 src/common/src/array/struct_array.rs | 30 ++++----
 src/common/src/array/utf8_array.rs | 2 +-
 src/common/src/error.rs | 4 +-
 src/common/src/hash/key.rs | 19 +++--
 src/common/src/lib.rs | 8 ++-
 src/common/src/types/interval.rs | 5 +-
 src/common/src/types/ordered.rs | 1 +
 src/common/src/types/to_binary.rs | 1 +
 src/common/src/util/hash_util.rs | 2 +-
 src/common/src/util/memcmp_encoding.rs | 5 +-
 src/common/src/util/sort_util.rs | 25 ++++---
 .../src/vnode_mapping/vnode_placement.rs | 24 ++++---
 src/compute/Cargo.toml | 3 +
 src/compute/src/memory_management/policy.rs | 2 +-
 .../src/rpc/service/exchange_service.rs | 4 +-
 .../src/rpc/service/monitor_service.rs | 2 +-
 src/compute/tests/integration_tests.rs | 2 +-
 src/connector/Cargo.toml | 3 +
 src/connector/build.rs | 2 +-
 src/connector/src/parser/util.rs | 4 +-
 src/connector/src/sink/clickhouse.rs | 2 +-
 src/ctl/Cargo.toml | 3 +
 .../src/cmd_impl/hummock/compaction_group.rs | 2 +-
 src/ctl/src/cmd_impl/hummock/list_version.rs | 2 +-
 src/ctl/src/cmd_impl/meta/serving.rs | 2 +-
 src/ctl/src/cmd_impl/scale/resize.rs | 2 +-
 src/ctl/src/lib.rs | 2 +-
 src/expr/Cargo.toml | 3 +
 src/expr/macro/Cargo.toml | 3 +
 src/expr/src/agg/approx_count_distinct/mod.rs | 4 +-
 src/expr/src/expr/expr_concat_ws.rs | 2 +-
 src/expr/src/expr/expr_in.rs | 2 +-
 src/expr/src/expr/expr_unary.rs | 4 +-
 src/expr/src/lib.rs | 1 -
 src/expr/src/vector_op/encdec.rs | 6 +-
 src/expr/src/vector_op/like.rs | 10 +--
 src/frontend/Cargo.toml | 3 +
 src/frontend/planner_test/Cargo.toml | 3 +
 src/frontend/src/binder/bind_context.rs | 2 +-
 src/frontend/src/binder/expr/function.rs | 12 +++-
 src/frontend/src/binder/expr/value.rs | 6 +-
 src/frontend/src/expr/utils.rs | 2 +-
 .../src/handler/alter_table_column.rs | 2 +-
 src/frontend/src/handler/drop_index.rs | 4 +-
 src/frontend/src/handler/drop_mv.rs | 2 +-
 src/frontend/src/lib.rs | 3 +-
 src/frontend/src/optimizer/mod.rs | 3 +-
 .../src/optimizer/plan_node/generic/join.rs | 8 +--
 .../src/optimizer/plan_node/logical_join.rs | 6 +-
 .../optimizer/plan_node/logical_multi_join.rs | 2 +-
 .../plan_node/logical_over_window.rs | 2 +-
 .../src/optimizer/plan_node/logical_scan.rs | 2 +-
 src/frontend/src/optimizer/plan_node/mod.rs | 3 +-
 .../optimizer/plan_node/stream_table_scan.rs | 2 +-
 src/frontend/src/optimizer/plan_node/utils.rs | 2 +-
 .../src/optimizer/property/func_dep.rs | 4 +-
 .../rule/apply_join_transpose_rule.rs | 2 +-
 .../rule/apply_project_set_transpose_rule.rs | 2 +-
 .../rule/apply_project_transpose_rule.rs | 2 +-
 .../rule/apply_topn_transpose_rule.rs | 4 +-
 .../src/optimizer/rule/distinct_agg_rule.rs | 4 +-
 .../optimizer/rule/index_selection_rule.rs | 4 +-
 .../optimizer/rule/rewrite_like_expr_rule.rs | 6 +-
 .../src/scheduler/distributed/stage.rs | 2 +-
 src/frontend/src/scheduler/local.rs | 4 +-
 src/frontend/src/stream_fragmenter/mod.rs | 6 +-
 .../stream_fragmenter/rewrite/delta_join.rs | 4 +-
 src/frontend/src/user/user_authentication.rs | 2 +-
 .../src/utils/connected_components.rs | 10 +--
 src/java_binding/Cargo.toml | 3 +
 src/java_binding/src/lib.rs | 1 -
 src/jni_core/Cargo.toml | 3 +
 src/jni_core/src/lib.rs | 1 -
 src/meta/Cargo.toml | 3 +
 src/meta/src/barrier/command.rs | 6 +-
 src/meta/src/barrier/mod.rs | 6 +-
 src/meta/src/error.rs | 2 +-
 .../src/hummock/compaction/level_selector.rs | 10 +--
 src/meta/src/hummock/compaction/mod.rs | 2 +
 .../picker/min_overlap_compaction_picker.rs | 1 +
 .../picker/space_reclaim_compaction_picker.rs | 4 +-
 .../picker/ttl_reclaim_compaction_picker.rs | 4 +-
 .../tombstone_compaction_selector.rs | 5 +-
 .../manager/compaction_group_manager.rs | 6 +-
 src/meta/src/hummock/manager/gc.rs | 2 +-
 src/meta/src/hummock/manager/mod.rs | 4 +-
 src/meta/src/hummock/manager/tests.rs | 6 +-
 .../hummock/model/compaction_group_config.rs | 3 +-
 src/meta/src/lib.rs | 8 +--
 src/meta/src/manager/catalog/fragment.rs | 6 +-
 src/meta/src/manager/catalog/user.rs | 2 +-
 src/meta/src/manager/cluster.rs | 6 +-
 src/meta/src/manager/idle.rs | 2 +-
 .../src/manager/sink_coordination/manager.rs | 6 +-
 src/meta/src/manager/system_param/mod.rs | 4 +-
 src/meta/src/rpc/server.rs | 40 +++++------
 src/meta/src/stream/scale.rs | 12 ++--
 src/meta/src/stream/source_manager.rs | 7 +-
 src/meta/src/stream/stream_graph/schedule.rs | 18 ++---
 src/meta/src/stream/stream_manager.rs | 2 +-
 src/meta/src/stream/test_fragmenter.rs | 2 +-
 src/object_store/Cargo.toml | 4 +-
 src/object_store/src/lib.rs | 1 -
 src/object_store/src/object/error.rs | 4 +-
 src/object_store/src/object/mod.rs | 3 +
 .../opendal_engine/opendal_object_store.rs | 2 +-
 src/object_store/src/object/s3.rs | 2 +-
 src/prost/Cargo.toml | 3 +
 src/prost/helpers/Cargo.toml | 3 +
 src/risedevtool/Cargo.toml | 3 +
 src/risedevtool/config/Cargo.toml | 3 +
 src/rpc_client/Cargo.toml | 3 +
 src/rpc_client/src/lib.rs | 2 +-
 src/rpc_client/src/meta_client.rs | 8 +--
 src/source/Cargo.toml | 3 +
 src/source/src/fs_connector_source.rs | 3 +-
 src/source/src/lib.rs | 3 +-
 src/sqlparser/Cargo.toml | 3 +
 src/sqlparser/fuzz/Cargo.toml | 3 +
 src/sqlparser/sqlparser_bench/Cargo.toml | 3 +
 src/sqlparser/src/ast/mod.rs | 2 +-
 src/sqlparser/src/parser.rs | 12 +++-
 src/sqlparser/test_runner/Cargo.toml | 3 +
 src/storage/Cargo.toml | 3 +
 src/storage/backup/Cargo.toml | 3 +
 src/storage/backup/cmd/Cargo.toml | 3 +
 src/storage/backup/src/lib.rs | 7 +-
 src/storage/benches/bench_compression.rs | 2 +-
 src/storage/compactor/Cargo.toml | 3 +
 src/storage/hummock_sdk/Cargo.toml | 3 +
 .../compaction_group/hummock_version_ext.rs | 6 +-
 src/storage/hummock_sdk/src/lib.rs | 4 +-
 src/storage/hummock_test/Cargo.toml | 3 +
 src/storage/hummock_trace/Cargo.toml | 3 +
 src/storage/src/error.rs | 2 +-
 .../src/hummock/compactor/compactor_runner.rs | 3 +
 src/storage/src/hummock/compactor/iterator.rs | 1 -
 src/storage/src/hummock/compactor/mod.rs | 2 +-
 .../compactor/shared_buffer_compact.rs | 2 +-
 src/storage/src/hummock/conflict_detector.rs | 2 +-
 src/storage/src/hummock/error.rs | 2 +-
 .../event_handler/hummock_event_handler.rs | 6 +-
 .../src/hummock/event_handler/uploader.rs | 45 ++++++------
 src/storage/src/hummock/file_cache/store.rs | 2 +-
 .../src/hummock/iterator/merge_inner.rs | 30 ++++----
 .../shared_buffer/shared_buffer_batch.rs | 2 +-
 src/storage/src/hummock/sstable/builder.rs | 2 +-
 .../sstable/delete_range_aggregator.rs | 6 +-
 src/storage/src/hummock/sstable/mod.rs | 4 +-
 src/storage/src/hummock/sstable_store.rs | 5 +-
 src/storage/src/hummock/state_store.rs | 2 +-
 src/storage/src/hummock/store/memtable.rs | 2 +-
 src/storage/src/hummock/utils.rs | 3 +
 src/storage/src/lib.rs | 8 +--
 src/storage/src/monitor/local_metrics.rs | 10 +--
 src/storage/src/monitor/monitored_store.rs | 9 ++-
 src/storage/src/monitor/traced_store.rs | 9 ++-
 .../src/table/batch_table/storage_table.rs | 1 -
 src/stream/Cargo.toml | 3 +
 src/stream/benches/stream_hash_agg.rs | 2 +-
 src/stream/src/common/log_store/in_mem.rs | 3 -
 .../common/log_store/kv_log_store/reader.rs | 1 -
 .../common/log_store/kv_log_store/serde.rs | 2 +-
 .../log_store/kv_log_store/test_utils.rs | 2 +-
 .../common/log_store/kv_log_store/writer.rs | 1 -
 .../src/common/table/test_state_table.rs | 10 +--
 .../src/common/table/test_storage_table.rs | 8 +--
 src/stream/src/error.rs | 4 +-
 .../src/executor/aggregation/distinct.rs | 3 +-
 .../executor/backfill/arrangement_backfill.rs | 4 +-
 src/stream/src/executor/backfill/utils.rs | 5 +-
 src/stream/src/executor/dispatch.rs | 14 ++--
 src/stream/src/executor/error.rs | 4 +-
 src/stream/src/executor/hash_agg.rs | 2 +-
 src/stream/src/executor/hop_window.rs | 2 +-
 src/stream/src/executor/lookup/cache.rs | 2 +-
 .../src/executor/managed_state/join/mod.rs | 2 +-
 src/stream/src/executor/mod.rs | 8 +--
 src/stream/src/executor/mview/test_utils.rs | 2 +-
 src/stream/src/executor/over_window/eowc.rs | 2 +-
 .../src/executor/over_window/general.rs | 16 ++---
 .../executor/over_window/over_partition.rs | 2 +-
 .../src/executor/source/source_executor.rs | 4 +-
 src/stream/src/executor/temporal_join.rs | 2 +-
 src/stream/src/executor/top_n/top_n_cache.rs | 3 +-
 .../src/executor/top_n/topn_cache_state.rs | 7 +-
 src/stream/src/lib.rs | 8 +--
 src/stream/src/task/stream_manager.rs | 3 +
 src/test_runner/Cargo.toml | 3 +
 src/tests/compaction_test/Cargo.toml | 3 +
 .../src/compaction_test_runner.rs | 6 +-
 src/tests/e2e_extended_mode/Cargo.toml | 3 +
 src/tests/e2e_extended_mode/src/test.rs | 2 +-
 src/tests/libpq_test/Cargo.toml | 3 +
 src/tests/regress/Cargo.toml | 3 +
 src/tests/simulation/Cargo.toml | 3 +
 .../tests/integration_tests/main.rs | 2 +-
 .../tests/integration_tests/scale/plan.rs | 2 +-
 src/tests/sqlsmith/Cargo.toml | 3 +
 src/tests/sqlsmith/src/reducer.rs | 8 ++-
 src/tests/sqlsmith/src/runner.rs | 8 +--
 src/tests/sqlsmith/src/sql_gen/dml.rs | 2 +-
 src/tests/sqlsmith/tests/frontend/mod.rs | 2 +-
 src/tests/state_cleaning_test/Cargo.toml | 3 +
 src/udf/Cargo.toml | 3 +
 src/utils/local_stats_alloc/Cargo.toml | 3 +
 src/utils/pgwire/Cargo.toml | 3 +
 src/utils/pgwire/src/lib.rs | 1 -
 src/utils/pgwire/src/pg_extended.rs | 2 +-
 src/utils/pgwire/src/pg_message.rs | 4 +-
 src/utils/pgwire/src/pg_protocol.rs | 8 +--
 src/utils/pgwire/src/pg_server.rs | 2 -
 src/utils/runtime/Cargo.toml | 3 +
 src/utils/sync-point/Cargo.toml | 3 +
 src/utils/task_stats_alloc/Cargo.toml | 3 +
 src/utils/variables/Cargo.toml | 3 +
 src/utils/workspace-config/Cargo.toml | 3 +
 260 files changed, 745 insertions(+), 569 deletions(-)
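Beyond the toolchain bump itself, the bulk of this patch moves the lint flags previously injected through `rustflags` in `.cargo/config.toml` into Cargo's workspace-level lint tables. A minimal sketch of the mechanism, assuming the still-unstable `lints` feature that this nightly requires (the real, longer tables are in the `Cargo.toml` diff below; lint names here are abbreviations):

    # workspace root Cargo.toml: define lint levels once for every member
    [workspace.lints.rust]
    unused_must_use = "forbid"    # `forbid` also rejects `#[allow(unused)]`

    [workspace.lints.clippy]
    dbg_macro = "warn"

    # each member crate's Cargo.toml: opt in to the shared table
    [lints]
    workspace = true

With `[unstable] lints = true` in `.cargo/config.toml`, nightly cargo resolves these tables instead of needing `-W`/`-A`/`-F` flags in `RUSTFLAGS`, which is why the long flag list disappears below and nearly every crate gains a two-line `[lints]` stanza.
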
diff --git a/.cargo/config.toml b/.cargo/config.toml
index 54b6e0e42d804..a6d413812c5f4 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -37,23 +37,8 @@ rustflags = [
   # uncomment the following two lines to enable `TaskLocalAlloc`
   # "--cfg",
   # "enable_task_local_alloc",
-  # lints
-  # TODO: use lint configuration in cargo https://github.com/rust-lang/cargo/issues/5034
-  "-Funused_must_use",
-  "-Aclippy::uninlined_format_args",
-  "-Wclippy::dbg_macro",
-  "-Wclippy::disallowed_methods",
-  "-Wclippy::disallowed_types",
-  "-Wclippy::doc_markdown",
-  "-Wclippy::explicit_into_iter_loop",
-  "-Wclippy::explicit_iter_loop",
-  "-Wclippy::inconsistent_struct_constructor",
-  "-Wclippy::unused_async",
-  "-Wclippy::map_flatten",
-  "-Wclippy::no_effect_underscore_binding",
-  "-Wclippy::await_holding_lock",
-  "-Wrustdoc::broken_intra_doc_links",
-  "-Wfuture_incompatible",
-  "-Wnonstandard_style",
-  "-Wrust_2018_idioms",
 ]
+
+[unstable]
+lints = true
diff --git a/Cargo.lock b/Cargo.lock
index cdfa79410491f..279c4c1c2d563 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -157,9 +157,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.72"
+version = "1.0.75"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
+checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
 dependencies = [
  "backtrace",
 ]
@@ -8917,18 +8917,18 @@ dependencies = [
 
 [[package]]
 name = "thiserror"
-version = "1.0.44"
+version = "1.0.48"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90"
+checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7"
 dependencies = [
  "thiserror-impl",
 ]
 
 [[package]]
 name = "thiserror-impl"
-version = "1.0.44"
+version = "1.0.48"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96"
+checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35"
 dependencies = [
  "proc-macro2",
  "quote",
diff --git a/Cargo.toml b/Cargo.toml
index 0c56df58c5e24..83be53f1d7439 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -61,13 +61,33 @@ repository = "https://github.com/risingwavelabs/risingwave"
 
 [workspace.dependencies]
 await-tree = "0.1.1"
-aws-config = { version = "0.55", default-features = false, features = ["rt-tokio", "native-tls"] }
-aws-credential-types = { version = "0.55", default-features = false, features = ["hardcoded-credentials"] }
-aws-sdk-kinesis = { version = "0.28", default-features = false, features = ["rt-tokio", "native-tls"] }
-aws-sdk-s3 = { version = "0.28", default-features = false, features = ["rt-tokio","native-tls"] }
-aws-sdk-ec2 = { version = "0.28", default-features = false, features = ["rt-tokio","native-tls"] }
-aws-sdk-sqs = { version = "0.28", default-features = false, features = ["rt-tokio", "native-tls"] }
-aws-smithy-client = { version = "0.55", default-features = false, features = ["rt-tokio", "native-tls"] }
+aws-config = { version = "0.55", default-features = false, features = [
+    "rt-tokio",
+    "native-tls",
+] }
+aws-credential-types = { version = "0.55", default-features = false, features = [
+    "hardcoded-credentials",
+] }
+aws-sdk-kinesis = { version = "0.28", default-features = false, features = [
+    "rt-tokio",
+    "native-tls",
+] }
+aws-sdk-s3 = { version = "0.28", default-features = false, features = [
+    "rt-tokio",
+    "native-tls",
+] }
+aws-sdk-ec2 = { version = "0.28", default-features = false, features = [
+    "rt-tokio",
+    "native-tls",
+] }
+aws-sdk-sqs = { version = "0.28", default-features = false, features = [
+    "rt-tokio",
+    "native-tls",
+] }
+aws-smithy-client = { version = "0.55", default-features = false, features = [
+    "rt-tokio",
+    "native-tls",
+] }
 aws-smithy-http = "0.55"
 aws-smithy-types = "0.55"
 aws-endpoint = "0.55"
@@ -75,8 +95,14 @@ aws-types = "0.55"
 etcd-client = { package = "madsim-etcd-client", version = "0.3" }
 futures-async-stream = "0.2"
 hytra = "0.1"
-rdkafka = { package = "madsim-rdkafka", git = "https://github.com/madsim-rs/madsim.git", rev = "bb8f063", features = ["cmake-build"] }
-hashbrown = { version = "0.14.0", features = ["ahash", "inline-more", "nightly"] }
+rdkafka = { package = "madsim-rdkafka", git = "https://github.com/madsim-rs/madsim.git", rev = "bb8f063", features = [
+    "cmake-build",
+] }
+hashbrown = { version = "0.14.0", features = [
+    "ahash",
+    "inline-more",
+    "nightly",
+] }
 criterion = { version = "0.5", features = ["async_futures"] }
 tonic = { package = "madsim-tonic", version = "0.3.1" }
 tonic-build = { package = "madsim-tonic-build", version = "0.3.1" }
@@ -117,6 +143,32 @@ risingwave_variables = { path = "./src/utils/variables" }
 risingwave_java_binding = { path = "./src/java_binding" }
 risingwave_jni_core = { path = "src/jni_core" }
 
+[workspace.lints.rust]
+# `forbid` will also prevent the misuse of `#[allow(unused)]`
+unused_must_use = "forbid"
+future_incompatible = "warn"
+nonstandard_style = "warn"
+rust_2018_idioms = "warn"
+
+[workspace.lints.clippy]
+uninlined_format_args = "allow"
+dbg_macro = "warn"
+disallowed_methods = "warn"
+disallowed_types = "warn"
+doc_markdown = "warn"
+explicit_into_iter_loop = "warn"
+explicit_iter_loop = "warn"
+inconsistent_struct_constructor = "warn"
+unused_async = "warn"
+map_flatten = "warn"
+no_effect_underscore_binding = "warn"
+await_holding_lock = "warn"
+
+[workspace.lints.rustdoc]
+private_intra_doc_links = "allow"
+# Explicit lints don't hurt, and sometimes rust-analyzer works better with explicit links.
+redundant_explicit_links = "allow"
+
 [profile.dev]
 lto = 'off'
diff --git a/ci/build-ci-image.sh b/ci/build-ci-image.sh
index 91bfbd8cf4491..299b5d91878fe 100755
--- a/ci/build-ci-image.sh
+++ b/ci/build-ci-image.sh
@@ -13,7 +13,7 @@ cat ../rust-toolchain
 # !!! CHANGE THIS WHEN YOU WANT TO BUMP CI IMAGE !!!
 #
 # AND ALSO docker-compose.yml
 #
 ######################################################
-export BUILD_ENV_VERSION=v20230821
+export BUILD_ENV_VERSION=v20230909
 export BUILD_TAG="public.ecr.aws/x5u3w5h6/rw-build-env:${BUILD_ENV_VERSION}"
diff --git a/ci/docker-compose.yml b/ci/docker-compose.yml
index 4e622553842ae..d0cdbe4af816a 100644
--- a/ci/docker-compose.yml
+++ b/ci/docker-compose.yml
@@ -71,7 +71,7 @@ services:
     retries: 5
 
   source-test-env:
-    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230821
+    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909
     depends_on:
       - mysql
       - db
@@ -81,7 +81,7 @@ services:
       - ..:/risingwave
 
   sink-test-env:
-    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230821
+    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909
     depends_on:
       - mysql
       - db
@@ -91,12 +91,12 @@ services:
       - ..:/risingwave
 
   rw-build-env:
-    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230821
+    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909
     volumes:
       - ..:/risingwave
 
   ci-flamegraph-env:
-    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230821
+    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909
     # NOTE(kwannoel): This is used in order to permit
     # syscalls for `nperf` (perf_event_open),
     # so it can do CPU profiling.
@@ -107,7 +107,7 @@
       - ..:/risingwave
 
   regress-test-env:
-    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230821
+    image: public.ecr.aws/x5u3w5h6/rw-build-env:v20230909
     depends_on:
       db:
         condition: service_healthy
diff --git a/ci/rust-toolchain b/ci/rust-toolchain
index 648290684fc13..ebc0b6c285a4e 100644
--- a/ci/rust-toolchain
+++ b/ci/rust-toolchain
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2023-05-31"
+channel = "nightly-2023-09-09"
diff --git a/ci/scripts/check.sh b/ci/scripts/check.sh
index 26b03343e4974..728788227e8f6 100755
--- a/ci/scripts/check.sh
+++ b/ci/scripts/check.sh
@@ -35,7 +35,7 @@ sccache --show-stats
 sccache --zero-stats
 
 echo "--- Build documentation"
-RUSTDOCFLAGS="-Dwarnings -Arustdoc::private_intra_doc_links" cargo doc --document-private-items --no-deps
+RUSTDOCFLAGS="-Dwarnings" cargo doc --document-private-items --no-deps
 
 echo "--- Show sccache stats"
 sccache --show-stats
diff --git a/ci/scripts/gen-flamegraph.sh b/ci/scripts/gen-flamegraph.sh
index d2556a8ac9b06..11abf2290d6f1 100755
--- a/ci/scripts/gen-flamegraph.sh
+++ b/ci/scripts/gen-flamegraph.sh
@@ -6,6 +6,8 @@ set -euo pipefail
 
 source ci/scripts/common.sh
 
+RUST_TOOLCHAIN=$(cat rust-toolchain)
+
 QUERY_DIR="/risingwave/ci/scripts/sql/nexmark"
 
 # TODO(kwannoel): This is a workaround since workdir is `/risingwave` in the docker container.
@@ -79,7 +81,7 @@ install_all() {
   git clone https://github.com/gimli-rs/addr2line
   pushd addr2line
   git checkout 0.20.0
-  echo "nightly-2023-04-07" > rust-toolchain
+  echo "$RUST_TOOLCHAIN" > rust-toolchain
   cargo b --examples -r
   mv ./target/release/examples/addr2line $(which addr2line)
   popd
diff --git a/integration_tests/feature-store/server/Cargo.toml b/integration_tests/feature-store/server/Cargo.toml
index c1fe08fb15475..123f089f5e7a3 100644
--- a/integration_tests/feature-store/server/Cargo.toml
+++ b/integration_tests/feature-store/server/Cargo.toml
@@ -26,3 +26,6 @@ tonic-build = "0.7.1"
 [[bin]]
 name = "server"
 path = "src/main.rs"
+
+[lints]
+workspace = true
diff --git a/integration_tests/feature-store/simulator/Cargo.toml b/integration_tests/feature-store/simulator/Cargo.toml
index f0a82f9867ecb..1fd9609ba2d1a 100644
--- a/integration_tests/feature-store/simulator/Cargo.toml
+++ b/integration_tests/feature-store/simulator/Cargo.toml
@@ -20,3 +20,6 @@ prost = "0.10"
 serde = { version = "1", features = ["derive"] }
 futures = "0.3.0"
 csv = "1.2.2"
+
+[lints]
+workspace = true
diff --git a/rustfmt.toml b/rustfmt.toml
index a5ad1d9440cd6..5757d76295ee8 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -9,4 +9,7 @@ group_imports = "StdExternalCrate"
 reorder_impl_items = true
 reorder_imports = true
 tab_spaces = 4
-wrap_comments = true
+# TODO: Fix it later. It produces too many unwanted changes related with markdown lists now. e.g.,
+# https://github.com/rust-lang/rustfmt/issues/5862
+# https://github.com/rust-lang/rustfmt/issues/5836
+# wrap_comments = true
diff --git a/scripts/source/prepare_ci_pubsub/Cargo.toml b/scripts/source/prepare_ci_pubsub/Cargo.toml
index 54db02f017287..7f52421512411 100644
--- a/scripts/source/prepare_ci_pubsub/Cargo.toml
+++ b/scripts/source/prepare_ci_pubsub/Cargo.toml
@@ -23,3 +23,6 @@ tokio = { version = "0.2", package = "madsim-tokio", features = [
   "signal",
   "fs",
 ] }
+
+[lints]
+workspace = true
diff --git a/src/batch/Cargo.toml b/src/batch/Cargo.toml
index 145a05666d15d..f3c5032d5619a 100644
--- a/src/batch/Cargo.toml
+++ b/src/batch/Cargo.toml
@@ -100,3 +100,6 @@ harness = false
 [[bench]]
 name = "limit"
 harness = false
+
+[lints]
+workspace = true
diff --git a/src/batch/benches/expand.rs b/src/batch/benches/expand.rs
index 7c94cd6e176df..c300408bd8882 100644
--- a/src/batch/benches/expand.rs
+++ b/src/batch/benches/expand.rs
@@ -44,7 +44,7 @@ fn bench_expand(c: &mut Criterion) {
                 let chunk_num = SIZE / chunk_size;
                 b.to_async(&rt).iter_batched(
                     || create_expand_executor(vec![vec![0, 1], vec![2]], chunk_size, chunk_num),
-                    |e| execute_executor(e),
+                    execute_executor,
                     BatchSize::SmallInput,
                 );
             },
diff --git a/src/batch/benches/filter.rs b/src/batch/benches/filter.rs
index 95318dfcf20fd..28169ba6bcab5 100644
--- a/src/batch/benches/filter.rs
+++ b/src/batch/benches/filter.rs
@@ -46,7 +46,7 @@ fn bench_filter(c: &mut Criterion) {
                 let chunk_num = TOTAL_SIZE / chunk_size;
                 b.to_async(&rt).iter_batched(
                     || create_filter_executor(chunk_size, chunk_num),
-                    |e| execute_executor(e),
+                    execute_executor,
                     BatchSize::SmallInput,
                 );
             },
diff --git a/src/batch/benches/hash_agg.rs b/src/batch/benches/hash_agg.rs
index b22fb88c87c77..e5a561e03a535 100644
--- a/src/batch/benches/hash_agg.rs
+++ b/src/batch/benches/hash_agg.rs
@@ -142,7 +142,7 @@ fn bench_hash_agg(c: &mut Criterion) {
                         chunk_num,
                     )
                 },
-                |e| execute_executor(e),
+                execute_executor,
                 BatchSize::SmallInput,
             );
         },
diff --git a/src/batch/benches/limit.rs b/src/batch/benches/limit.rs
index 45d1fc3d17b35..3096a8cbea6eb 100644
--- a/src/batch/benches/limit.rs
+++ b/src/batch/benches/limit.rs
@@ -51,7 +51,7 @@ fn bench_limit(c: &mut Criterion) {
                 let chunk_num = SIZE / chunk_size;
                 b.to_async(&rt).iter_batched(
                     || create_limit_executor(chunk_size, chunk_num, 128, 128),
-                    |e| execute_executor(e),
+                    execute_executor,
                     BatchSize::SmallInput,
                 );
             },
diff --git a/src/batch/benches/sort.rs b/src/batch/benches/sort.rs
index a1b31df1f08fb..1c089790f5f7a 100644
--- a/src/batch/benches/sort.rs
+++ b/src/batch/benches/sort.rs
@@ -80,7 +80,7 @@ fn bench_order_by(c: &mut Criterion) {
                 let chunk_num = SIZE / chunk_size;
                 b.to_async(&rt).iter_batched(
                     || create_order_by_executor(chunk_size, chunk_num, single_column),
-                    |e| execute_executor(e),
+                    execute_executor,
                     BatchSize::SmallInput,
                 );
             },
diff --git a/src/batch/benches/top_n.rs b/src/batch/benches/top_n.rs
index 9e713b1641fd9..a02a5b401860b 100644
--- a/src/batch/benches/top_n.rs
+++ b/src/batch/benches/top_n.rs
@@ -85,7 +85,7 @@ fn bench_top_n(c: &mut Criterion) {
                 let chunk_num = SIZE / chunk_size;
                 b.to_async(&rt).iter_batched(
                     || create_top_n_executor(chunk_size, chunk_num, single_column, 128, 128),
-                    |e| execute_executor(e),
+                    execute_executor,
                     BatchSize::SmallInput,
                 );
             },
diff --git a/src/batch/benches/utils/mod.rs b/src/batch/benches/utils/mod.rs
index 500fa752feb2a..cf057eb0f659f 100644
--- a/src/batch/benches/utils/mod.rs
+++ b/src/batch/benches/utils/mod.rs
@@ -53,7 +53,7 @@ pub fn bench_join(
                     right_chunk_num,
                 )
             },
-            |e| execute_executor(e),
+            execute_executor,
             BatchSize::SmallInput,
         );
     },
diff --git a/src/batch/src/executor/group_top_n.rs b/src/batch/src/executor/group_top_n.rs
index 4fa4094e824ac..32f8a8b73c61e 100644
--- a/src/batch/src/executor/group_top_n.rs
+++ b/src/batch/src/executor/group_top_n.rs
@@ -217,7 +217,7 @@ impl GroupTopNExecutor {
         }
 
         let mut chunk_builder = DataChunkBuilder::new(self.schema.data_types(), self.chunk_size);
-        for (_, h) in groups.iter_mut() {
+        for (_, h) in &mut groups {
             let mut heap = TopNHeap::empty();
             swap(&mut heap, h);
             for ele in heap.dump() {
@@ -308,7 +308,7 @@ mod tests {
         let mut stream = top_n_executor.execute();
         let res = stream.next().await;
-        assert!(matches!(res, Some(_)));
+        assert!(res.is_some());
         if let Some(res) = res {
             let res = res.unwrap();
             assert!(
@@ -338,7 +338,7 @@ mod tests {
         }
 
         let res = stream.next().await;
-        assert!(matches!(res, None));
+        assert!(res.is_none());
     }
 
     assert_eq!(0, parent_mem.get_bytes_used());
diff --git a/src/batch/src/executor/merge_sort_exchange.rs b/src/batch/src/executor/merge_sort_exchange.rs
index f80207d22706c..11574fb5b1b21 100644
--- a/src/batch/src/executor/merge_sort_exchange.rs
+++ b/src/batch/src/executor/merge_sort_exchange.rs
@@ -329,7 +329,7 @@ mod tests {
         let mut stream = executor.execute();
         let res = stream.next().await;
-        assert!(matches!(res, Some(_)));
+        assert!(res.is_some());
         if let Some(res) = res {
             let res = res.unwrap();
             assert_eq!(res.capacity(), 3 * num_sources);
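The bench and test hunks above, and many more below, are mechanical fixes for lints that fire on the newer toolchain: a closure that only forwards its argument can be replaced by the function item itself (`clippy::redundant_closure`), and `matches!(x, Some(_))` / `matches!(x, None)` by `is_some()` / `is_none()` (`clippy::redundant_pattern_matching`). A minimal sketch with hypothetical names:

    // was: .map(|e| execute(e)) -- the closure adds nothing over the fn item
    fn execute(batch: Vec<i32>) -> usize {
        batch.len()
    }

    fn main() {
        let sizes: Vec<usize> = vec![vec![1], vec![2, 3]].into_iter().map(execute).collect();
        let first = sizes.first();
        // was: assert!(matches!(first, Some(_)));
        assert!(first.is_some());
    }
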
diff --git a/src/batch/src/executor/order_by.rs b/src/batch/src/executor/order_by.rs
index 1c842f1bea6f1..74a297c1f02d8 100644
--- a/src/batch/src/executor/order_by.rs
+++ b/src/batch/src/executor/order_by.rs
@@ -196,7 +196,7 @@ mod tests {
         let mut stream = order_by_executor.execute();
         let res = stream.next().await;
-        assert!(matches!(res, Some(_)));
+        assert!(res.is_some());
         if let Some(res) = res {
             let res = res.unwrap();
             let col0 = res.column_at(0);
@@ -246,7 +246,7 @@ mod tests {
         let mut stream = order_by_executor.execute();
         let res = stream.next().await;
-        assert!(matches!(res, Some(_)));
+        assert!(res.is_some());
         if let Some(res) = res {
             let res = res.unwrap();
             let col0 = res.column_at(0);
@@ -296,7 +296,7 @@ mod tests {
         let mut stream = order_by_executor.execute();
         let res = stream.next().await;
-        assert!(matches!(res, Some(_)));
+        assert!(res.is_some());
         if let Some(res) = res {
             let res = res.unwrap();
             let col0 = res.column_at(0);
diff --git a/src/batch/src/executor/sort_over_window.rs b/src/batch/src/executor/sort_over_window.rs
index 533b5e6a6782f..f12ebc2452384 100644
--- a/src/batch/src/executor/sort_over_window.rs
+++ b/src/batch/src/executor/sort_over_window.rs
@@ -177,7 +177,7 @@ impl SortOverWindowExecutor {
     ) {
         let mut states =
             WindowStates::new(this.calls.iter().map(create_window_state).try_collect()?);
-        for row in rows.iter() {
+        for row in &*rows {
             for (call, state) in this.calls.iter().zip_eq_fast(states.iter_mut()) {
                 // TODO(rc): batch appending
                 state.append(
diff --git a/src/batch/src/executor/source.rs b/src/batch/src/executor/source.rs
index f3ecf28f108d5..78733420c9158 100644
--- a/src/batch/src/executor/source.rs
+++ b/src/batch/src/executor/source.rs
@@ -66,7 +66,7 @@ impl BoxedExecutorBuilder for SourceExecutor {
         // prepare connector source
         let source_props: HashMap =
-            HashMap::from_iter(source_node.properties.clone().into_iter());
+            HashMap::from_iter(source_node.properties.clone());
         let config = ConnectorProperties::extract(source_props)
             .map_err(|e| RwError::from(ConnectorError(e.into())))?;
diff --git a/src/batch/src/executor/top_n.rs b/src/batch/src/executor/top_n.rs
index 211c96abbe573..cffbae855de61 100644
--- a/src/batch/src/executor/top_n.rs
+++ b/src/batch/src/executor/top_n.rs
@@ -225,7 +225,7 @@ impl Eq for HeapElem {}
 
 impl PartialOrd for HeapElem {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        self.encoded_row.partial_cmp(&other.encoded_row)
+        Some(self.cmp(other))
     }
 }
 
@@ -350,7 +350,7 @@ mod tests {
         let mut stream = top_n_executor.execute();
         let res = stream.next().await;
-        assert!(matches!(res, Some(_)));
+        assert!(res.is_some());
         if let Some(res) = res {
             let res = res.unwrap();
             assert_eq!(res.cardinality(), 3);
@@ -361,7 +361,7 @@ mod tests {
         }
 
         let res = stream.next().await;
-        assert!(matches!(res, None));
+        assert!(res.is_none());
     }
 
     #[tokio::test]
@@ -408,6 +408,6 @@ mod tests {
         let mut stream = top_n_executor.execute();
         let res = stream.next().await;
-        assert!(matches!(res, None));
+        assert!(res.is_none());
     }
 }
diff --git a/src/batch/src/monitor/stats.rs b/src/batch/src/monitor/stats.rs
index d2de36d8a29a0..c9e9dddfa861d 100644
--- a/src/batch/src/monitor/stats.rs
+++ b/src/batch/src/monitor/stats.rs
@@ -63,7 +63,7 @@ pub static GLOBAL_BATCH_TASK_METRICS: LazyLock =
 impl BatchTaskMetrics {
     /// The created [`BatchTaskMetrics`] is already registered to the `registry`.
     fn new(registry: &Registry) -> Self {
-        let task_labels = vec!["query_id", "stage_id", "task_id"];
+        let task_labels = ["query_id", "stage_id", "task_id"];
         let mut descs = Vec::with_capacity(8);
 
         let task_first_poll_delay = GaugeVec::new(opts!(
diff --git a/src/batch/src/task/task_manager.rs b/src/batch/src/task/task_manager.rs
index f84cb842eba9e..858e9bc432b96 100644
--- a/src/batch/src/task/task_manager.rs
+++ b/src/batch/src/task/task_manager.rs
@@ -289,7 +289,7 @@ impl BatchManager {
         let mut max_mem_task_id = None;
         let mut max_mem = usize::MIN;
         let guard = self.tasks.lock();
-        for (t_id, t) in guard.iter() {
+        for (t_id, t) in &*guard {
             // If the task has been stopped, we should not count this.
             if t.is_end() {
                 continue;
diff --git a/src/bench/Cargo.toml b/src/bench/Cargo.toml
index 3800bb12d045d..80d8e484dfa8f 100644
--- a/src/bench/Cargo.toml
+++ b/src/bench/Cargo.toml
@@ -55,3 +55,6 @@ path = "s3_bench/main.rs"
 [features]
 bpf = ["bcc", "risingwave_storage/bpf"]
 trace = ["opentelemetry", "risingwave_rt", "tracing/release_max_level_trace"]
+
+[lints]
+workspace = true
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index 59d7a8c13b34b..45b715ebcb206 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -67,3 +67,6 @@ path = "src/bin/compactor.rs"
 [[bin]]
 name = "risectl"
 path = "src/bin/ctl.rs"
+
+[lints]
+workspace = true
diff --git a/src/cmd_all/Cargo.toml b/src/cmd_all/Cargo.toml
index f07d262923e82..3c3b207637b10 100644
--- a/src/cmd_all/Cargo.toml
+++ b/src/cmd_all/Cargo.toml
@@ -64,3 +64,6 @@ tikv-jemallocator = { git = "https://github.com/yuhao-su/jemallocator.git", feat
 [[bin]]
 name = "risingwave"
 path = "src/bin/risingwave.rs"
+
+[lints]
+workspace = true
diff --git a/src/common/Cargo.toml b/src/common/Cargo.toml
index 99a0887d567ae..83f85b8a3be6e 100644
--- a/src/common/Cargo.toml
+++ b/src/common/Cargo.toml
@@ -151,3 +151,6 @@ harness = false
 [[bin]]
 name = "example-config"
 path = "src/bin/default_config.rs"
+
+[lints]
+workspace = true
diff --git a/src/common/common_service/Cargo.toml b/src/common/common_service/Cargo.toml
index c7e7f717eaaa2..1eaa14c46b8e9 100644
--- a/src/common/common_service/Cargo.toml
+++ b/src/common/common_service/Cargo.toml
@@ -30,3 +30,6 @@ tracing = "0.1"
 
 [target.'cfg(not(madsim))'.dependencies]
 workspace-hack = { path = "../../workspace-hack" }
+
+[lints]
+workspace = true
diff --git a/src/common/common_service/src/tracing.rs b/src/common/common_service/src/tracing.rs
index 1b6a27e6648a8..54637a5945a69 100644
--- a/src/common/common_service/src/tracing.rs
+++ b/src/common/common_service/src/tracing.rs
@@ -71,18 +71,17 @@ where
         let mut inner = std::mem::replace(&mut self.inner, clone);
 
         async move {
-            let span = if let Some(tracing_context) =
-                TracingContext::from_http_headers(req.headers())
-            {
-                let span = tracing::info_span!(
-                    "grpc_serve",
-                    "otel.name" = req.uri().path(),
-                    uri = %req.uri()
-                );
-                tracing_context.attach(span)
-            } else {
-                tracing::Span::none() // if there's no parent span, disable tracing for this request
-            };
+            let span =
+                if let Some(tracing_context) = TracingContext::from_http_headers(req.headers()) {
+                    let span = tracing::info_span!(
+                        "grpc_serve",
+                        "otel.name" = req.uri().path(),
+                        uri = %req.uri()
+                    );
+                    tracing_context.attach(span)
+                } else {
+                    tracing::Span::none() // if there's no parent span, disable tracing for this request
+                };
 
             inner.call(req).instrument(span).await
         }
diff --git a/src/common/proc_macro/Cargo.toml b/src/common/proc_macro/Cargo.toml
index 99aeb5f33403d..b129cedc0e183 100644
--- a/src/common/proc_macro/Cargo.toml
+++ b/src/common/proc_macro/Cargo.toml
@@ -24,4 +24,6 @@ syn = "1"
 bae = "0.1.7"
 
 [target.'cfg(not(madsim))'.dependencies]
-workspace-hack = { path = "../../workspace-hack" }
\ No newline at end of file
+workspace-hack = { path = "../../workspace-hack" }
+[lints]
+workspace = true
diff --git a/src/common/proc_macro/src/estimate_size.rs b/src/common/proc_macro/src/estimate_size.rs
index 741a0f598ca54..9ff6df956d590 100644
--- a/src/common/proc_macro/src/estimate_size.rs
+++ b/src/common/proc_macro/src/estimate_size.rs
@@ -41,7 +41,7 @@ fn has_nested_flag_attribute(
     if let Some(ident) = meta.path().get_ident() {
         if *ident == name {
             if let syn::Meta::List(list) = meta {
-                for nested in list.nested.iter() {
+                for nested in &list.nested {
                     if let syn::NestedMeta::Meta(syn::Meta::Path(path)) = nested {
                         let path = path
                             .get_ident()
@@ -65,7 +65,7 @@ pub fn has_nested_flag_attribute_list(
     name: &'static str,
     flag: &'static str,
 ) -> bool {
-    for attr in list.iter() {
+    for attr in list {
         if has_nested_flag_attribute(attr, name, flag) {
             return true;
         }
@@ -77,7 +77,7 @@ pub fn has_nested_flag_attribute_list(
 pub fn extract_ignored_generics_list(list: &[syn::Attribute]) -> Vec<String> {
     let mut collection = Vec::new();
 
-    for attr in list.iter() {
+    for attr in list {
         let mut list = extract_ignored_generics(attr);
         collection.append(&mut list);
@@ -95,7 +95,7 @@ pub fn extract_ignored_generics(attr: &syn::Attribute) -> Vec<String> {
         return collection;
     }
     if let syn::Meta::List(list) = meta {
-        for nested in list.nested.iter() {
+        for nested in &list.nested {
             if let syn::NestedMeta::Meta(nmeta) = nested {
                 let ident = nmeta
                     .path()
@@ -109,7 +109,7 @@
                 }
 
                 if let syn::Meta::List(list) = nmeta {
-                    for nested in list.nested.iter() {
+                    for nested in &list.nested {
                         if let syn::NestedMeta::Meta(syn::Meta::Path(path)) = nested {
                             let path = path
                                 .get_ident()
@@ -134,7 +134,7 @@ pub fn add_trait_bounds(mut generics: syn::Generics, ignored: &[String]) -> syn:
     if let syn::GenericParam::Type(type_param) = param {
         let name = type_param.ident.to_string();
         let mut found = false;
-        for ignored in ignored.iter() {
+        for ignored in ignored {
            if ignored == &name {
                found = true;
                break;
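The loop rewrites in this proc-macro file repeat throughout the whole patch: with the `clippy::explicit_iter_loop` warning now maintained at workspace level (see the `Cargo.toml` diff above) and extended on this clippy version, the idiom is `for x in &xs` instead of `for x in xs.iter()`, `&mut xs` instead of `xs.iter_mut()`, and a `&*` reborrow for shared containers such as `Arc<[T]>`. A small sketch with made-up data:

    use std::sync::Arc;

    fn main() {
        let columns: Arc<[String]> = vec!["a".into(), "b".into()].into();
        // was: for c in columns.iter() { ... }
        for c in &*columns {
            println!("{c}");
        }

        let mut groups = vec![(1, vec![10]), (2, vec![20])];
        // was: for (_, h) in groups.iter_mut() { ... }
        for (_, h) in &mut groups {
            h.push(0);
        }
    }
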
diff --git a/src/common/proc_macro/src/lib.rs b/src/common/proc_macro/src/lib.rs
index 6894c57b6ebeb..060ee1950624e 100644
--- a/src/common/proc_macro/src/lib.rs
+++ b/src/common/proc_macro/src/lib.rs
@@ -100,7 +100,7 @@ pub fn derive_estimate_size(input: TokenStream) -> TokenStream {
         let mut cmds = Vec::with_capacity(data_enum.variants.len());
 
-        for variant in data_enum.variants.iter() {
+        for variant in &data_enum.variants {
             let ident = &variant.ident;
 
             match &variant.fields {
@@ -143,7 +143,7 @@ pub fn derive_estimate_size(input: TokenStream) -> TokenStream {
         let mut field_cmds = Vec::with_capacity(num_fields);
 
-        for field in named_fields.named.iter() {
+        for field in &named_fields.named {
             let field_ident = field.ident.as_ref().unwrap();
 
             field_idents.push(field_ident);
@@ -212,7 +212,7 @@ pub fn derive_estimate_size(input: TokenStream) -> TokenStream {
         }
     }
     syn::Fields::Named(named_fields) => {
-        for field in named_fields.named.iter() {
+        for field in &named_fields.named {
             // Check if the value should be ignored. If so skip it.
             if has_nested_flag_attribute_list(&field.attrs, "estimate_size", "ignore") {
                 continue;
diff --git a/src/common/src/array/bool_array.rs b/src/common/src/array/bool_array.rs
index 347ec73d28ad2..9835ee8a5865c 100644
--- a/src/common/src/array/bool_array.rs
+++ b/src/common/src/array/bool_array.rs
@@ -287,7 +287,7 @@ mod tests {
                 None => NULL_VAL_FOR_HASH.hash(state),
             })
         });
-        let hashes = hash_finish(&mut states[..]);
+        let hashes = hash_finish(&states[..]);
 
         let count = hashes.iter().counts().len();
         assert_eq!(count, 6);
diff --git a/src/common/src/array/data_chunk.rs b/src/common/src/array/data_chunk.rs
index 53c8b8b9c15da..f335b56a60edb 100644
--- a/src/common/src/array/data_chunk.rs
+++ b/src/common/src/array/data_chunk.rs
@@ -67,7 +67,7 @@ pub struct DataChunk {
 }
 
 impl DataChunk {
-    pub(crate) const PRETTY_TABLE_PRESET: &str = "||--+-++| ++++++";
+    pub(crate) const PRETTY_TABLE_PRESET: &'static str = "||--+-++| ++++++";
 
     /// Create a `DataChunk` with `columns` and visibility. The visibility can either be a `Bitmap`
     /// or a simple cardinality number.
@@ -228,7 +228,7 @@ impl DataChunk {
             columns: Default::default(),
         };
         let column_ref = &mut proto.columns;
-        for array in self.columns.iter() {
+        for array in &*self.columns {
             column_ref.push(array.to_protobuf());
         }
         proto
@@ -371,7 +371,7 @@ impl DataChunk {
             let array = self.column_at(*column_idx);
             array.hash_vec(&mut states[..]);
         }
-        finalize_hashers(&mut states[..])
+        finalize_hashers(&states[..])
             .into_iter()
             .map(|hash_code| hash_code.into())
             .collect_vec()
@@ -502,7 +502,7 @@ impl DataChunk {
     fn partition_sizes(&self) -> (usize, Vec<&ArrayRef>) {
         let mut col_variable: Vec<&ArrayRef> = vec![];
         let mut row_len_fixed: usize = 0;
-        for c in self.columns.iter() {
+        for c in &*self.columns {
             if let Some(field_len) = try_get_exact_serialize_datum_size(c) {
                 row_len_fixed += field_len;
             } else {
@@ -558,8 +558,7 @@ impl DataChunk {
         }
 
         // Then do the actual serialization
-        for c in self.columns.iter() {
-            let c = c;
+        for c in &*self.columns {
             assert_eq!(c.len(), rows_num);
             for (i, buffer) in buffers.iter_mut().enumerate() {
                 // SAFETY(value_at_unchecked): the idx is always in bound.
@@ -580,8 +579,7 @@ impl DataChunk {
                 buffers.push(Self::init_buffer(row_len_fixed, &col_variable, i));
             }
         }
-        for c in self.columns.iter() {
-            let c = c;
+        for c in &*self.columns {
             assert_eq!(c.len(), *rows_num);
             for (i, buffer) in buffers.iter_mut().enumerate() {
                 // SAFETY(value_at_unchecked): the idx is always in bound.
@@ -839,7 +837,7 @@ impl DataChunkTestExt for DataChunk {
         let cols = self.columns();
         let vis = &self.vis2;
         let n = vis.len();
-        for col in cols.iter() {
+        for col in cols {
             assert_eq!(col.len(), n);
         }
     }
diff --git a/src/common/src/array/decimal_array.rs b/src/common/src/array/decimal_array.rs
index 046d21a311b1e..03fb6a672dc1e 100644
--- a/src/common/src/array/decimal_array.rs
+++ b/src/common/src/array/decimal_array.rs
@@ -128,7 +128,7 @@ mod tests {
                 None => NULL_VAL_FOR_HASH.hash(state),
             })
         });
-        let hashes = hash_finish(&mut states[..]);
+        let hashes = hash_finish(&states[..]);
 
         let count = hashes.iter().counts().len();
         assert_eq!(count, 30);
diff --git a/src/common/src/array/list_array.rs b/src/common/src/array/list_array.rs
index ef9b59ea646bc..2c4a8cf042548 100644
--- a/src/common/src/array/list_array.rs
+++ b/src/common/src/array/list_array.rs
@@ -30,8 +30,7 @@ use crate::buffer::{Bitmap, BitmapBuilder};
 use crate::estimate_size::EstimateSize;
 use crate::row::Row;
 use crate::types::{
-    hash_datum, DataType, Datum, DatumRef, DefaultPartialOrd, Scalar, ScalarRefImpl, ToDatumRef,
-    ToText,
+    hash_datum, DataType, Datum, DatumRef, DefaultOrd, Scalar, ScalarRefImpl, ToDatumRef, ToText,
 };
 use crate::util::memcmp_encoding;
 use crate::util::value_encoding::estimate_serialize_datum_size;
@@ -350,13 +349,13 @@ pub struct ListValue {
 
 impl PartialOrd for ListValue {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        self.as_scalar_ref().partial_cmp(&other.as_scalar_ref())
+        Some(self.cmp(other))
     }
 }
 
 impl Ord for ListValue {
     fn cmp(&self, other: &Self) -> Ordering {
-        self.partial_cmp(other).unwrap()
+        self.as_scalar_ref().cmp(&other.as_scalar_ref())
     }
 }
@@ -532,11 +531,19 @@ impl PartialEq for ListRef<'_> {
     }
 }
 
+impl Eq for ListRef<'_> {}
+
 impl PartialOrd for ListRef<'_> {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for ListRef<'_> {
+    fn cmp(&self, other: &Self) -> Ordering {
         iter_elems_ref!(*self, lhs, {
             iter_elems_ref!(*other, rhs, {
-                lhs.partial_cmp_by(rhs, |lv, rv| lv.default_partial_cmp(&rv))
+                lhs.cmp_by(rhs, |lv, rv| lv.default_cmp(&rv))
             })
         })
     }
 }
@@ -599,15 +606,6 @@ impl ToText for ListRef<'_> {
     }
 }
 
-impl Eq for ListRef<'_> {}
-
-impl Ord for ListRef<'_> {
-    fn cmp(&self, other: &Self) -> Ordering {
-        // The order between two lists is deterministic.
-        self.partial_cmp(other).unwrap()
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use more_asserts::{assert_gt, assert_lt};
diff --git a/src/common/src/array/mod.rs b/src/common/src/array/mod.rs
index 5dff9214e4298..ba9b0bff0f4f5 100644
--- a/src/common/src/array/mod.rs
+++ b/src/common/src/array/mod.rs
@@ -709,7 +709,7 @@ mod test_util {
     use super::Array;
     use crate::util::iter_util::ZipEqFast;
 
-    pub fn hash_finish(hashers: &mut [H]) -> Vec {
+    pub fn hash_finish(hashers: &[H]) -> Vec {
         return hashers
             .iter()
             .map(|hasher| hasher.finish())
@@ -733,8 +733,8 @@ mod test_util {
         itertools::cons_tuples(
             expects
                 .iter()
-                .zip_eq_fast(hash_finish(&mut states_scalar[..]))
-                .zip_eq_fast(hash_finish(&mut states_vec[..])),
+                .zip_eq_fast(hash_finish(&states_scalar[..]))
+                .zip_eq_fast(hash_finish(&states_vec[..])),
         )
         .all(|(a, b, c)| *a == b && b == c);
     }
diff --git a/src/common/src/array/stream_chunk.rs b/src/common/src/array/stream_chunk.rs
index f4e58036b2273..e280af1257c23 100644
--- a/src/common/src/array/stream_chunk.rs
+++ b/src/common/src/array/stream_chunk.rs
@@ -374,7 +374,7 @@ impl From for Arc<[Op]> {
 
 /// A mutable wrapper for `StreamChunk`. can only set the visibilities and ops in place, can not
 /// change the length.
-struct StreamChunkMut {
+pub struct StreamChunkMut {
     columns: Arc<[ArrayRef]>,
     ops: OpsMut,
     vis: VisMut,
diff --git a/src/common/src/array/struct_array.rs b/src/common/src/array/struct_array.rs
index dfb2ad721cfd0..9dfb23fe4e921 100644
--- a/src/common/src/array/struct_array.rs
+++ b/src/common/src/array/struct_array.rs
@@ -28,8 +28,7 @@ use crate::array::ArrayRef;
 use crate::buffer::{Bitmap, BitmapBuilder};
 use crate::estimate_size::EstimateSize;
 use crate::types::{
-    hash_datum, DataType, Datum, DatumRef, DefaultPartialOrd, Scalar, StructType, ToDatumRef,
-    ToText,
+    hash_datum, DataType, Datum, DatumRef, DefaultOrd, Scalar, StructType, ToDatumRef, ToText,
 };
 use crate::util::iter_util::ZipEqFast;
 use crate::util::memcmp_encoding;
@@ -280,13 +279,13 @@ pub struct StructValue {
 
 impl PartialOrd for StructValue {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        self.as_scalar_ref().partial_cmp(&other.as_scalar_ref())
+        Some(self.cmp(other))
     }
 }
 
 impl Ord for StructValue {
     fn cmp(&self, other: &Self) -> Ordering {
-        self.partial_cmp(other).unwrap()
+        self.as_scalar_ref().cmp(&other.as_scalar_ref())
     }
 }
@@ -374,14 +373,20 @@ impl PartialEq for StructRef<'_> {
     }
 }
 
+impl Eq for StructRef<'_> {}
+
 impl PartialOrd for StructRef<'_> {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for StructRef<'_> {
+    fn cmp(&self, other: &Self) -> Ordering {
         iter_fields_ref!(*self, lhs, {
             iter_fields_ref!(*other, rhs, {
-                if lhs.len() != rhs.len() {
-                    return None;
-                }
-                lhs.partial_cmp_by(rhs, |lv, rv| lv.default_partial_cmp(&rv))
+                assert_eq!(lhs.len(), rhs.len());
+                lhs.cmp_by(rhs, |lv, rv| lv.default_cmp(&rv))
             })
         })
     }
 }
@@ -429,15 +434,6 @@ impl ToText for StructRef<'_> {
     }
 }
 
-impl Eq for StructRef<'_> {}
-
-impl Ord for StructRef<'_> {
-    fn cmp(&self, other: &Self) -> Ordering {
-        // The order between two structs is deterministic.
-        self.partial_cmp(other).unwrap()
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use more_asserts::assert_gt;
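The `ListValue`/`ListRef` and `StructValue`/`StructRef` hunks above, like the earlier `HeapElem` one, all apply the fix suggested by the new `clippy::incorrect_partial_ord_impl_on_ord_type` lint (the `ordered.rs` diff below allows it in the one place the delegation intentionally goes the other way): when a type is `Ord`, the comparison logic belongs in `cmp`, and `partial_cmp` should be a thin delegate, never the reverse. A minimal sketch:

    use std::cmp::Ordering;

    #[derive(PartialEq, Eq)]
    struct EncodedRow(Vec<u8>);

    impl Ord for EncodedRow {
        fn cmp(&self, other: &Self) -> Ordering {
            self.0.cmp(&other.0) // the total order lives here
        }
    }

    impl PartialOrd for EncodedRow {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other)) // delegate; consistent by construction
        }
    }

    fn main() {
        assert!(EncodedRow(vec![1]) < EncodedRow(vec![2]));
    }
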
diff --git a/src/common/src/array/utf8_array.rs b/src/common/src/array/utf8_array.rs
index 7ce186ad17529..577e5e4f4005d 100644
--- a/src/common/src/array/utf8_array.rs
+++ b/src/common/src/array/utf8_array.rs
@@ -372,7 +372,7 @@ mod tests {
                 None => NULL_VAL_FOR_HASH.hash(state),
             })
         });
-        let hashes = hash_finish(&mut states[..]);
+        let hashes = hash_finish(&states[..]);
 
         let count = hashes.iter().counts().len();
         assert_eq!(count, 30);
diff --git a/src/common/src/error.rs b/src/common/src/error.rs
index 121101de368ed..b0c577ea42947 100644
--- a/src/common/src/error.rs
+++ b/src/common/src/error.rs
@@ -234,9 +234,7 @@ impl Debug for RwError {
             "{}\n{}",
             self.inner,
             // Use inner error's backtrace by default, otherwise use the generated one in `From`.
-            (&self.inner as &dyn std::error::Error)
-                .request_ref::<Backtrace>()
-                .unwrap_or(&*self.backtrace)
+            std::error::request_ref::<Backtrace>(&self.inner).unwrap_or(&*self.backtrace)
         )
     }
 }
diff --git a/src/common/src/hash/key.rs b/src/common/src/hash/key.rs
index 4347e1dec0b9e..b4bee5aa83d0b 100644
--- a/src/common/src/hash/key.rs
+++ b/src/common/src/hash/key.rs
@@ -29,7 +29,6 @@ use std::marker::PhantomData;
 
 use bytes::{Buf, BufMut};
 use chrono::{Datelike, Timelike};
-use educe::Educe;
 use fixedbitset::FixedBitSet;
 use smallbitset::Set64;
 use static_assertions::const_assert_eq;
@@ -208,14 +207,26 @@ impl + IntoIterator> From for HeapNullBitmap {
 }
 
 /// A wrapper for u64 hash result. Generic over the hasher.
-#[derive(Educe)]
-#[educe(Default, Clone, Copy, Debug, PartialEq)]
+#[derive(Default, Clone, Copy)]
 pub struct HashCode<T> {
     value: u64,
-    #[educe(Debug(ignore))]
     _phantom: PhantomData<&'static T>,
 }
 
+impl<T> Debug for HashCode<T> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("HashCode")
+            .field("value", &self.value)
+            .finish()
+    }
+}
+
+impl<T> PartialEq for HashCode<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.value == other.value
+    }
+}
+
 impl<T> From<u64> for HashCode<T> {
     fn from(hash_code: u64) -> Self {
         Self {
diff --git a/src/common/src/lib.rs b/src/common/src/lib.rs
index 554815d43e753..ed1c65a4b56c1 100644
--- a/src/common/src/lib.rs
+++ b/src/common/src/lib.rs
@@ -12,8 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#![allow(rustdoc::private_intra_doc_links)]
-#![feature(drain_filter)]
+#![expect(
+    refining_impl_trait,
+    reason = "Some of the Row::iter() implementations returns ExactSizeIterator. Is this reasonable?"
+)]
+#![feature(extract_if)]
 #![feature(trait_alias)]
 #![feature(binary_heap_drain_sorted)]
 #![feature(is_sorted)]
@@ -26,7 +29,6 @@
 #![feature(map_try_insert)]
 #![feature(lazy_cell)]
 #![feature(error_generic_member_access)]
-#![feature(provide_any)]
 #![feature(let_chains)]
 #![feature(return_position_impl_trait_in_trait)]
 #![feature(portable_simd)]
diff --git a/src/common/src/types/interval.rs b/src/common/src/types/interval.rs
index 67f61f5805d46..aca4d090bcac2 100644
--- a/src/common/src/types/interval.rs
+++ b/src/common/src/types/interval.rs
@@ -606,7 +606,6 @@ pub mod test_utils {
     #[must_use]
     fn from_ymd(year: i32, month: i32, days: i32) -> Self {
         let months = year * 12 + month;
-        let days = days;
         let usecs = 0;
         Interval {
             months,
@@ -1263,7 +1262,7 @@ fn parse_interval(s: &str) -> Result> {
             convert_digit(&mut num_buf, &mut tokens)?;
         }
         convert_unit(&mut char_buf, &mut tokens)?;
-        convert_hms(&mut hour_min_sec, &mut tokens).ok_or_else(|| {
+        convert_hms(&hour_min_sec, &mut tokens).ok_or_else(|| {
             ErrorCode::InvalidInputSyntax(format!("Invalid interval: {:?}", hour_min_sec))
         })?;
@@ -1301,7 +1300,7 @@ fn convert_unit(c: &mut String, t: &mut Vec) -> Result<()> {
 /// [`TimeStrToken::Num(1)`, `TimeStrToken::TimeUnit(DateTimeField::Hour)`,
 /// `TimeStrToken::Num(2)`, `TimeStrToken::TimeUnit(DateTimeField::Minute)`,
 /// `TimeStrToken::Second("3")`, `TimeStrToken::TimeUnit(DateTimeField::Second)`]
-fn convert_hms(c: &mut Vec, t: &mut Vec) -> Option<()> {
+fn convert_hms(c: &Vec, t: &mut Vec) -> Option<()> {
     if c.len() > 3 {
         return None;
     }
diff --git a/src/common/src/types/ordered.rs b/src/common/src/types/ordered.rs
index 9dfc882c0c09b..75b07e529d7b9 100644
--- a/src/common/src/types/ordered.rs
+++ b/src/common/src/types/ordered.rs
@@ -138,6 +138,7 @@ impl From for DefaultOrdered {
     }
 }
 
+#[allow(clippy::incorrect_partial_ord_impl_on_ord_type)]
 impl PartialOrd for DefaultOrdered {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         self.0.default_partial_cmp(other.as_inner())
diff --git a/src/common/src/types/to_binary.rs b/src/common/src/types/to_binary.rs
index 2b868545ef14d..540167cc4f02c 100644
--- a/src/common/src/types/to_binary.rs
+++ b/src/common/src/types/to_binary.rs
@@ -31,6 +31,7 @@ macro_rules! implement_using_to_sql {
             match ty {
                 DataType::$data_type => {
                     let mut output = BytesMut::new();
+                    #[allow(clippy::redundant_closure_call)]
                     $accessor(self).to_sql(&Type::ANY, &mut output).unwrap();
                     Ok(Some(output.freeze()))
                 },
diff --git a/src/common/src/util/hash_util.rs b/src/common/src/util/hash_util.rs
index efe95bf96548c..76a597c11d2b3 100644
--- a/src/common/src/util/hash_util.rs
+++ b/src/common/src/util/hash_util.rs
@@ -14,7 +14,7 @@
 
 use std::hash::{BuildHasher, Hasher};
 
-pub fn finalize_hashers(hashers: &mut [H]) -> Vec {
+pub fn finalize_hashers(hashers: &[H]) -> Vec {
     return hashers
         .iter()
         .map(|hasher| hasher.finish())
diff --git a/src/common/src/util/memcmp_encoding.rs b/src/common/src/util/memcmp_encoding.rs
index d132aaf0518b9..8593071e18c71 100644
--- a/src/common/src/util/memcmp_encoding.rs
+++ b/src/common/src/util/memcmp_encoding.rs
@@ -618,10 +618,7 @@ mod tests {
             OrderType::descending(),
         )
         .unwrap();
-        let concated_encoded_row1 = encoded_v10
-            .into_iter()
-            .chain(encoded_v11.into_iter())
-            .collect();
+        let concated_encoded_row1 = encoded_v10.into_iter().chain(encoded_v11).collect();
         assert_eq!(encoded_row1, concated_encoded_row1);
 
         let encoded_row2 = encode_row(row2.project(&order_col_indices), &order_types).unwrap();
diff --git a/src/common/src/util/sort_util.rs b/src/common/src/util/sort_util.rs
index 38bca3d1a3f55..3129cbcac5172 100644
--- a/src/common/src/util/sort_util.rs
+++ b/src/common/src/util/sort_util.rs
@@ -446,7 +446,7 @@ pub fn compare_rows_in_chunk(
     rhs_idx: usize,
     column_orders: &[ColumnOrder],
 ) -> Result {
-    for column_order in column_orders.iter() {
+    for column_order in column_orders {
         let lhs_array = lhs_data_chunk.column_at(column_order.column_index);
         let rhs_array = rhs_data_chunk.column_at(column_order.column_index);
@@ -504,7 +504,7 @@ pub fn partial_cmp_datum_iter(
     order_types: impl IntoIterator,
 ) -> Option {
     let mut order_types_iter = order_types.into_iter();
-    lhs.into_iter().partial_cmp_by(rhs.into_iter(), |x, y| {
+    lhs.into_iter().partial_cmp_by(rhs, |x, y| {
         let Some(order_type) = order_types_iter.next() else {
             return None;
         };
@@ -524,7 +524,7 @@ pub fn cmp_datum_iter(
     order_types: impl IntoIterator,
 ) -> Ordering {
     let mut order_types_iter = order_types.into_iter();
-    lhs.into_iter().cmp_by(rhs.into_iter(), |x, y| {
+    lhs.into_iter().cmp_by(rhs, |x, y| {
         let order_type = order_types_iter
             .next()
             .expect("number of `OrderType`s is not enough");
@@ -546,12 +546,9 @@ pub fn partial_cmp_rows(
     lhs.iter()
         .zip_eq_debug(rhs.iter())
         .zip_eq_debug(order_types)
-        .fold(Some(Ordering::Equal), |acc, ((l, r), order_type)| {
-            if acc == Some(Ordering::Equal) {
-                partial_cmp_datum(l, r, *order_type)
-            } else {
-                acc
-            }
+        .try_fold(Ordering::Equal, |acc, ((l, r), order_type)| match acc {
+            Ordering::Equal => partial_cmp_datum(l, r, *order_type),
+            acc => Some(acc),
         })
 }
 
@@ -562,8 +559,14 @@
 /// Panics if the length of `lhs`, `rhs` and `order_types` are not equal,
 /// or, if the schemas of `lhs` and `rhs` are not matched.
 pub fn cmp_rows(lhs: impl Row, rhs: impl Row, order_types: &[OrderType]) -> Ordering {
-    partial_cmp_rows(&lhs, &rhs, order_types)
-        .unwrap_or_else(|| panic!("cannot compare {lhs:?} with {rhs:?}"))
+    assert_eq!(lhs.len(), rhs.len());
+    lhs.iter()
+        .zip_eq_debug(rhs.iter())
+        .zip_eq_debug(order_types)
+        .fold(Ordering::Equal, |acc, ((l, r), order_type)| match acc {
+            Ordering::Equal => cmp_datum(l, r, *order_type),
+            acc => acc,
+        })
 }
 
 #[cfg(test)]
diff --git a/src/common/src/vnode_mapping/vnode_placement.rs b/src/common/src/vnode_mapping/vnode_placement.rs
index db342e0fdec3a..65142178bf0a6 100644
--- a/src/common/src/vnode_mapping/vnode_placement.rs
+++ b/src/common/src/vnode_mapping/vnode_placement.rs
@@ -49,14 +49,16 @@ pub fn place_vnode(
     // evenly among workers.
     let mut selected_pu_ids = Vec::new();
     while !new_pus.is_empty() {
-        new_pus.drain_filter(|ps| {
-            if let Some(p) = ps.next() {
-                selected_pu_ids.push(p.id);
-                false
-            } else {
-                true
-            }
-        });
+        new_pus
+            .extract_if(|ps| {
+                if let Some(p) = ps.next() {
+                    selected_pu_ids.push(p.id);
+                    false
+                } else {
+                    true
+                }
+            })
+            .for_each(drop);
     }
     selected_pu_ids.drain(serving_parallelism..);
     let selected_pu_id_set: HashSet = selected_pu_ids.iter().cloned().collect();
@@ -197,6 +199,7 @@ mod tests {
         is_serving: true,
         is_streaming: false,
     };
+
     let mut gen_pus_for_worker =
         |worker_node_id: u32, number: u32, pu_to_worker: &mut HashMap| {
             let mut results = vec![];
@@ -212,6 +215,7 @@ mod tests {
         }
         results
     };
+
     let count_same_vnode_mapping = |pm1: &ParallelUnitMapping, pm2: &ParallelUnitMapping| {
         assert_eq!(pm1.len(), 256);
         assert_eq!(pm2.len(), 256);
@@ -224,6 +228,7 @@ mod tests {
         }
         count
     };
+
     let worker_1 = WorkerNode {
         id: 1,
         parallel_units: gen_pus_for_worker(1, 1, &mut pu_to_worker),
@@ -234,6 +239,7 @@ mod tests {
         place_vnode(None, &[worker_1.clone()], 0).is_none(),
         "max_parallelism should >= 0"
     );
+
     let re_pu_mapping_2 = place_vnode(None, &[worker_1.clone()], 10000).unwrap();
     assert_eq!(re_pu_mapping_2.iter_unique().count(), 1);
     let worker_2 = WorkerNode {
@@ -248,6 +254,7 @@ mod tests {
         10000,
     )
     .unwrap();
+
     assert_eq!(re_pu_mapping.iter_unique().count(), 51);
     // 1 * 256 + 0 -> 51 * 5 + 1
     let score = count_same_vnode_mapping(&re_pu_mapping_2, &re_pu_mapping);
@@ -265,6 +272,7 @@ mod tests {
         10000,
     )
     .unwrap();
+
     // limited by total pu number
     assert_eq!(re_pu_mapping_2.iter_unique().count(), 111);
     // 51 * 5 + 1 -> 111 * 2 + 34
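The `place_vnode` hunk above is adapting to a nightly rename: `Vec::drain_filter` became `Vec::extract_if` (tracking issue rust-lang/rust#43244), and the crate-level `#![feature(...)]` switches in `src/common/src/lib.rs` and `src/ctl/src/lib.rs` follow suit. Unlike the old `drain_filter`, which kept filtering when the iterator was dropped, the new iterator is lazy and only removes the elements it is driven across, hence the explicit `.for_each(drop)`. A minimal nightly-only sketch:

    #![feature(extract_if)]

    fn main() {
        let mut nums = vec![1, 2, 3, 4, 5, 6];
        // was: nums.drain_filter(|n| *n % 2 == 0);
        // extract_if removes matching elements only as the iterator is consumed
        nums.extract_if(|n| *n % 2 == 0).for_each(drop);
        assert_eq!(nums, [1, 3, 5]);
    }
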
diff --git a/src/compute/Cargo.toml b/src/compute/Cargo.toml
index 70aaf895e7b73..8276f93db8ae7 100644
--- a/src/compute/Cargo.toml
+++ b/src/compute/Cargo.toml
@@ -63,3 +63,6 @@ workspace-hack = { path = "../workspace-hack" }
 futures-async-stream = { workspace = true }
 rand = "0.8"
 tempfile = "3"
+
+[lints]
+workspace = true
diff --git a/src/compute/src/memory_management/policy.rs b/src/compute/src/memory_management/policy.rs
index 9078054ac2a38..085d7cfcf98a5 100644
--- a/src/compute/src/memory_management/policy.rs
+++ b/src/compute/src/memory_management/policy.rs
@@ -125,7 +125,7 @@ impl JemallocMemoryControl {
             {
                 tracing::warn!("Auto Jemalloc dump heap file failed! {:?}", e);
             }
-            unsafe { Box::from_raw(file_path_ptr) };
+            let _ = unsafe { Box::from_raw(file_path_ptr) };
         }
     }
 }
diff --git a/src/compute/src/rpc/service/exchange_service.rs b/src/compute/src/rpc/service/exchange_service.rs
index 40b5785bbefad..c50322cc2c94f 100644
--- a/src/compute/src/rpc/service/exchange_service.rs
+++ b/src/compute/src/rpc/service/exchange_service.rs
@@ -42,8 +42,8 @@ pub struct ExchangeServiceImpl {
     metrics: Arc,
 }
 
-type BatchDataStream = ReceiverStream>;
-type StreamDataStream = impl Stream>;
+pub type BatchDataStream = ReceiverStream>;
+pub type StreamDataStream = impl Stream>;
 
 #[async_trait::async_trait]
 impl ExchangeService for ExchangeServiceImpl {
diff --git a/src/compute/src/rpc/service/monitor_service.rs b/src/compute/src/rpc/service/monitor_service.rs
index b5f28545b5b44..640eb03be7415 100644
--- a/src/compute/src/rpc/service/monitor_service.rs
+++ b/src/compute/src/rpc/service/monitor_service.rs
@@ -144,7 +144,7 @@ impl MonitorService for MonitorServiceImpl {
         } else {
             Ok(Response::new(HeapProfilingResponse {}))
         };
-        unsafe { Box::from_raw(file_path_ptr) };
+        let _ = unsafe { Box::from_raw(file_path_ptr) };
 
         response
     }
diff --git a/src/compute/tests/integration_tests.rs b/src/compute/tests/integration_tests.rs
index 31cf4256823af..a43ae2e5762da 100644
--- a/src/compute/tests/integration_tests.rs
+++ b/src/compute/tests/integration_tests.rs
@@ -430,7 +430,7 @@ async fn test_row_seq_scan() -> Result<()> {
         Field::unnamed(DataType::Int32),
         Field::unnamed(DataType::Int64),
     ]);
-    let _column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)];
+    let _column_ids = [ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)];
 
     let column_descs = vec![
         ColumnDesc::unnamed(ColumnId::from(0), schema[0].data_type.clone()),
diff --git a/src/connector/Cargo.toml b/src/connector/Cargo.toml
index 9c6f0b5c8fc84..34b416fe4d8e6 100644
--- a/src/connector/Cargo.toml
+++ b/src/connector/Cargo.toml
@@ -124,3 +124,6 @@ prost-build = "0.11"
 [[bench]]
 name = "parser"
 harness = false
+
+[lints]
+workspace = true
diff --git a/src/connector/build.rs b/src/connector/build.rs
index 6439fcf00f932..3ace772d46039 100644
--- a/src/connector/build.rs
+++ b/src/connector/build.rs
@@ -17,7 +17,7 @@ fn main() {
     println!("cargo:rerun-if-changed={}", proto_dir);
 
-    let proto_files = vec!["recursive"];
+    let proto_files = ["recursive"];
     let protos: Vec = proto_files
         .iter()
         .map(|f| format!("{}/{}.proto", proto_dir, f))
diff --git a/src/connector/src/parser/util.rs b/src/connector/src/parser/util.rs
index 86b83be4cd892..7444fe202de46 100644
--- a/src/connector/src/parser/util.rs
+++ b/src/connector/src/parser/util.rs
@@ -100,8 +100,8 @@ pub(super) fn at_least_one_ok(mut results: Vec>) -> Result {
-        if $payload.is_some() {
-            $self.parse_inner($payload.unwrap(), $writer).await
+        if let Some(payload) = $payload {
+            $self.parse_inner(payload, $writer).await
         } else {
             Err(RwError::from(ErrorCode::InternalError(
                 "Empty payload with nonempty key".into(),
diff --git a/src/connector/src/sink/clickhouse.rs b/src/connector/src/sink/clickhouse.rs
index db6a714c8922f..187b87397dbf4 100644
--- a/src/connector/src/sink/clickhouse.rs
+++ b/src/connector/src/sink/clickhouse.rs
@@ -606,7 +606,7 @@ impl Serialize for ClickHouseField {
             ClickHouseField::Bool(v) => serializer.serialize_bool(*v),
             ClickHouseField::List(v) => {
                 let mut s = serializer.serialize_seq(Some(v.len()))?;
-                for i in v.iter() {
+                for i in v {
                     s.serialize_element(i)?;
                 }
                 s.end()
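The `policy.rs` and `monitor_service.rs` hunks above share one more consequence of `unused_must_use = "forbid"`: `Box::from_raw` is `#[must_use]`, so reclaiming an FFI pointer as a bare statement no longer compiles, and `let _ =` spells out that the box is rebuilt only to be dropped. A hypothetical mirror of the pattern:

    use std::ffi::CString;

    fn main() {
        let path = Box::new(CString::new("/tmp/heap.dump").unwrap());
        let path_ptr = Box::into_raw(path);
        // ... hand `path_ptr` across an FFI boundary that only borrows it ...
        // reclaim ownership so the allocation is freed when the Box drops
        let _ = unsafe { Box::from_raw(path_ptr) };
    }
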
cb713c92d2e6b..802262ff02fb3 100644 --- a/src/ctl/Cargo.toml +++ b/src/ctl/Cargo.toml @@ -52,3 +52,6 @@ uuid = { version = "1", features = ["v4"] } [target.'cfg(not(madsim))'.dependencies] workspace-hack = { path = "../workspace-hack" } + +[lints] +workspace = true diff --git a/src/ctl/src/cmd_impl/hummock/compaction_group.rs b/src/ctl/src/cmd_impl/hummock/compaction_group.rs index f59d097d71cd8..75a9884aece75 100644 --- a/src/ctl/src/cmd_impl/hummock/compaction_group.rs +++ b/src/ctl/src/cmd_impl/hummock/compaction_group.rs @@ -170,7 +170,7 @@ pub async fn list_compaction_status(context: &CtlContext, verbose: bool) -> anyh for a in assignment { assignment_lite .entry(a.context_id) - .or_insert(vec![]) + .or_default() .push(a.compact_task.unwrap().task_id); } for (k, v) in assignment_lite { diff --git a/src/ctl/src/cmd_impl/hummock/list_version.rs b/src/ctl/src/cmd_impl/hummock/list_version.rs index 3cc082671273d..6935dcf604142 100644 --- a/src/ctl/src/cmd_impl/hummock/list_version.rs +++ b/src/ctl/src/cmd_impl/hummock/list_version.rs @@ -70,7 +70,7 @@ pub async fn list_version( } } - for level in levels.get_levels().iter() { + for level in levels.get_levels() { println!( "level_idx {} type {} sst_num {} size {}", level.level_idx, diff --git a/src/ctl/src/cmd_impl/meta/serving.rs b/src/ctl/src/cmd_impl/meta/serving.rs index 5039e06f2d341..867317c0915b4 100644 --- a/src/ctl/src/cmd_impl/meta/serving.rs +++ b/src/ctl/src/cmd_impl/meta/serving.rs @@ -50,7 +50,7 @@ pub async fn list_serving_fragment_mappings(context: &CtlContext) -> anyhow::Res .flat_map(|(fragment_id, (table_id, mapping))| { let mut pu_vnodes: HashMap> = HashMap::new(); for (vnode, pu) in mapping.iter_with_vnode() { - pu_vnodes.entry(pu).or_insert(vec![]).push(vnode); + pu_vnodes.entry(pu).or_default().push(vnode); } pu_vnodes.into_iter().map(|(pu_id, vnodes)| { ( diff --git a/src/ctl/src/cmd_impl/scale/resize.rs b/src/ctl/src/cmd_impl/scale/resize.rs index 46063c9075024..786d0fa4c83b7 100644 --- a/src/ctl/src/cmd_impl/scale/resize.rs +++ b/src/ctl/src/cmd_impl/scale/resize.rs @@ -206,7 +206,7 @@ pub async fn resize(ctl_ctx: &CtlContext, scale_ctx: ScaleCommandContext) -> any (_, Some(_)) if include_worker_ids.is_empty() => { fail!("Cannot specify target parallelism per worker without including any worker") } - (Some(target), _) if target == 0 => fail!("Target parallelism must be greater than 0"), + (Some(0), _) => fail!("Target parallelism must be greater than 0"), _ => {} } diff --git a/src/ctl/src/lib.rs b/src/ctl/src/lib.rs index a92629c29bead..adcb6cf3b3472 100644 --- a/src/ctl/src/lib.rs +++ b/src/ctl/src/lib.rs @@ -13,7 +13,7 @@ // limitations under the License. 
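The `.or_insert(vec![])` to `.or_default()` rewrites in the `compaction_group.rs` and `serving.rs` hunks above recur throughout this patch. A minimal sketch of the equivalence, using a hypothetical map: any value type implementing `Default` works, and unlike `or_insert`, whose argument is evaluated eagerly at the call site, `or_default` constructs the default only when the key is absent.

use std::collections::HashMap;

fn main() {
    let mut assignment: HashMap<u32, Vec<u64>> = HashMap::new();
    // Before: assignment.entry(1).or_insert(vec![]).push(42);
    assignment.entry(1).or_default().push(42);
    assignment.entry(1).or_default().push(43);
    assert_eq!(assignment[&1], vec![42, 43]);
}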
#![feature(let_chains)] -#![feature(hash_drain_filter)] +#![feature(hash_extract_if)] use anyhow::Result; use clap::{Parser, Subcommand}; diff --git a/src/expr/Cargo.toml b/src/expr/Cargo.toml index 7b3fccd17bffa..0d575232a10a4 100644 --- a/src/expr/Cargo.toml +++ b/src/expr/Cargo.toml @@ -65,3 +65,6 @@ serde_json = "1" [[bench]] name = "expr" harness = false + +[lints] +workspace = true diff --git a/src/expr/macro/Cargo.toml b/src/expr/macro/Cargo.toml index f34f82ccebcc6..c73d9c723dd69 100644 --- a/src/expr/macro/Cargo.toml +++ b/src/expr/macro/Cargo.toml @@ -12,3 +12,6 @@ itertools = "0.11" proc-macro2 = "1" quote = "1" syn = "2" + +[lints] +workspace = true diff --git a/src/expr/src/agg/approx_count_distinct/mod.rs b/src/expr/src/agg/approx_count_distinct/mod.rs index 4002de68ba61a..98eb203015a82 100644 --- a/src/expr/src/agg/approx_count_distinct/mod.rs +++ b/src/expr/src/agg/approx_count_distinct/mod.rs @@ -173,7 +173,7 @@ impl Registers { let mut mean = 0.0; // Get harmonic mean of all the counts in results - for bucket in self.registers.iter() { + for bucket in &*self.registers { let count = bucket.max(); mean += 1.0 / ((1 << count) as f64); } @@ -184,7 +184,7 @@ impl Registers { // m * log(m/V) where V is the number of registers with value 0 let answer = if raw_estimate <= 2.5 * m { let mut zero_registers: f64 = 0.0; - for i in self.registers.iter() { + for i in &*self.registers { if i.max() == 0 { zero_registers += 1.0; } diff --git a/src/expr/src/expr/expr_concat_ws.rs b/src/expr/src/expr/expr_concat_ws.rs index 78f4d7da7db01..5bca7d0aea75c 100644 --- a/src/expr/src/expr/expr_concat_ws.rs +++ b/src/expr/src/expr/expr_concat_ws.rs @@ -235,7 +235,7 @@ mod tests { vec![None, None, None, None], ]; - let expected = vec![Some("a,b,c"), None, Some("b,c"), Some(""), None]; + let expected = [Some("a,b,c"), None, Some("b,c"), Some(""), None]; for (i, row_input) in row_inputs.iter().enumerate() { let datum_vec: Vec = row_input.iter().map(|e| e.map(|s| s.into())).collect(); diff --git a/src/expr/src/expr/expr_in.rs b/src/expr/src/expr/expr_in.rs index 28aaf36bc29a5..cbe356bc1bbd6 100644 --- a/src/expr/src/expr/expr_in.rs +++ b/src/expr/src/expr/expr_in.rs @@ -172,7 +172,7 @@ mod tests { }, ]; let mut in_children = vec![input_ref_expr_node]; - in_children.extend(constant_values.into_iter()); + in_children.extend(constant_values); let call = FunctionCall { children: in_children, }; diff --git a/src/expr/src/expr/expr_unary.rs b/src/expr/src/expr/expr_unary.rs index 08b318dc18a28..a286af8177e82 100644 --- a/src/expr/src/expr/expr_unary.rs +++ b/src/expr/src/expr/expr_unary.rs @@ -64,8 +64,8 @@ mod tests { #[tokio::test] async fn test_neg() { - let input = vec![Some(1), Some(0), Some(-1)]; - let target = vec![Some(-1), Some(0), Some(1)]; + let input = [Some(1), Some(0), Some(-1)]; + let target = [Some(-1), Some(0), Some(1)]; let col1 = I32Array::from_iter(&input).into_ref(); let data_chunk = DataChunk::new(vec![col1], 3); diff --git a/src/expr/src/lib.rs b/src/expr/src/lib.rs index 7f955249909b2..ee4cea38e4bb5 100644 --- a/src/expr/src/lib.rs +++ b/src/expr/src/lib.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
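The `hash_drain_filter` to `hash_extract_if` feature rename above belongs to this patch's central migration: nightly renamed the `drain_filter` family to `extract_if` and changed its drop behavior. The old `drain_filter` finished removing matching elements when its iterator was dropped; `extract_if` extracts only as far as the iterator is driven and retains the rest, which is why the `vnode_placement.rs` and `schedule.rs` hunks now consume it explicitly with `.for_each(drop)`. A short sketch on `Vec` (nightly-only, assuming `#![feature(extract_if)]`):

#![feature(extract_if)]

fn main() {
    let mut nums = vec![1, 2, 3, 4, 5, 6];
    // Matching elements are removed only as the iterator is consumed.
    let evens: Vec<i32> = nums.extract_if(|n| *n % 2 == 0).collect();
    assert_eq!(evens, vec![2, 4, 6]);
    assert_eq!(nums, vec![1, 3, 5]);
}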
-#![allow(rustdoc::private_intra_doc_links)] #![allow(non_snake_case)] // for `ctor` generated code #![feature(let_chains)] #![feature(assert_matches)] diff --git a/src/expr/src/vector_op/encdec.rs b/src/expr/src/vector_op/encdec.rs index b0f6bd9acbdd7..7ea9d27e4778a 100644 --- a/src/expr/src/vector_op/encdec.rs +++ b/src/expr/src/vector_op/encdec.rs @@ -273,11 +273,7 @@ mod tests { let cases = [ (r#"ABCDE"#.as_bytes(), "base64", r#"QUJDREU="#.as_bytes()), (r#"\""#.as_bytes(), "escape", r#"\\""#.as_bytes()), - ( - b"\x00\x40\x41\x42\xff", - "escape", - r#"\000@AB\377"#.as_bytes(), - ), + (b"\x00\x40\x41\x42\xff", "escape", r"\000@AB\377".as_bytes()), ( "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeefffffff".as_bytes(), "base64", diff --git a/src/expr/src/vector_op/like.rs b/src/expr/src/vector_op/like.rs index c62547d420026..13acc3aab53a7 100644 --- a/src/expr/src/vector_op/like.rs +++ b/src/expr/src/vector_op/like.rs @@ -101,11 +101,11 @@ mod tests { ), (r#"ABC_123"#, r#"ABC_123"#, false, true), (r#"ABCD123"#, r#"ABC_123"#, false, true), - (r#"ABC_123"#, r#"ABC\_123"#, false, true), - (r#"ABCD123"#, r#"ABC\_123"#, false, false), - (r#"ABC\123"#, r#"ABC_123"#, false, true), - (r#"ABC\123"#, r#"ABC\\123"#, false, true), - (r#"ABC\123"#, r#"ABC\123"#, false, false), + (r#"ABC_123"#, r"ABC\_123", false, true), + (r#"ABCD123"#, r"ABC\_123", false, false), + (r"ABC\123", r#"ABC_123"#, false, true), + (r"ABC\123", r"ABC\\123", false, true), + (r"ABC\123", r"ABC\123", false, false), ("apple", r#"App%"#, true, true), ("banana", r#"B%nana"#, true, true), ("apple", r#"B%nana"#, true, false), diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml index a728f5999b37c..a80aa743d9c4b 100644 --- a/src/frontend/Cargo.toml +++ b/src/frontend/Cargo.toml @@ -88,3 +88,6 @@ workspace-hack = { path = "../workspace-hack" } [dev-dependencies] assert_matches = "1" tempfile = "3" + +[lints] +workspace = true diff --git a/src/frontend/planner_test/Cargo.toml b/src/frontend/planner_test/Cargo.toml index d282be1c8fbc4..6dfc1e9cc9d48 100644 --- a/src/frontend/planner_test/Cargo.toml +++ b/src/frontend/planner_test/Cargo.toml @@ -47,3 +47,6 @@ test = false [[test]] name = "planner_test_runner" harness = false + +[lints] +workspace = true diff --git a/src/frontend/src/binder/bind_context.rs b/src/frontend/src/binder/bind_context.rs index a49022ee4f38c..ca34eab3a3dbd 100644 --- a/src/frontend/src/binder/bind_context.rs +++ b/src/frontend/src/binder/bind_context.rs @@ -299,7 +299,7 @@ impl BindContext { c })); for (k, v) in other.indices_of { - let entry = self.indices_of.entry(k).or_insert_with(Vec::new); + let entry = self.indices_of.entry(k).or_default(); entry.extend(v.into_iter().map(|x| x + begin)); } for (k, (x, y)) in other.range_of { diff --git a/src/frontend/src/binder/expr/function.rs b/src/frontend/src/binder/expr/function.rs index 4cb703906bf8c..51b18b0837584 100644 --- a/src/frontend/src/binder/expr/function.rs +++ b/src/frontend/src/binder/expr/function.rs @@ -185,10 +185,16 @@ impl Binder { } }; - let ast::FunctionArgExpr::Expr(ast::Expr::LambdaFunction { args: lambda_args, body: lambda_body }) = lambda.get_expr() else { + let ast::FunctionArgExpr::Expr(ast::Expr::LambdaFunction { + args: lambda_args, + body: lambda_body, + }) = lambda.get_expr() + else { return Err(ErrorCode::BindError( - "The `lambda` argument for `array_transform` should be a lambda function".to_string() - ).into()); + "The `lambda` argument for `array_transform` should be a lambda function" + .to_string(), + ) + .into()); }; 
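The `array_transform` hunk above is the bumped toolchain's rustfmt formatting `let`-`else` natively, with the `else` on its own line after the pattern. A minimal sketch of the construct with hypothetical stand-in types: the pattern either destructures, or the `else` arm must diverge (return, panic, and so on).

enum Expr {
    Lambda { body: String },
    Literal(i64),
}

fn bind_lambda(expr: Expr) -> Result<String, String> {
    let Expr::Lambda { body } = expr else {
        return Err("expected a lambda expression".to_string());
    };
    Ok(body)
}

fn main() {
    assert!(bind_lambda(Expr::Literal(1)).is_err());
    assert_eq!(bind_lambda(Expr::Lambda { body: "x + 1".into() }).unwrap(), "x + 1");
}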
let [lambda_arg] = <[Ident; 1]>::try_from(lambda_args).map_err(|args| -> RwError { diff --git a/src/frontend/src/binder/expr/value.rs b/src/frontend/src/binder/expr/value.rs index 0d599fc6cdd03..b958ae8338a49 100644 --- a/src/frontend/src/binder/expr/value.rs +++ b/src/frontend/src/binder/expr/value.rs @@ -214,7 +214,7 @@ mod tests { use super::*; let mut binder = mock_binder(); - let values = vec![ + let values = [ "1", "111111111111111", "111111111.111111", @@ -258,7 +258,7 @@ mod tests { use super::*; let mut binder = mock_binder(); - let values = vec![ + let values = [ ("1e6"), ("1.25e6"), ("1.25e1"), @@ -336,7 +336,7 @@ mod tests { use super::*; let mut binder = mock_binder(); - let values = vec![ + let values = [ "1 hour", "1 h", "1 year", diff --git a/src/frontend/src/expr/utils.rs b/src/frontend/src/expr/utils.rs index 37237c30f8da4..d07287b08dbe2 100644 --- a/src/frontend/src/expr/utils.rs +++ b/src/frontend/src/expr/utils.rs @@ -307,7 +307,7 @@ pub fn factorization_expr(expr: ExprImpl) -> Vec { let (last, remaining) = disjunctions.split_last_mut().unwrap(); // now greatest_common_factor == [C, D] let greatest_common_divider: Vec<_> = last - .drain_filter(|factor| remaining.iter().all(|expr| expr.contains(factor))) + .extract_if(|factor| remaining.iter().all(|expr| expr.contains(factor))) .collect(); for disjunction in remaining { // remove common factors diff --git a/src/frontend/src/handler/alter_table_column.rs b/src/frontend/src/handler/alter_table_column.rs index 47c214ab6a738..be314befdfae3 100644 --- a/src/frontend/src/handler/alter_table_column.rs +++ b/src/frontend/src/handler/alter_table_column.rs @@ -131,7 +131,7 @@ pub async fn handle_alter_table_column( // Locate the column by name and remove it. let column_name = column_name.real_value(); let removed_column = columns - .drain_filter(|c| c.name.real_value() == column_name) + .extract_if(|c| c.name.real_value() == column_name) .at_most_one() .ok() .unwrap(); diff --git a/src/frontend/src/handler/drop_index.rs b/src/frontend/src/handler/drop_index.rs index ee51b10fc12af..20987fd26950d 100644 --- a/src/frontend/src/handler/drop_index.rs +++ b/src/frontend/src/handler/drop_index.rs @@ -49,7 +49,7 @@ pub async fn handle_drop_index( } Err(err) => { match err { - CatalogError::NotFound(kind, _) if kind == "index" => { + CatalogError::NotFound("index", _) => { // index not found, try to find table below to give a better error message } _ => return Err(err.into()), @@ -69,7 +69,7 @@ pub async fn handle_drop_index( .into()) } else { match e { - CatalogError::NotFound(kind, name) if kind == "table" => { + CatalogError::NotFound("table", name) => { Err(CatalogError::NotFound("index", name).into()) } _ => Err(e.into()), diff --git a/src/frontend/src/handler/drop_mv.rs b/src/frontend/src/handler/drop_mv.rs index a8be43e4940e7..50b462c612e2b 100644 --- a/src/frontend/src/handler/drop_mv.rs +++ b/src/frontend/src/handler/drop_mv.rs @@ -52,7 +52,7 @@ pub async fn handle_drop_mv( .into()) } else { match e { - CatalogError::NotFound(kind, name) if kind == "table" => { + CatalogError::NotFound("table", name) => { Err(CatalogError::NotFound("materialized view", name).into()) } _ => Err(e.into()), diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs index 35b91c9b551bf..2d1aaea26b3c0 100644 --- a/src/frontend/src/lib.rs +++ b/src/frontend/src/lib.rs @@ -13,13 +13,12 @@ // limitations under the License. 
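The `CatalogError::NotFound` rewrites in the `drop_index.rs` and `drop_mv.rs` hunks above replace a bound variable plus `if` guard with a string literal in the pattern, which works because the variant's first field is a `&'static str`. A sketch with a hypothetical stand-in for the error type:

enum CatalogError {
    NotFound(&'static str, String),
}

fn describe(err: CatalogError) -> String {
    match err {
        // Before: CatalogError::NotFound(kind, name) if kind == "index" => ...
        CatalogError::NotFound("index", name) => format!("index {name} not found"),
        CatalogError::NotFound(kind, name) => format!("{kind} {name} not found"),
    }
}

fn main() {
    let err = CatalogError::NotFound("index", "idx_user_id".to_string());
    assert_eq!(describe(err), "index idx_user_id not found");
}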
#![allow(clippy::derive_partial_eq_without_eq)] -#![allow(rustdoc::private_intra_doc_links)] #![feature(map_try_insert)] #![feature(negative_impls)] #![feature(generators)] #![feature(proc_macro_hygiene, stmt_expr_attributes)] #![feature(trait_alias)] -#![feature(drain_filter)] +#![feature(extract_if)] #![feature(if_let_guard)] #![feature(let_chains)] #![feature(assert_matches)] diff --git a/src/frontend/src/optimizer/mod.rs b/src/frontend/src/optimizer/mod.rs index 64c2d3c4ee9ea..8daa0e7a45b31 100644 --- a/src/frontend/src/optimizer/mod.rs +++ b/src/frontend/src/optimizer/mod.rs @@ -438,7 +438,8 @@ impl PlanRoot { append_only, columns .iter() - .filter_map(|c| (!c.is_generated()).then(|| c.column_desc.clone())) + .filter(|&c| (!c.is_generated())) + .map(|c| c.column_desc.clone()) .collect(), ) .into(); diff --git a/src/frontend/src/optimizer/plan_node/generic/join.rs b/src/frontend/src/optimizer/plan_node/generic/join.rs index e8bf2a0ddf3c4..47c6b66286d98 100644 --- a/src/frontend/src/optimizer/plan_node/generic/join.rs +++ b/src/frontend/src/optimizer/plan_node/generic/join.rs @@ -202,11 +202,7 @@ impl GenericPlanNode for Join { get_new_left_fd_set(left_fd_set) .into_dependencies() .into_iter() - .chain( - get_new_right_fd_set(right_fd_set) - .into_dependencies() - .into_iter(), - ) + .chain(get_new_right_fd_set(right_fd_set).into_dependencies()) .for_each(|fd| fd_set.add_functional_dependency(fd)); fd_set } @@ -407,7 +403,7 @@ pub fn push_down_into_join( // Do not push now on to the on, it will be pulled up into a filter instead. let on = Condition { conjunctions: conjunctions - .drain_filter(|expr| expr.count_nows() == 0) + .extract_if(|expr| expr.count_nows() == 0) .collect(), }; predicate.conjunctions = conjunctions; diff --git a/src/frontend/src/optimizer/plan_node/logical_join.rs b/src/frontend/src/optimizer/plan_node/logical_join.rs index 0971df069ffea..640b31170c546 100644 --- a/src/frontend/src/optimizer/plan_node/logical_join.rs +++ b/src/frontend/src/optimizer/plan_node/logical_join.rs @@ -226,7 +226,7 @@ impl LogicalJoin { Condition { conjunctions: others .conjunctions - .drain_filter(|expr| expr.count_nows() == 0) + .extract_if(|expr| expr.count_nows() == 0) .collect(), } } else { @@ -655,8 +655,8 @@ impl ExprRewritable for LogicalJoin { /// then we proceed. Else abort. /// 2. Then, we collect `InputRef`s in the conjunction. /// 3. If they are all columns in the given side of join eq condition, then we proceed. Else abort. -/// 4. We then rewrite the `ExprImpl`, by replacing `InputRef` column indices with -/// the equivalent in the other side. +/// 4. We then rewrite the `ExprImpl`, by replacing `InputRef` column indices with the equivalent in +/// the other side. 
/// /// # Arguments /// diff --git a/src/frontend/src/optimizer/plan_node/logical_multi_join.rs b/src/frontend/src/optimizer/plan_node/logical_multi_join.rs index 99a0a0f64fbda..b3d61cd495fb9 100644 --- a/src/frontend/src/optimizer/plan_node/logical_multi_join.rs +++ b/src/frontend/src/optimizer/plan_node/logical_multi_join.rs @@ -317,7 +317,7 @@ impl LogicalMultiJoin { impl PlanTreeNode for LogicalMultiJoin { fn inputs(&self) -> smallvec::SmallVec<[crate::optimizer::PlanRef; 2]> { let mut vec = smallvec::SmallVec::new(); - vec.extend(self.inputs.clone().into_iter()); + vec.extend(self.inputs.clone()); vec } diff --git a/src/frontend/src/optimizer/plan_node/logical_over_window.rs b/src/frontend/src/optimizer/plan_node/logical_over_window.rs index 9fbc9c6b39f1b..b2057f28e05fc 100644 --- a/src/frontend/src/optimizer/plan_node/logical_over_window.rs +++ b/src/frontend/src/optimizer/plan_node/logical_over_window.rs @@ -653,7 +653,7 @@ impl ColPrunable for LogicalOverWindow { let (req_cols_input_part, req_cols_win_func_part) = { let mut in_input = required_cols.to_vec(); - let in_win_funcs: IndexSet = in_input.drain_filter(|i| *i >= input_len).collect(); + let in_win_funcs: IndexSet = in_input.extract_if(|i| *i >= input_len).collect(); (IndexSet::from(in_input), in_win_funcs) }; diff --git a/src/frontend/src/optimizer/plan_node/logical_scan.rs b/src/frontend/src/optimizer/plan_node/logical_scan.rs index ea0a725e17d86..d7574abed7b29 100644 --- a/src/frontend/src/optimizer/plan_node/logical_scan.rs +++ b/src/frontend/src/optimizer/plan_node/logical_scan.rs @@ -401,7 +401,7 @@ impl PredicatePushdown for LogicalScan { } let non_pushable_predicate: Vec<_> = predicate .conjunctions - .drain_filter(|expr| expr.count_nows() > 0 || HasCorrelated {}.visit_expr(expr)) + .extract_if(|expr| expr.count_nows() > 0 || HasCorrelated {}.visit_expr(expr)) .collect(); let predicate = predicate.rewrite_expr(&mut ColIndexMapping::with_target_size( self.output_col_idx().iter().map(|i| Some(*i)).collect(), diff --git a/src/frontend/src/optimizer/plan_node/mod.rs b/src/frontend/src/optimizer/plan_node/mod.rs index 66fcdfe0f9f39..926cf85048f3e 100644 --- a/src/frontend/src/optimizer/plan_node/mod.rs +++ b/src/frontend/src/optimizer/plan_node/mod.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![allow(rustdoc::private_intra_doc_links)] //! Defines all kinds of node in the plan tree, each node represent a relational expression. //! //! 
We use a immutable style tree structure, every Node are immutable and cannot be modified after @@ -331,7 +330,7 @@ impl PlanRef { .map(|mut c| Condition { conjunctions: c .conjunctions - .drain_filter(|e| e.count_nows() == 0 && e.is_pure()) + .extract_if(|e| e.count_nows() == 0 && e.is_pure()) .collect(), }) .reduce(|a, b| a.or(b)) diff --git a/src/frontend/src/optimizer/plan_node/stream_table_scan.rs b/src/frontend/src/optimizer/plan_node/stream_table_scan.rs index 8083560655417..51f61e0f663b8 100644 --- a/src/frontend/src/optimizer/plan_node/stream_table_scan.rs +++ b/src/frontend/src/optimizer/plan_node/stream_table_scan.rs @@ -143,7 +143,7 @@ impl StreamTableScan { catalog_builder.add_order_column(0, OrderType::ascending()); // pk columns - for col_order in self.logical.primary_key().iter() { + for col_order in self.logical.primary_key() { let col = &upstream_schema[col_order.column_index]; catalog_builder.add_column(&Field::from(col)); } diff --git a/src/frontend/src/optimizer/plan_node/utils.rs b/src/frontend/src/optimizer/plan_node/utils.rs index 1f97b28d89e49..475c5c0e32eb1 100644 --- a/src/frontend/src/optimizer/plan_node/utils.rs +++ b/src/frontend/src/optimizer/plan_node/utils.rs @@ -172,7 +172,7 @@ impl TableCatalogBuilder { read_prefix_len_hint, version: None, // the internal table is not versioned and can't be schema changed watermark_columns, - dist_key_in_pk: self.dist_key_in_pk.unwrap_or(vec![]), + dist_key_in_pk: self.dist_key_in_pk.unwrap_or_default(), cardinality: Cardinality::unknown(), // TODO(card): cardinality of internal table created_at_epoch: None, initialized_at_epoch: None, diff --git a/src/frontend/src/optimizer/property/func_dep.rs b/src/frontend/src/optimizer/property/func_dep.rs index bb6cbb146faf9..738aa36ba57fe 100644 --- a/src/frontend/src/optimizer/property/func_dep.rs +++ b/src/frontend/src/optimizer/property/func_dep.rs @@ -393,8 +393,8 @@ mod tests { fd.add_functional_dependency_by_column_indices(&[1, 2], &[0]); // (1, 2) --> (0) fd.add_functional_dependency_by_column_indices(&[0, 1], &[3]); // (0, 1) --> (3) fd.add_functional_dependency_by_column_indices(&[3], &[4]); // (3) --> (4) - let from = FixedBitSet::from_iter([1usize, 2usize].into_iter()); - let to = FixedBitSet::from_iter([4usize].into_iter()); + let from = FixedBitSet::from_iter([1usize, 2usize]); + let to = FixedBitSet::from_iter([4usize]); assert!(fd.is_determined_by(&from, &to)); // (1, 2) --> (4) holds } } diff --git a/src/frontend/src/optimizer/rule/apply_join_transpose_rule.rs b/src/frontend/src/optimizer/rule/apply_join_transpose_rule.rs index 51f2aa0c9d62f..089a66f0ad08b 100644 --- a/src/frontend/src/optimizer/rule/apply_join_transpose_rule.rs +++ b/src/frontend/src/optimizer/rule/apply_join_transpose_rule.rs @@ -435,7 +435,7 @@ impl ApplyJoinTransposeRule { .clone() .into_iter() .map(|expr| rewriter.rewrite_expr(expr)) - .chain(natural_conjunctions.into_iter()) + .chain(natural_conjunctions) .collect_vec(), }; diff --git a/src/frontend/src/optimizer/rule/apply_project_set_transpose_rule.rs b/src/frontend/src/optimizer/rule/apply_project_set_transpose_rule.rs index a63755cdbe7e2..ba79e34e57269 100644 --- a/src/frontend/src/optimizer/rule/apply_project_set_transpose_rule.rs +++ b/src/frontend/src/optimizer/rule/apply_project_set_transpose_rule.rs @@ -75,7 +75,7 @@ impl Rule for ApplyProjectSetTransposeRule { .map(|expr| rewriter.rewrite_expr(expr)) .collect_vec(); - exprs.extend(new_proj_exprs.clone().into_iter()); + exprs.extend(new_proj_exprs.clone()); let mut rewriter = 
ApplyOnCondRewriterForProjectSet::new(left.schema().len(), new_proj_exprs); diff --git a/src/frontend/src/optimizer/rule/apply_project_transpose_rule.rs b/src/frontend/src/optimizer/rule/apply_project_transpose_rule.rs index 46f06cc5ac703..599e69f656206 100644 --- a/src/frontend/src/optimizer/rule/apply_project_transpose_rule.rs +++ b/src/frontend/src/optimizer/rule/apply_project_transpose_rule.rs @@ -71,7 +71,7 @@ impl Rule for ApplyProjectTransposeRule { .map(|expr| rewriter.rewrite_expr(expr)) .collect_vec(); - exprs.extend(new_proj_exprs.clone().into_iter()); + exprs.extend(new_proj_exprs.clone()); let mut rewriter = ApplyOnConditionRewriter { left_input_len: left.schema().len(), diff --git a/src/frontend/src/optimizer/rule/apply_topn_transpose_rule.rs b/src/frontend/src/optimizer/rule/apply_topn_transpose_rule.rs index 6efddefa832e7..fad13dd2be760 100644 --- a/src/frontend/src/optimizer/rule/apply_topn_transpose_rule.rs +++ b/src/frontend/src/optimizer/rule/apply_topn_transpose_rule.rs @@ -74,9 +74,7 @@ impl Rule for ApplyTopNTransposeRule { .column_orders .iter_mut() .for_each(|ord| ord.column_index += apply_left_len); - let new_group_key = (0..apply_left_len) - .chain(group_key.into_iter()) - .collect_vec(); + let new_group_key = (0..apply_left_len).chain(group_key).collect_vec(); LogicalTopN::new(new_apply, limit, offset, with_ties, order, new_group_key) }; diff --git a/src/frontend/src/optimizer/rule/distinct_agg_rule.rs b/src/frontend/src/optimizer/rule/distinct_agg_rule.rs index 2f60f85a431fd..b7f9a5f902109 100644 --- a/src/frontend/src/optimizer/rule/distinct_agg_rule.rs +++ b/src/frontend/src/optimizer/rule/distinct_agg_rule.rs @@ -171,7 +171,7 @@ impl DistinctAggRule { // shift the indices of filter first to make later rewrite more convenient. 
let mut shift_with_offset = ColIndexMapping::with_shift_offset(input_schema_len, input_schema_len as isize); - for agg_call in agg_calls.iter_mut() { + for agg_call in &mut *agg_calls { agg_call.filter = mem::replace(&mut agg_call.filter, Condition::true_cond()) .rewrite_expr(&mut shift_with_offset); } @@ -180,7 +180,7 @@ impl DistinctAggRule { let expand_schema_len = expand.schema().len(); let mut input_indices = CollectInputRef::with_capacity(expand_schema_len); input_indices.extend(group_keys.indices()); - for agg_call in agg_calls.iter() { + for agg_call in &*agg_calls { input_indices.extend(agg_call.input_indices()); agg_call.filter.visit_expr(&mut input_indices); } diff --git a/src/frontend/src/optimizer/rule/index_selection_rule.rs b/src/frontend/src/optimizer/rule/index_selection_rule.rs index 7c8bdfe604321..c16cd7e31bf28 100644 --- a/src/frontend/src/optimizer/rule/index_selection_rule.rs +++ b/src/frontend/src/optimizer/rule/index_selection_rule.rs @@ -255,7 +255,7 @@ impl IndexSelectionRule { .clone(), ) }) - .chain(new_predicate.into_iter()) + .chain(new_predicate) .collect_vec(); let on = Condition { conjunctions }; let join: PlanRef = LogicalJoin::new( @@ -351,7 +351,7 @@ impl IndexSelectionRule { primary_table_desc.columns[y.column_index].data_type.clone(), ) }) - .chain(new_predicate.into_iter()) + .chain(new_predicate) .collect_vec(); let on = Condition { conjunctions }; diff --git a/src/frontend/src/optimizer/rule/rewrite_like_expr_rule.rs b/src/frontend/src/optimizer/rule/rewrite_like_expr_rule.rs index 0a42395af7266..394d569050c27 100644 --- a/src/frontend/src/optimizer/rule/rewrite_like_expr_rule.rs +++ b/src/frontend/src/optimizer/rule/rewrite_like_expr_rule.rs @@ -238,9 +238,9 @@ mod tests { ("test_name", (Some(4), None, "test_name")), ("test_name_2", (Some(4), None, "test_name_2")), ("test%name", (None, Some(4), "test%name")), - (r#"test\_name"#, (None, None, "test_name")), - (r#"test\_name_2"#, (Some(9), None, "test_name_2")), - (r#"test\\_name_2"#, (Some(5), None, r#"test\_name_2"#)), + (r"test\_name", (None, None, "test_name")), + (r"test\_name_2", (Some(9), None, "test_name_2")), + (r"test\\_name_2", (Some(5), None, r"test\_name_2")), ]; for (pattern, (c, s, ub)) in testcases { diff --git a/src/frontend/src/scheduler/distributed/stage.rs b/src/frontend/src/scheduler/distributed/stage.rs index 55a650f804000..fde1bc7244368 100644 --- a/src/frontend/src/scheduler/distributed/stage.rs +++ b/src/frontend/src/scheduler/distributed/stage.rs @@ -781,7 +781,7 @@ impl StageRunner { // *state = StageState::Failed // } - for (task, task_status) in self.tasks.iter() { + for (task, task_status) in &*self.tasks { // 1. Collect task info and client. let loc = &task_status.get_status().location; let addr = loc.as_ref().expect("Get address should not fail"); diff --git a/src/frontend/src/scheduler/local.rs b/src/frontend/src/scheduler/local.rs index 114e21e5af069..f3906ffbcc755 100644 --- a/src/frontend/src/scheduler/local.rs +++ b/src/frontend/src/scheduler/local.rs @@ -374,8 +374,8 @@ impl LocalQueryExecution { } Ok(PlanNodePb { - /// Since all the rest plan is embedded into the exchange node, - /// there is no children any more. + // Since all the rest plan is embedded into the exchange node, + // there is no children any more. 
children: vec![], identity, node_body: Some(node_body), diff --git a/src/frontend/src/stream_fragmenter/mod.rs b/src/frontend/src/stream_fragmenter/mod.rs index 1e033e7aa7c8d..d049a7d656a7e 100644 --- a/src/frontend/src/stream_fragmenter/mod.rs +++ b/src/frontend/src/stream_fragmenter/mod.rs @@ -128,7 +128,7 @@ pub fn build_graph(plan_node: PlanRef) -> StreamFragmentGraphProto { fragment_graph } -#[expect(dead_code)] +#[cfg(any())] fn is_stateful_executor(stream_node: &StreamNode) -> bool { matches!( stream_node.get_node_body().unwrap(), @@ -144,7 +144,7 @@ fn is_stateful_executor(stream_node: &StreamNode) -> bool { /// Currently, it will split the fragment with multiple stateful operators (those have high I/O /// throughput) into multiple fragments, which may help improve the I/O concurrency. /// Known as "no-shuffle exchange" or "1v1 exchange". -#[expect(dead_code)] +#[cfg(any())] fn rewrite_stream_node( state: &mut BuildFragmentGraphState, stream_node: StreamNode, @@ -211,7 +211,7 @@ fn generate_fragment_graph( } /// Use the given `stream_node` to create a fragment and add it to graph. -pub(self) fn build_and_add_fragment( +fn build_and_add_fragment( state: &mut BuildFragmentGraphState, stream_node: StreamNode, ) -> Result> { diff --git a/src/frontend/src/stream_fragmenter/rewrite/delta_join.rs b/src/frontend/src/stream_fragmenter/rewrite/delta_join.rs index 4574805b09563..b09dc847fc3fd 100644 --- a/src/frontend/src/stream_fragmenter/rewrite/delta_join.rs +++ b/src/frontend/src/stream_fragmenter/rewrite/delta_join.rs @@ -108,7 +108,7 @@ fn build_lookup_for_delta_join( fn build_delta_join_inner( state: &mut BuildFragmentGraphState, - current_fragment: &mut StreamFragment, + current_fragment: &StreamFragment, arrange_0_frag: Rc, arrange_1_frag: Rc, node: &StreamNode, @@ -315,7 +315,7 @@ fn build_delta_join_inner( pub(crate) fn build_delta_join_without_arrange( state: &mut BuildFragmentGraphState, - current_fragment: &mut StreamFragment, + current_fragment: &StreamFragment, mut node: StreamNode, ) -> Result { match &node.node_body { diff --git a/src/frontend/src/user/user_authentication.rs b/src/frontend/src/user/user_authentication.rs index 6df38f2fcdd34..ad6c6d2e758a8 100644 --- a/src/frontend/src/user/user_authentication.rs +++ b/src/frontend/src/user/user_authentication.rs @@ -146,7 +146,7 @@ mod tests { sha256_hash(user_name, password) ); - let input_passwords = vec![ + let input_passwords = [ "bar", "", "md596948aad3fcae80c08a35c9b5958cd89", diff --git a/src/frontend/src/utils/connected_components.rs b/src/frontend/src/utils/connected_components.rs index cbcce2cc79d6c..1f3c10493f776 100644 --- a/src/frontend/src/utils/connected_components.rs +++ b/src/frontend/src/utils/connected_components.rs @@ -50,10 +50,7 @@ impl ConnectedComponentLabeller { }; { - let edges = self - .labels_to_edges - .entry(new_label) - .or_insert_with(BTreeSet::new); + let edges = self.labels_to_edges.entry(new_label).or_default(); let new_edge = if v1 < v2 { (v1, v2) } else { (v2, v1) }; edges.insert(new_edge); @@ -73,10 +70,7 @@ impl ConnectedComponentLabeller { self.vertex_to_label.insert(v, new_label); } if let Some(old_edges) = self.labels_to_edges.remove(&old_label) { - let edges = self - .labels_to_edges - .entry(new_label) - .or_insert_with(BTreeSet::new); + let edges = self.labels_to_edges.entry(new_label).or_default(); edges.extend(old_edges); } } diff --git a/src/java_binding/Cargo.toml b/src/java_binding/Cargo.toml index 3280125f3ac49..d8d90693f44a6 100644 --- a/src/java_binding/Cargo.toml +++ 
b/src/java_binding/Cargo.toml @@ -32,3 +32,6 @@ bench = false name = "data-chunk-payload-convert-generator" test = false bench = false + +[lints] +workspace = true diff --git a/src/java_binding/src/lib.rs b/src/java_binding/src/lib.rs index 6ccc450c09d5a..12a3c59fc829f 100644 --- a/src/java_binding/src/lib.rs +++ b/src/java_binding/src/lib.rs @@ -13,7 +13,6 @@ // limitations under the License. #![feature(error_generic_member_access)] -#![feature(provide_any)] #![feature(lazy_cell)] #![feature(once_cell_try)] #![feature(type_alias_impl_trait)] diff --git a/src/jni_core/Cargo.toml b/src/jni_core/Cargo.toml index e0e4bd75022fd..c8bba371c8dea 100644 --- a/src/jni_core/Cargo.toml +++ b/src/jni_core/Cargo.toml @@ -36,3 +36,6 @@ tracing = "0.1" [dev-dependencies] risingwave_expr = { workspace = true } + +[lints] +workspace = true diff --git a/src/jni_core/src/lib.rs b/src/jni_core/src/lib.rs index 62625e14d21fe..be350ae57a460 100644 --- a/src/jni_core/src/lib.rs +++ b/src/jni_core/src/lib.rs @@ -13,7 +13,6 @@ // limitations under the License. #![feature(error_generic_member_access)] -#![feature(provide_any)] #![feature(lazy_cell)] #![feature(once_cell_try)] #![feature(type_alias_impl_trait)] diff --git a/src/meta/Cargo.toml b/src/meta/Cargo.toml index 61fb6118fdb3b..d8401cc5a7f71 100644 --- a/src/meta/Cargo.toml +++ b/src/meta/Cargo.toml @@ -90,3 +90,6 @@ tempfile = "3" [features] test = [] failpoints = ["fail/failpoints"] + +[lints] +workspace = true diff --git a/src/meta/src/barrier/command.rs b/src/meta/src/barrier/command.rs index 3f75c8d5f2257..d0deac65b3207 100644 --- a/src/meta/src/barrier/command.rs +++ b/src/meta/src/barrier/command.rs @@ -378,7 +378,7 @@ impl CommandContext { Command::RescheduleFragment { reschedules, .. } => { let mut dispatcher_update = HashMap::new(); - for (_fragment_id, reschedule) in reschedules.iter() { + for reschedule in reschedules.values() { for &(upstream_fragment_id, dispatcher_id) in &reschedule.upstream_fragment_dispatcher_ids { @@ -463,7 +463,7 @@ impl CommandContext { let merge_update = merge_update.into_values().collect(); let mut actor_vnode_bitmap_update = HashMap::new(); - for (_fragment_id, reschedule) in reschedules.iter() { + for reschedule in reschedules.values() { // Record updates for all actors in this fragment. for (&actor_id, bitmap) in &reschedule.vnode_bitmap_updates { let bitmap = bitmap.to_protobuf(); @@ -547,7 +547,7 @@ impl CommandContext { .values() .flatten() .flat_map(|dispatcher| dispatcher.downstream_actor_id.iter().copied()) - .chain(table_fragments.values_actor_ids().into_iter()) + .chain(table_fragments.values_actor_ids()) .collect(), _ => Default::default(), } diff --git a/src/meta/src/barrier/mod.rs b/src/meta/src/barrier/mod.rs index f341cdd02497d..cd3ee0360009f 100644 --- a/src/meta/src/barrier/mod.rs +++ b/src/meta/src/barrier/mod.rs @@ -205,7 +205,7 @@ impl CheckpointControl { async fn finish_commands(&mut self, checkpoint: bool) -> MetaResult { for command in self .finished_commands - .drain_filter(|c| checkpoint || c.context.kind.is_barrier()) + .extract_if(|c| checkpoint || c.context.kind.is_barrier()) { // The command is ready to finish. We can now call `pre_finish`. 
command.context.pre_finish().await?; @@ -516,7 +516,7 @@ impl GlobalBarrierManager { } } - pub async fn start(barrier_manager: BarrierManagerRef) -> (JoinHandle<()>, Sender<()>) { + pub fn start(barrier_manager: BarrierManagerRef) -> (JoinHandle<()>, Sender<()>) { let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); let join_handle = tokio::spawn(async move { barrier_manager.run(shutdown_rx).await; @@ -1123,7 +1123,7 @@ fn collect_synced_ssts( ) { let mut sst_to_worker: HashMap = HashMap::new(); let mut synced_ssts: Vec = vec![]; - for resp in resps.iter_mut() { + for resp in &mut *resps { let mut t: Vec = resp .synced_sstables .iter_mut() diff --git a/src/meta/src/error.rs b/src/meta/src/error.rs index be6688a8e2a16..b6c86b3f1eec0 100644 --- a/src/meta/src/error.rs +++ b/src/meta/src/error.rs @@ -102,7 +102,7 @@ impl std::fmt::Debug for MetaError { write!(f, "{}", self.inner)?; writeln!(f)?; - if let Some(backtrace) = (&self.inner as &dyn Error).request_ref::() { + if let Some(backtrace) = std::error::request_ref::(&self.inner as &dyn Error) { write!(f, " backtrace of inner error:\n{}", backtrace)?; } else { write!(f, " backtrace of `MetaError`:\n{}", self.backtrace)?; diff --git a/src/meta/src/hummock/compaction/level_selector.rs b/src/meta/src/hummock/compaction/level_selector.rs index 0edfeb0e5e130..893dffd79d6bc 100644 --- a/src/meta/src/hummock/compaction/level_selector.rs +++ b/src/meta/src/hummock/compaction/level_selector.rs @@ -548,10 +548,7 @@ impl LevelSelector for SpaceReclaimCompactionSelector { levels.member_table_ids.iter().cloned().collect(), ); let ctx = dynamic_level_core.calculate_level_base_size(levels); - let state = self - .state - .entry(group.group_id) - .or_insert_with(SpaceReclaimPickerState::default); + let state = self.state.entry(group.group_id).or_default(); let compaction_input = picker.pick_compaction(levels, level_handlers, state)?; compaction_input.add_pending_task(task_id, level_handlers); @@ -594,10 +591,7 @@ impl LevelSelector for TtlCompactionSelector { group.compaction_config.max_space_reclaim_bytes, table_id_to_options, ); - let state = self - .state - .entry(group.group_id) - .or_insert_with(TtlPickerState::default); + let state = self.state.entry(group.group_id).or_default(); let compaction_input = picker.pick_compaction(levels, level_handlers, state)?; compaction_input.add_pending_task(task_id, level_handlers); diff --git a/src/meta/src/hummock/compaction/mod.rs b/src/meta/src/hummock/compaction/mod.rs index a9f8d95457ccf..a30e4b0422111 100644 --- a/src/meta/src/hummock/compaction/mod.rs +++ b/src/meta/src/hummock/compaction/mod.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
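The `MetaError` Debug hunk above swaps the method-style `(&self.inner as &dyn Error).request_ref::<Backtrace>()`, gone with the removed `provide_any` feature, for the free function `std::error::request_ref`. A minimal sketch of providing and requesting a backtrace, assuming the `Request`-based nightly API this toolchain ships (`#![feature(error_generic_member_access)]`); the error type and its fields are hypothetical:

#![feature(error_generic_member_access)]

use std::backtrace::Backtrace;
use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct MyError {
    backtrace: Backtrace,
}

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "my error")
    }
}

impl Error for MyError {
    fn provide<'a>(&'a self, request: &mut std::error::Request<'a>) {
        // Expose the backtrace so request_ref can find it.
        request.provide_ref::<Backtrace>(&self.backtrace);
    }
}

fn main() {
    let err = MyError { backtrace: Backtrace::capture() };
    // Before: (&err as &dyn Error).request_ref::<Backtrace>()
    let bt = std::error::request_ref::<Backtrace>(&err as &dyn Error);
    assert!(bt.is_some());
}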
+#![expect(clippy::arc_with_non_send_sync, reason = "FIXME: later")] + pub mod compaction_config; mod level_selector; mod overlap_strategy; diff --git a/src/meta/src/hummock/compaction/picker/min_overlap_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/min_overlap_compaction_picker.rs index efacf94d7dd93..0cf44795e0acb 100644 --- a/src/meta/src/hummock/compaction/picker/min_overlap_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/min_overlap_compaction_picker.rs @@ -197,6 +197,7 @@ impl NonOverlapSubLevelPicker { ret.sstable_infos[0].extend(vec![sst.clone()]); let mut overlap_info = self.overlap_strategy.create_overlap_info(); let mut select_sst_id_set = BTreeSet::default(); + #[allow(clippy::single_range_in_vec_init)] let mut overlap_len_and_begins = vec![(sst_index..(sst_index + 1))]; for sst in &ret.sstable_infos[0] { overlap_info.update(sst); diff --git a/src/meta/src/hummock/compaction/picker/space_reclaim_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/space_reclaim_compaction_picker.rs index 95fd5d2d52353..a3ff21831fef8 100644 --- a/src/meta/src/hummock/compaction/picker/space_reclaim_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/space_reclaim_compaction_picker.rs @@ -399,7 +399,7 @@ mod test { selector = SpaceReclaimCompactionSelector::default(); // cut range [3,4] [6] [8,9,10] levels.member_table_ids = vec![0, 1, 2, 5, 7]; - let expect_task_file_count = vec![2, 1, 4]; + let expect_task_file_count = [2, 1, 4]; let expect_task_sst_id_range = vec![vec![3, 4], vec![6], vec![8, 9, 10, 11]]; for (index, x) in expect_task_file_count.iter().enumerate() { // // pick space reclaim @@ -449,7 +449,7 @@ mod test { selector = SpaceReclaimCompactionSelector::default(); // cut range [3,4] [6] [8,9,10] levels.member_table_ids = vec![0, 1, 2, 5, 7]; - let expect_task_file_count = vec![2, 1, 5]; + let expect_task_file_count = [2, 1, 5]; let expect_task_sst_id_range = vec![vec![3, 4], vec![6], vec![7, 8, 9, 10, 11]]; for (index, x) in expect_task_file_count.iter().enumerate() { if index == expect_task_file_count.len() - 1 { diff --git a/src/meta/src/hummock/compaction/picker/ttl_reclaim_compaction_picker.rs b/src/meta/src/hummock/compaction/picker/ttl_reclaim_compaction_picker.rs index a822d33db3cfa..9f84b99453f17 100644 --- a/src/meta/src/hummock/compaction/picker/ttl_reclaim_compaction_picker.rs +++ b/src/meta/src/hummock/compaction/picker/ttl_reclaim_compaction_picker.rs @@ -633,7 +633,7 @@ mod test { }, ); - let expect_task_file_count = vec![3, 2, 1]; + let expect_task_file_count = [3, 2, 1]; let expect_task_sst_id_range = vec![vec![2, 3, 4], vec![6, 7], vec![10]]; for (index, x) in expect_task_file_count.iter().enumerate() { // // pick ttl reclaim @@ -715,7 +715,7 @@ mod test { }, ); - let expect_task_file_count = vec![3, 3]; + let expect_task_file_count = [3, 3]; let expect_task_sst_id_range = vec![vec![2, 3, 4], vec![5, 6, 7]]; for (index, x) in expect_task_file_count.iter().enumerate() { if index == expect_task_file_count.len() - 1 { diff --git a/src/meta/src/hummock/compaction/tombstone_compaction_selector.rs b/src/meta/src/hummock/compaction/tombstone_compaction_selector.rs index f587865276dae..f6a26dcc13013 100644 --- a/src/meta/src/hummock/compaction/tombstone_compaction_selector.rs +++ b/src/meta/src/hummock/compaction/tombstone_compaction_selector.rs @@ -52,10 +52,7 @@ impl LevelSelector for TombstoneCompactionSelector { group.compaction_config.tombstone_reclaim_ratio as u64, group.compaction_config.tombstone_reclaim_ratio 
as u64 / 2, ); - let state = self - .state - .entry(group.group_id) - .or_insert_with(TombstoneReclaimPickerState::default); + let state = self.state.entry(group.group_id).or_default(); let compaction_input = picker.pick_compaction(levels, level_handlers, state)?; compaction_input.add_pending_task(task_id, level_handlers); diff --git a/src/meta/src/hummock/manager/compaction_group_manager.rs b/src/meta/src/hummock/manager/compaction_group_manager.rs index b3062722f20bf..44df14b141aa7 100644 --- a/src/meta/src/hummock/manager/compaction_group_manager.rs +++ b/src/meta/src/hummock/manager/compaction_group_manager.rs @@ -106,7 +106,7 @@ impl HummockManager { == Some(true); let mut pairs = vec![]; if let Some(mv_table) = mv_table { - if internal_tables.drain_filter(|t| *t == mv_table).count() > 0 { + if internal_tables.extract_if(|t| *t == mv_table).count() > 0 { tracing::warn!("`mv_table` {} found in `internal_tables`", mv_table); } // materialized_view @@ -180,7 +180,7 @@ impl HummockManager { let versioning = versioning_guard.deref_mut(); let current_version = &versioning.current_version; - for (table_id, _) in pairs.iter() { + for (table_id, _) in pairs { if let Some(old_group) = try_get_compaction_group_id_by_table_id(current_version, *table_id) { @@ -198,7 +198,7 @@ impl HummockManager { build_version_delta_after_version(current_version), ); - for (table_id, raw_group_id) in pairs.iter() { + for (table_id, raw_group_id) in pairs { let mut group_id = *raw_group_id; if group_id == StaticCompactionGroupId::NewCompactionGroup as u64 { let mut is_group_init = false; diff --git a/src/meta/src/hummock/manager/gc.rs b/src/meta/src/hummock/manager/gc.rs index cf39698f0cd0b..5533d9be68e85 100644 --- a/src/meta/src/hummock/manager/gc.rs +++ b/src/meta/src/hummock/manager/gc.rs @@ -206,7 +206,7 @@ pub async fn collect_global_gc_watermark( spin_interval: Duration, ) -> Result { let mut global_watermark = HummockSstableObjectId::MAX; - let workers = vec![ + let workers = [ cluster_manager.list_active_streaming_compute_nodes().await, cluster_manager .list_worker_node(WorkerType::Compactor, Some(Running)) diff --git a/src/meta/src/hummock/manager/mod.rs b/src/meta/src/hummock/manager/mod.rs index 47ffe743fb538..4da0e86d88f57 100644 --- a/src/meta/src/hummock/manager/mod.rs +++ b/src/meta/src/hummock/manager/mod.rs @@ -1994,7 +1994,7 @@ impl HummockManager { } #[named] - pub async fn hummock_timer_task(hummock_manager: Arc) -> (JoinHandle<()>, Sender<()>) { + pub fn hummock_timer_task(hummock_manager: Arc) -> (JoinHandle<()>, Sender<()>) { use futures::{FutureExt, StreamExt}; let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); @@ -2512,7 +2512,7 @@ impl HummockManager { } } - pub async fn compaction_event_loop( + pub fn compaction_event_loop( hummock_manager: Arc, mut compactor_streams_change_rx: UnboundedReceiver<( u32, diff --git a/src/meta/src/hummock/manager/tests.rs b/src/meta/src/hummock/manager/tests.rs index 697be2cfd7a3b..ac4fbd42b256e 100644 --- a/src/meta/src/hummock/manager/tests.rs +++ b/src/meta/src/hummock/manager/tests.rs @@ -880,8 +880,7 @@ async fn test_hummock_compaction_task_heartbeat() { let compactor_manager = hummock_manager.compactor_manager_ref_for_test(); let _tx = compactor_manager.add_compactor(context_id); - let (join_handle, shutdown_tx) = - HummockManager::hummock_timer_task(hummock_manager.clone()).await; + let (join_handle, shutdown_tx) = HummockManager::hummock_timer_task(hummock_manager.clone()); // No compaction task available. 
assert!(hummock_manager @@ -1001,8 +1000,7 @@ async fn test_hummock_compaction_task_heartbeat_removal_on_node_removal() { let compactor_manager = hummock_manager.compactor_manager_ref_for_test(); let _tx = compactor_manager.add_compactor(context_id); - let (join_handle, shutdown_tx) = - HummockManager::hummock_timer_task(hummock_manager.clone()).await; + let (join_handle, shutdown_tx) = HummockManager::hummock_timer_task(hummock_manager.clone()); // No compaction task available. assert!(hummock_manager diff --git a/src/meta/src/hummock/model/compaction_group_config.rs b/src/meta/src/hummock/model/compaction_group_config.rs index 757895709fd64..8331abac62017 100644 --- a/src/meta/src/hummock/model/compaction_group_config.rs +++ b/src/meta/src/hummock/model/compaction_group_config.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::borrow::Borrow; use std::sync::Arc; pub use risingwave_common::catalog::TableOption; @@ -78,7 +77,7 @@ impl MetadataModel for CompactionGroup { } fn to_protobuf(&self) -> Self::PbType { - self.borrow().into() + self.into() } fn from_protobuf(prost: Self::PbType) -> Self { diff --git a/src/meta/src/lib.rs b/src/meta/src/lib.rs index e812ee5217296..92d3c571f57c5 100644 --- a/src/meta/src/lib.rs +++ b/src/meta/src/lib.rs @@ -16,23 +16,21 @@ #![feature(trait_alias)] #![feature(binary_heap_drain_sorted)] #![feature(type_alias_impl_trait)] -#![feature(drain_filter)] +#![feature(extract_if)] #![feature(custom_test_frameworks)] #![feature(lint_reasons)] #![feature(map_try_insert)] -#![feature(hash_drain_filter)] -#![feature(btree_drain_filter)] +#![feature(hash_extract_if)] +#![feature(btree_extract_if)] #![feature(result_option_inspect)] #![feature(lazy_cell)] #![feature(let_chains)] #![feature(error_generic_member_access)] -#![feature(provide_any)] #![feature(assert_matches)] #![feature(try_blocks)] #![cfg_attr(coverage, feature(no_coverage))] #![test_runner(risingwave_test_runner::test_runner::run_failpont_tests)] #![feature(is_sorted)] -#![feature(string_leak)] #![feature(impl_trait_in_assoc_type)] #![feature(type_name_of_val)] diff --git a/src/meta/src/manager/catalog/fragment.rs b/src/meta/src/manager/catalog/fragment.rs index 8b725602ae93c..1a74608c848a1 100644 --- a/src/meta/src/manager/catalog/fragment.rs +++ b/src/meta/src/manager/catalog/fragment.rs @@ -823,7 +823,7 @@ impl FragmentManager { assert!(actor_id_set.contains(actor_id)); } - actors.drain_filter(|actor_id| to_remove.contains(actor_id)); + actors.retain(|actor_id| !to_remove.contains(actor_id)); actors.extend_from_slice(to_create); } @@ -862,7 +862,7 @@ impl FragmentManager { for table_id in to_update_table_fragments { // Takes out the reschedules of the fragments in this table. 
let reschedules = reschedules - .drain_filter(|fragment_id, _| { + .extract_if(|fragment_id, _| { table_fragments .get(&table_id) .unwrap() @@ -1063,7 +1063,7 @@ impl FragmentManager { .map(|table_fragments| table_fragments.worker_actor_ids()) .reduce(|mut btree_map, next_map| { next_map.into_iter().for_each(|(k, v)| { - btree_map.entry(k).or_insert_with(Vec::new).extend(v); + btree_map.entry(k).or_default().extend(v); }); btree_map }) diff --git a/src/meta/src/manager/catalog/user.rs b/src/meta/src/manager/catalog/user.rs index ab3b8717c1b84..8037938937015 100644 --- a/src/meta/src/manager/catalog/user.rs +++ b/src/meta/src/manager/catalog/user.rs @@ -84,7 +84,7 @@ impl UserManager { for option in &grant_privilege_item.action_with_opts { self.user_grant_relation .entry(option.get_granted_by()) - .or_insert_with(HashSet::new) + .or_default() .insert(*user_id); } } diff --git a/src/meta/src/manager/cluster.rs b/src/meta/src/manager/cluster.rs index e3de9b18aa6da..da5b4fce20711 100644 --- a/src/meta/src/manager/cluster.rs +++ b/src/meta/src/manager/cluster.rs @@ -329,7 +329,7 @@ impl ClusterManager { )) } - pub async fn start_heartbeat_checker( + pub fn start_heartbeat_checker( cluster_manager: ClusterManagerRef, check_interval: Duration, ) -> (JoinHandle<()>, Sender<()>) { @@ -666,7 +666,7 @@ impl ClusterManagerCore { let mut streaming_worker_node = self.list_streaming_worker_node(Some(State::Running)); let unschedulable_worker_node = streaming_worker_node - .drain_filter(|worker| { + .extract_if(|worker| { worker .property .as_ref() @@ -949,7 +949,7 @@ mod tests { ); let (join_handle, shutdown_sender) = - ClusterManager::start_heartbeat_checker(cluster_manager.clone(), check_interval).await; + ClusterManager::start_heartbeat_checker(cluster_manager.clone(), check_interval); tokio::time::sleep(ttl * 2 + check_interval).await; // One live node left. diff --git a/src/meta/src/manager/idle.rs b/src/meta/src/manager/idle.rs index 4659d52c3fd95..d6defb8603409 100644 --- a/src/meta/src/manager/idle.rs +++ b/src/meta/src/manager/idle.rs @@ -74,7 +74,7 @@ impl IdleManager { } /// Idle checker send signal when the meta does not receive requests for long time. 
- pub async fn start_idle_checker( + pub fn start_idle_checker( idle_manager: IdleManagerRef, check_interval: Duration, idle_send: tokio::sync::oneshot::Sender<()>, diff --git a/src/meta/src/manager/sink_coordination/manager.rs b/src/meta/src/manager/sink_coordination/manager.rs index 522b40cd58001..b38d70119e576 100644 --- a/src/meta/src/manager/sink_coordination/manager.rs +++ b/src/meta/src/manager/sink_coordination/manager.rs @@ -434,7 +434,7 @@ mod tests { let (first, second) = all_vnode.split_at(VirtualNode::COUNT / 2); let build_bitmap = |indexes: &[usize]| { let mut builder = BitmapBuilder::zeroed(VirtualNode::COUNT); - for i in indexes.iter() { + for i in indexes { builder.set(*i, true); } builder.finish() @@ -644,7 +644,7 @@ mod tests { let (first, second) = all_vnode.split_at(VirtualNode::COUNT / 2); let build_bitmap = |indexes: &[usize]| { let mut builder = BitmapBuilder::zeroed(VirtualNode::COUNT); - for i in indexes.iter() { + for i in indexes { builder.set(*i, true); } builder.finish() @@ -726,7 +726,7 @@ mod tests { let (first, second) = all_vnode.split_at(VirtualNode::COUNT / 2); let build_bitmap = |indexes: &[usize]| { let mut builder = BitmapBuilder::zeroed(VirtualNode::COUNT); - for i in indexes.iter() { + for i in indexes { builder.set(*i, true); } builder.finish() diff --git a/src/meta/src/manager/system_param/mod.rs b/src/meta/src/manager/system_param/mod.rs index 4f6bdb4c65b93..861234bdfe9fe 100644 --- a/src/meta/src/manager/system_param/mod.rs +++ b/src/meta/src/manager/system_param/mod.rs @@ -113,9 +113,7 @@ impl SystemParamsManager { } // Periodically sync params to worker nodes. - pub async fn start_params_notifier( - system_params_manager: Arc, - ) -> (JoinHandle<()>, Sender<()>) { + pub fn start_params_notifier(system_params_manager: Arc) -> (JoinHandle<()>, Sender<()>) { const NOTIFY_INTERVAL: Duration = Duration::from_millis(5000); let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel(); diff --git a/src/meta/src/rpc/server.rs b/src/meta/src/rpc/server.rs index 5f85a494c0624..cb64adc13dae9 100644 --- a/src/meta/src/rpc/server.rs +++ b/src/meta/src/rpc/server.rs @@ -170,7 +170,6 @@ pub async fn rpc_serve( opts, init_system_params, ) - .await } MetaStoreBackend::Mem => { let meta_store = MemStore::new().into_ref(); @@ -183,12 +182,12 @@ pub async fn rpc_serve( opts, init_system_params, ) - .await } } } -pub async fn rpc_serve_with_store( +#[expect(clippy::type_complexity)] +pub fn rpc_serve_with_store( meta_store: MetaStoreRef, election_client: Option, address_info: AddressInfo, @@ -607,11 +606,14 @@ pub async fn start_service_as_election_leader( ) .await, ); - sub_tasks.push(SystemParamsManager::start_params_notifier(system_params_manager.clone()).await); - sub_tasks.push(HummockManager::hummock_timer_task(hummock_manager.clone()).await); - sub_tasks.push( - HummockManager::compaction_event_loop(hummock_manager, compactor_streams_change_rx).await, - ); + sub_tasks.push(SystemParamsManager::start_params_notifier( + system_params_manager.clone(), + )); + sub_tasks.push(HummockManager::hummock_timer_task(hummock_manager.clone())); + sub_tasks.push(HummockManager::compaction_event_loop( + hummock_manager, + compactor_streams_change_rx, + )); sub_tasks.push( serving::start_serving_vnode_mapping_worker( env.notification_manager_ref(), @@ -623,20 +625,18 @@ pub async fn start_service_as_election_leader( ); if cfg!(not(test)) { - sub_tasks.push( - ClusterManager::start_heartbeat_checker( - cluster_manager.clone(), - Duration::from_secs(1), - ) - .await, 
- ); - sub_tasks.push(GlobalBarrierManager::start(barrier_manager).await); + sub_tasks.push(ClusterManager::start_heartbeat_checker( + cluster_manager.clone(), + Duration::from_secs(1), + )); + sub_tasks.push(GlobalBarrierManager::start(barrier_manager)); } let (idle_send, idle_recv) = tokio::sync::oneshot::channel(); - sub_tasks.push( - IdleManager::start_idle_checker(env.idle_manager_ref(), Duration::from_secs(30), idle_send) - .await, - ); + sub_tasks.push(IdleManager::start_idle_checker( + env.idle_manager_ref(), + Duration::from_secs(30), + idle_send, + )); let (abort_sender, abort_recv) = tokio::sync::oneshot::channel(); let notification_mgr = env.notification_manager_ref(); diff --git a/src/meta/src/stream/scale.rs b/src/meta/src/stream/scale.rs index 0c335e6be849e..a125d61d91703 100644 --- a/src/meta/src/stream/scale.rs +++ b/src/meta/src/stream/scale.rs @@ -382,7 +382,7 @@ impl GlobalStreamManager { }) .collect(); - for (fragment_id, reschedule) in reschedule.iter() { + for (fragment_id, reschedule) in &*reschedule { for parallel_unit_id in &reschedule.added_parallel_units { if let Some(worker_id) = unschedulable_parallel_unit_ids.get(parallel_unit_id) { bail!( @@ -475,7 +475,7 @@ impl GlobalStreamManager { added_parallel_units, removed_parallel_units, }, - ) in reschedule.iter() + ) in &*reschedule { let fragment = fragment_map .get(fragment_id) @@ -682,7 +682,7 @@ impl GlobalStreamManager { if let Some(downstream_actor) = actor_map.get(downstream_actor_id) { fragment_dispatcher_map .entry(actor.fragment_id as FragmentId) - .or_insert(HashMap::new()) + .or_default() .insert( downstream_actor.fragment_id as FragmentId, dispatcher.r#type(), @@ -1549,7 +1549,7 @@ impl GlobalStreamManager { { dispatcher .downstream_actor_id - .drain_filter(|id| downstream_actors_to_remove.contains_key(id)); + .retain(|id| !downstream_actors_to_remove.contains_key(id)); } if let Some(downstream_actors_to_create) = downstream_fragment_actors_to_create @@ -1902,8 +1902,8 @@ impl GlobalStreamManager { } } - target_plan.drain_filter(|_, plan| { - plan.added_parallel_units.is_empty() && plan.removed_parallel_units.is_empty() + target_plan.retain(|_, plan| { + !(plan.added_parallel_units.is_empty() && plan.removed_parallel_units.is_empty()) }); Ok(target_plan) diff --git a/src/meta/src/stream/source_manager.rs b/src/meta/src/stream/source_manager.rs index 73e2426170ce8..a6b25d5fba4d7 100644 --- a/src/meta/src/stream/source_manager.rs +++ b/src/meta/src/stream/source_manager.rs @@ -329,7 +329,7 @@ impl SourceManagerCore { self.source_fragments .entry(source_id) - .or_insert_with(BTreeSet::default) + .or_default() .append(&mut fragment_ids); } } @@ -397,7 +397,7 @@ impl PartialEq for ActorSplitsAssignment { impl PartialOrd for ActorSplitsAssignment { fn partial_cmp(&self, other: &Self) -> Option { - other.splits.len().partial_cmp(&self.splits.len()) + Some(self.cmp(other)) } } @@ -527,7 +527,6 @@ impl SourceManager { &mut managed_sources, metrics.clone(), ) - .await } } @@ -708,7 +707,7 @@ impl SourceManager { Ok(()) } - async fn create_source_worker_async( + fn create_source_worker_async( connector_client: Option, source: Source, managed_sources: &mut HashMap, diff --git a/src/meta/src/stream/stream_graph/schedule.rs b/src/meta/src/stream/stream_graph/schedule.rs index 72012a816f109..4df57ea901331 100644 --- a/src/meta/src/stream/stream_graph/schedule.rs +++ b/src/meta/src/stream/stream_graph/schedule.rs @@ -218,14 +218,16 @@ impl Scheduler { // Visit the parallel units in a round-robin manner on each 
worker. let mut round_robin = Vec::new(); while !parallel_units.is_empty() { - parallel_units.drain_filter(|ps| { - if let Some(p) = ps.next() { - round_robin.push(p); - false - } else { - true - } - }); + parallel_units + .extract_if(|ps| { + if let Some(p) = ps.next() { + round_robin.push(p); + false + } else { + true + } + }) + .for_each(drop); } round_robin.truncate(default_parallelism.get()); assert_eq!(round_robin.len(), default_parallelism.get()); diff --git a/src/meta/src/stream/stream_manager.rs b/src/meta/src/stream/stream_manager.rs index 1be53330ac735..558149787c85f 100644 --- a/src/meta/src/stream/stream_manager.rs +++ b/src/meta/src/stream/stream_manager.rs @@ -823,7 +823,7 @@ mod tests { hummock_manager, )?; - let (join_handle_2, shutdown_tx_2) = GlobalBarrierManager::start(barrier_manager).await; + let (join_handle_2, shutdown_tx_2) = GlobalBarrierManager::start(barrier_manager); // Wait until the bootstrap recovery is done. loop { diff --git a/src/meta/src/stream/test_fragmenter.rs b/src/meta/src/stream/test_fragmenter.rs index 3a36025525a6c..add6811272b04 100644 --- a/src/meta/src/stream/test_fragmenter.rs +++ b/src/meta/src/stream/test_fragmenter.rs @@ -188,7 +188,7 @@ fn make_materialize_table(id: u32) -> PbTable { fn make_stream_fragments() -> Vec { let mut fragments = vec![]; // table source node - let column_ids = vec![1, 2, 0]; + let column_ids = [1, 2, 0]; let columns = column_ids .iter() .map(|column_id| ColumnCatalog { diff --git a/src/object_store/Cargo.toml b/src/object_store/Cargo.toml index f13c22050d04e..88f73ca1d3c7d 100644 --- a/src/object_store/Cargo.toml +++ b/src/object_store/Cargo.toml @@ -31,9 +31,7 @@ thiserror = "1" tokio = { version = "0.2", package = "madsim-tokio", features = ["fs"] } tokio-retry = "0.3" tracing = "0.1" -# This crate is excluded from hakari (see hakari.toml) after hdfs is introduced... 
-# -# [target.'cfg(not(madsim))'.dependencies] +# This crate is excluded from hakari (see hakari.toml) after hdfs is introduced...## [target.'cfg(not(madsim))'.dependencies] # workspace-hack = { path = "../workspace-hack" } # # [package.metadata.cargo-machete] diff --git a/src/object_store/src/lib.rs b/src/object_store/src/lib.rs index 4f1233bb627bf..f586d3be9e858 100644 --- a/src/object_store/src/lib.rs +++ b/src/object_store/src/lib.rs @@ -17,7 +17,6 @@ #![feature(lazy_cell)] #![feature(lint_reasons)] #![feature(error_generic_member_access)] -#![feature(provide_any)] #![feature(let_chains)] pub mod object; diff --git a/src/object_store/src/object/error.rs b/src/object_store/src/object/error.rs index 98da40da7b5b0..a61f236e75636 100644 --- a/src/object_store/src/object/error.rs +++ b/src/object_store/src/object/error.rs @@ -55,11 +55,9 @@ pub struct ObjectError { impl std::fmt::Debug for ObjectError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - use std::error::Error; - write!(f, "{}", self.inner)?; writeln!(f)?; - if let Some(backtrace) = (&self.inner as &dyn Error).request_ref::<Backtrace>() { + if let Some(backtrace) = std::error::request_ref::<Backtrace>(&self.inner) { write!(f, " backtrace of inner error:\n{}", backtrace)?; } else { write!(f, " backtrace of `ObjectError`:\n{}", self.backtrace)?; diff --git a/src/object_store/src/object/mod.rs b/src/object_store/src/object/mod.rs index 8f9b3a6cd3781..deb29a07dc484 100644 --- a/src/object_store/src/object/mod.rs +++ b/src/object_store/src/object/mod.rs @@ -469,6 +469,9 @@ impl MonitoredStreamingReader { } } + // This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380. + // TODO: remove `allow` here after the issue is closed. + #[expect(clippy::needless_pass_by_ref_mut)] pub async fn read_bytes(&mut self, buf: &mut [u8]) -> ObjectResult { let operation_type = "streaming_read_read_bytes"; let data_len = buf.len(); diff --git a/src/object_store/src/object/opendal_engine/opendal_object_store.rs b/src/object_store/src/object/opendal_engine/opendal_object_store.rs index 19787c53ec08d..de410507da622 100644 --- a/src/object_store/src/object/opendal_engine/opendal_object_store.rs +++ b/src/object_store/src/object/opendal_engine/opendal_object_store.rs @@ -405,7 +405,7 @@ mod tests { .unwrap(); let metadata = store.metadata("test.obj").await.unwrap(); assert_eq!(payload.len(), metadata.total_size); - let test_loc = vec![(0, 1000), (10000, 1000), (20000, 1000)]; + let test_loc = [(0, 1000), (10000, 1000), (20000, 1000)]; let read_data = store .readv( "test.obj", diff --git a/src/object_store/src/object/s3.rs b/src/object_store/src/object/s3.rs index bc1766dd52696..e0d561e3bdb32 100644 --- a/src/object_store/src/object/s3.rs +++ b/src/object_store/src/object/s3.rs @@ -722,7 +722,7 @@ impl S3ObjectStore { .send() .await; if let Ok(config) = &get_config_result { - for rule in config.rules().unwrap_or_default().iter() { + for rule in config.rules().unwrap_or_default() { if matches!(rule.status().unwrap(), ExpirationStatus::Enabled) && rule.abort_incomplete_multipart_upload().is_some() { diff --git a/src/prost/Cargo.toml b/src/prost/Cargo.toml index a853dc8c7f464..a1acb61dc86d8 100644 --- a/src/prost/Cargo.toml +++ b/src/prost/Cargo.toml @@ -29,3 +29,6 @@ ignored = ["workspace-hack"] [package.metadata.cargo-udeps.ignore] normal = ["workspace-hack"] + +[lints] +workspace = true diff --git a/src/prost/helpers/Cargo.toml b/src/prost/helpers/Cargo.toml index 5f090e94d0bd0..50d9b4febd80b 100644 ---
a/src/prost/helpers/Cargo.toml +++ b/src/prost/helpers/Cargo.toml @@ -19,3 +19,6 @@ ignored = ["workspace-hack"] [package.metadata.cargo-udeps.ignore] normal = ["workspace-hack"] + +[lints] +workspace = true diff --git a/src/risedevtool/Cargo.toml b/src/risedevtool/Cargo.toml index b50ca017c1149..b67fa31498c01 100644 --- a/src/risedevtool/Cargo.toml +++ b/src/risedevtool/Cargo.toml @@ -45,3 +45,6 @@ tracing = "0.1" tracing-subscriber = "0.3" workspace-hack = { path = "../workspace-hack" } yaml-rust = "0.4" + +[lints] +workspace = true diff --git a/src/risedevtool/config/Cargo.toml b/src/risedevtool/config/Cargo.toml index dc9d445ff8282..441742e3c2b6c 100644 --- a/src/risedevtool/config/Cargo.toml +++ b/src/risedevtool/config/Cargo.toml @@ -15,3 +15,6 @@ dialoguer = "0.10" enum-iterator = "1" fs-err = "2.9.0" itertools = "0.11" + +[lints] +workspace = true diff --git a/src/rpc_client/Cargo.toml b/src/rpc_client/Cargo.toml index 97225211d8ce9..7c3707d4fbc4c 100644 --- a/src/rpc_client/Cargo.toml +++ b/src/rpc_client/Cargo.toml @@ -46,3 +46,6 @@ url = "2.4.1" [target.'cfg(not(madsim))'.dependencies] moka = { version = "0.11", features = ["future"] } workspace-hack = { path = "../workspace-hack" } + +[lints] +workspace = true diff --git a/src/rpc_client/src/lib.rs b/src/rpc_client/src/lib.rs index 8230e17227bdb..aabb8e7378b65 100644 --- a/src/rpc_client/src/lib.rs +++ b/src/rpc_client/src/lib.rs @@ -22,7 +22,7 @@ #![feature(associated_type_defaults)] #![feature(generators)] #![feature(iterator_try_collect)] -#![feature(hash_drain_filter)] +#![feature(hash_extract_if)] #![feature(try_blocks)] #![feature(let_chains)] #![feature(impl_trait_in_assoc_type)] diff --git a/src/rpc_client/src/meta_client.rs b/src/rpc_client/src/meta_client.rs index 3179fd500e598..2b87ae995a564 100644 --- a/src/rpc_client/src/meta_client.rs +++ b/src/rpc_client/src/meta_client.rs @@ -1369,7 +1369,7 @@ impl MetaMemberManagement { Either::Right(member_group) => { let mut fetched_members = None; - for (addr, client) in member_group.members.iter_mut() { + for (addr, client) in &mut member_group.members { let client: Result = try { match client { Some(cached_client) => cached_client.to_owned(), @@ -1458,7 +1458,7 @@ impl GrpcMetaClient { // Max retry times for connecting to meta server. const INIT_RETRY_MAX_INTERVAL_MS: u64 = 5000; - async fn start_meta_member_monitor( + fn start_meta_member_monitor( &self, init_leader_addr: String, members: Either, @@ -1559,9 +1559,7 @@ impl GrpcMetaClient { } }; - client - .start_meta_member_monitor(addr, members, force_refresh_receiver, config) - .await?; + client.start_meta_member_monitor(addr, members, force_refresh_receiver, config)?; client.force_refresh_leader().await?; diff --git a/src/source/Cargo.toml b/src/source/Cargo.toml index 6e98c9a4526a5..bf60bc45f7395 100644 --- a/src/source/Cargo.toml +++ b/src/source/Cargo.toml @@ -39,3 +39,6 @@ tempfile = "3" [[bench]] name = "json_parser" harness = false + +[lints] +workspace = true diff --git a/src/source/src/fs_connector_source.rs b/src/source/src/fs_connector_source.rs index 94a90ff5f69e0..daee19569db0f 100644 --- a/src/source/src/fs_connector_source.rs +++ b/src/source/src/fs_connector_source.rs @@ -40,8 +40,7 @@ impl FsConnectorSource { parser_config: SpecificParserConfig, ) -> Result { // Store the connector node address to properties for later use. 
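// ---------------------------------------------------------------------------
// Editorial sketch (not from the codebase): the de-asyncing pattern behind the
// dropped `.await`s on `GlobalBarrierManager::start`, the heartbeat/idle
// checkers, and `start_meta_member_monitor`. A helper whose only job is to
// `tokio::spawn` a task has no reason to be `async` itself; returning the
// handle from a plain `fn` lets every caller shed an `.await`. The name
// `start_heartbeat` is illustrative; tokio features (rt, time, sync, macros)
// are assumed.
use std::time::Duration;
use tokio::sync::oneshot;
use tokio::task::JoinHandle;

fn start_heartbeat(period: Duration) -> (JoinHandle<()>, oneshot::Sender<()>) {
    let (shutdown_tx, mut shutdown_rx) = oneshot::channel();
    let join_handle = tokio::spawn(async move {
        let mut ticker = tokio::time::interval(period);
        loop {
            tokio::select! {
                _ = ticker.tick() => { /* send a heartbeat here */ }
                _ = &mut shutdown_rx => break,
            }
        }
    });
    (join_handle, shutdown_tx)
}
// ---------------------------------------------------------------------------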
- let mut source_props: HashMap = - HashMap::from_iter(properties.clone().into_iter()); + let mut source_props: HashMap = HashMap::from_iter(properties.clone()); connector_node_addr .map(|addr| source_props.insert("connector_node_addr".to_string(), addr)); let config = diff --git a/src/source/src/lib.rs b/src/source/src/lib.rs index a1b588b005988..30c7d90cfe771 100644 --- a/src/source/src/lib.rs +++ b/src/source/src/lib.rs @@ -13,13 +13,12 @@ // limitations under the License. #![allow(clippy::derive_partial_eq_without_eq)] -#![allow(rustdoc::private_intra_doc_links)] #![feature(trait_alias)] #![feature(binary_heap_drain_sorted)] #![feature(lint_reasons)] #![feature(result_option_inspect)] #![feature(generators)] -#![feature(hash_drain_filter)] +#![feature(hash_extract_if)] #![feature(type_alias_impl_trait)] #![feature(box_patterns)] diff --git a/src/sqlparser/Cargo.toml b/src/sqlparser/Cargo.toml index 74b1cc2301805..56566b4090a53 100644 --- a/src/sqlparser/Cargo.toml +++ b/src/sqlparser/Cargo.toml @@ -44,3 +44,6 @@ disable-publish = true [[bin]] name = "sqlparser" path = "src/bin/sqlparser.rs" + +[lints] +workspace = true diff --git a/src/sqlparser/fuzz/Cargo.toml b/src/sqlparser/fuzz/Cargo.toml index d53162b86059e..24ebb8e6ba7fd 100644 --- a/src/sqlparser/fuzz/Cargo.toml +++ b/src/sqlparser/fuzz/Cargo.toml @@ -20,3 +20,6 @@ members = ["."] [[bin]] name = "fuzz_parse_sql" path = "fuzz_targets/fuzz_parse_sql.rs" + +[lints] +workspace = true diff --git a/src/sqlparser/sqlparser_bench/Cargo.toml b/src/sqlparser/sqlparser_bench/Cargo.toml index de5f06c2ebede..f28d7ef75e2a2 100644 --- a/src/sqlparser/sqlparser_bench/Cargo.toml +++ b/src/sqlparser/sqlparser_bench/Cargo.toml @@ -19,3 +19,6 @@ criterion = { workspace = true } [[bench]] name = "sqlparser_bench" harness = false + +[lints] +workspace = true diff --git a/src/sqlparser/src/ast/mod.rs b/src/sqlparser/src/ast/mod.rs index 98b3bb9d7c4f7..ecae5a9663a88 100644 --- a/src/sqlparser/src/ast/mod.rs +++ b/src/sqlparser/src/ast/mod.rs @@ -181,7 +181,7 @@ impl fmt::Display for Ident { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.quote_style { Some(q) if q == '"' || q == '\'' || q == '`' => write!(f, "{}{}{}", q, self.value, q), - Some(q) if q == '[' => write!(f, "[{}]", self.value), + Some('[') => write!(f, "[{}]", self.value), None => f.write_str(&self.value), _ => panic!("unexpected quote style"), } diff --git a/src/sqlparser/src/parser.rs b/src/sqlparser/src/parser.rs index 84dc34ada3b64..1917a04e85b9d 100644 --- a/src/sqlparser/src/parser.rs +++ b/src/sqlparser/src/parser.rs @@ -3150,7 +3150,11 @@ impl Parser { pub fn parse_literal_string(&mut self) -> Result { let token = self.next_token(); match token.token { - Token::Word(Word { value, keyword, .. }) if keyword == Keyword::NoKeyword => Ok(value), + Token::Word(Word { + value, + keyword: Keyword::NoKeyword, + .. + }) => Ok(value), Token::SingleQuotedString(s) => Ok(s), unexpected => self.expected("literal string", unexpected.with_location(token.location)), } @@ -3160,7 +3164,11 @@ impl Parser { pub fn parse_map_key(&mut self) -> Result { let token = self.next_token(); match token.token { - Token::Word(Word { value, keyword, .. }) if keyword == Keyword::NoKeyword => { + Token::Word(Word { + value, + keyword: Keyword::NoKeyword, + .. 
+ }) => { if self.peek_token() == Token::LParen { return self.parse_function(ObjectName(vec![Ident::new_unchecked(value)])); } diff --git a/src/sqlparser/test_runner/Cargo.toml b/src/sqlparser/test_runner/Cargo.toml index fbcd8a4317757..b7da53b7435b3 100644 --- a/src/sqlparser/test_runner/Cargo.toml +++ b/src/sqlparser/test_runner/Cargo.toml @@ -37,3 +37,6 @@ workspace-hack = { path = "../../workspace-hack" } [build-dependencies] walkdir = "2" + +[lints] +workspace = true diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml index 1b0c42502e995..166b76cec1b5e 100644 --- a/src/storage/Cargo.toml +++ b/src/storage/Cargo.toml @@ -135,3 +135,6 @@ harness = false [[bench]] name = "bench_row" harness = false + +[lints] +workspace = true diff --git a/src/storage/backup/Cargo.toml b/src/storage/backup/Cargo.toml index d6cb6831b10bd..c36dd17f364ca 100644 --- a/src/storage/backup/Cargo.toml +++ b/src/storage/backup/Cargo.toml @@ -28,3 +28,6 @@ serde = { version = "1", features = ["derive"] } serde_json = "1" thiserror = "1" twox-hash = "1" + +[lints] +workspace = true diff --git a/src/storage/backup/cmd/Cargo.toml b/src/storage/backup/cmd/Cargo.toml index c18da95ed293f..e94b16685a620 100644 --- a/src/storage/backup/cmd/Cargo.toml +++ b/src/storage/backup/cmd/Cargo.toml @@ -33,3 +33,6 @@ tokio = { version = "0.2", package = "madsim-tokio", features = [ [[bin]] name = "backup-restore" path = "src/bin/backup_restore.rs" + +[lints] +workspace = true diff --git a/src/storage/backup/src/lib.rs b/src/storage/backup/src/lib.rs index 2de5c0aee7fee..330dfbc4de44c 100644 --- a/src/storage/backup/src/lib.rs +++ b/src/storage/backup/src/lib.rs @@ -16,17 +16,16 @@ #![feature(trait_alias)] #![feature(binary_heap_drain_sorted)] #![feature(type_alias_impl_trait)] -#![feature(drain_filter)] +#![feature(extract_if)] #![feature(custom_test_frameworks)] #![feature(lint_reasons)] #![feature(map_try_insert)] -#![feature(hash_drain_filter)] -#![feature(btree_drain_filter)] +#![feature(hash_extract_if)] +#![feature(btree_extract_if)] #![feature(result_option_inspect)] #![feature(lazy_cell)] #![feature(let_chains)] #![feature(error_generic_member_access)] -#![feature(provide_any)] #![cfg_attr(coverage, feature(no_coverage))] pub mod error; diff --git a/src/storage/benches/bench_compression.rs b/src/storage/benches/bench_compression.rs index 63f283187222e..8f64d75e46ddc 100644 --- a/src/storage/benches/bench_compression.rs +++ b/src/storage/benches/bench_compression.rs @@ -42,7 +42,7 @@ fn gen_dataset(vsize: usize) -> Vec> { fn gen_data(dataset: &[Vec]) -> Vec { let mut data = vec![]; - for entry in dataset.iter() { + for entry in dataset { data.put_slice(entry); } data diff --git a/src/storage/compactor/Cargo.toml b/src/storage/compactor/Cargo.toml index e4e6e984e5c7c..f4118ff639b5d 100644 --- a/src/storage/compactor/Cargo.toml +++ b/src/storage/compactor/Cargo.toml @@ -43,3 +43,6 @@ tracing = "0.1" [target.'cfg(not(madsim))'.dependencies] workspace-hack = { path = "../../workspace-hack" } + +[lints] +workspace = true diff --git a/src/storage/hummock_sdk/Cargo.toml b/src/storage/hummock_sdk/Cargo.toml index 6cdf019f49453..a4773c0cd0e74 100644 --- a/src/storage/hummock_sdk/Cargo.toml +++ b/src/storage/hummock_sdk/Cargo.toml @@ -24,3 +24,6 @@ tracing = "0.1" [target.'cfg(not(madsim))'.dependencies] workspace-hack = { path = "../../workspace-hack" } + +[lints] +workspace = true diff --git a/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs 
b/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs index 89600cc1d2bd0..1193877a14c9b 100644 --- a/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs +++ b/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs @@ -324,7 +324,7 @@ impl HummockVersionUpdateExt for HummockVersion { ); sub_level .table_infos - .drain_filter(|sst_info| sst_info.table_ids.is_empty()) + .extract_if(|sst_info| sst_info.table_ids.is_empty()) .for_each(|sst_info| { sub_level.total_file_size -= sst_info.file_size; sub_level.uncompressed_file_size -= sst_info.uncompressed_file_size; @@ -377,7 +377,7 @@ impl HummockVersionUpdateExt for HummockVersion { assert!(can_concat(&cur_levels.levels[idx].table_infos)); level .table_infos - .drain_filter(|sst_info| sst_info.table_ids.is_empty()) + .extract_if(|sst_info| sst_info.table_ids.is_empty()) .for_each(|sst_info| { level.total_file_size -= sst_info.file_size; level.uncompressed_file_size -= sst_info.uncompressed_file_size; @@ -502,7 +502,7 @@ impl HummockVersionUpdateExt for HummockVersion { .expect("compaction group should exist"); let mut moving_tables = levels .member_table_ids - .drain_filter(|t| group_change.table_ids.contains(t)) + .extract_if(|t| group_change.table_ids.contains(t)) .collect_vec(); self.levels .get_mut(compaction_group_id) diff --git a/src/storage/hummock_sdk/src/lib.rs b/src/storage/hummock_sdk/src/lib.rs index 0d4125618af1e..0fc6735571e4d 100644 --- a/src/storage/hummock_sdk/src/lib.rs +++ b/src/storage/hummock_sdk/src/lib.rs @@ -13,8 +13,8 @@ // limitations under the License. #![feature(async_closure)] -#![feature(drain_filter)] -#![feature(hash_drain_filter)] +#![feature(extract_if)] +#![feature(hash_extract_if)] #![feature(lint_reasons)] #![feature(map_many_mut)] #![feature(bound_map)] diff --git a/src/storage/hummock_test/Cargo.toml b/src/storage/hummock_test/Cargo.toml index f58acd7457db0..600a5249ddf1b 100644 --- a/src/storage/hummock_test/Cargo.toml +++ b/src/storage/hummock_test/Cargo.toml @@ -65,3 +65,6 @@ required-features = ["test"] [[bin]] name = "replay" path = "src/bin/replay/main.rs" + +[lints] +workspace = true diff --git a/src/storage/hummock_trace/Cargo.toml b/src/storage/hummock_trace/Cargo.toml index 2150ae25306bc..f9b8fa85bc101 100644 --- a/src/storage/hummock_trace/Cargo.toml +++ b/src/storage/hummock_trace/Cargo.toml @@ -27,3 +27,6 @@ tracing = "0.1" [dev-dependencies] itertools = "0.10.5" mockall = "0.11.4" + +[lints] +workspace = true diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs index 53696b335e812..631d9039e76c5 100644 --- a/src/storage/src/error.rs +++ b/src/storage/src/error.rs @@ -77,7 +77,7 @@ impl std::fmt::Debug for StorageError { write!(f, "{}", self)?; writeln!(f)?; - if let Some(backtrace) = (&self as &dyn Error).request_ref::() { + if let Some(backtrace) = std::error::request_ref::(&self as &dyn Error) { // Since we forward all backtraces from source, `self.backtrace()` is the backtrace of // inner error. write!(f, " backtrace of inner error:\n{}", backtrace)?; diff --git a/src/storage/src/hummock/compactor/compactor_runner.rs b/src/storage/src/hummock/compactor/compactor_runner.rs index 6ce3d0b829bbc..be13e32893d64 100644 --- a/src/storage/src/hummock/compactor/compactor_runner.rs +++ b/src/storage/src/hummock/compactor/compactor_runner.rs @@ -138,6 +138,9 @@ impl CompactorRunner { Ok((self.split_index, ssts, compaction_stat)) } + // This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380. 
+ // TODO: remove `allow` here after the issue is closed. + #[expect(clippy::needless_pass_by_ref_mut)] pub async fn build_delete_range_iter( sstable_infos: &Vec, sstable_store: &SstableStoreRef, diff --git a/src/storage/src/hummock/compactor/iterator.rs b/src/storage/src/hummock/compactor/iterator.rs index c41d14b846a7e..60c775bbc4ae4 100644 --- a/src/storage/src/hummock/compactor/iterator.rs +++ b/src/storage/src/hummock/compactor/iterator.rs @@ -143,7 +143,6 @@ impl SstableStreamIterator { if !block_iter.is_valid() { // `seek_key` is larger than everything in the first block. self.next_block().await?; - } else { } } diff --git a/src/storage/src/hummock/compactor/mod.rs b/src/storage/src/hummock/compactor/mod.rs index 60567f2d44af0..c10e85a2da6d2 100644 --- a/src/storage/src/hummock/compactor/mod.rs +++ b/src/storage/src/hummock/compactor/mod.rs @@ -387,7 +387,7 @@ pub fn start_compactor( let event: Option> = tokio::select! { _ = periodic_event_interval.tick() => { let mut progress_list = Vec::new(); - for (&task_id, progress) in task_progress.lock().iter() { + for (&task_id, progress) in &*task_progress.lock() { progress_list.push(CompactTaskProgress { task_id, num_ssts_sealed: progress.num_ssts_sealed.load(Ordering::Relaxed), diff --git a/src/storage/src/hummock/compactor/shared_buffer_compact.rs b/src/storage/src/hummock/compactor/shared_buffer_compact.rs index 6e01be793abf2..bcbaa19e2c3b1 100644 --- a/src/storage/src/hummock/compactor/shared_buffer_compact.rs +++ b/src/storage/src/hummock/compactor/shared_buffer_compact.rs @@ -75,7 +75,7 @@ pub async fn compact( }; grouped_payload .entry(compaction_group_id) - .or_insert_with(std::vec::Vec::new) + .or_default() .push(imm); } diff --git a/src/storage/src/hummock/conflict_detector.rs b/src/storage/src/hummock/conflict_detector.rs index 419c81273cf0c..c21ec478fa5ce 100644 --- a/src/storage/src/hummock/conflict_detector.rs +++ b/src/storage/src/hummock/conflict_detector.rs @@ -94,7 +94,7 @@ impl ConflictDetector { .entry(epoch) .or_insert(Some(HashSet::new())); - for (key, value) in kv_pairs.iter() { + for (key, value) in kv_pairs { assert!( written_key .as_mut() diff --git a/src/storage/src/hummock/error.rs b/src/storage/src/hummock/error.rs index fedb8b976712e..efd25c8076383 100644 --- a/src/storage/src/hummock/error.rs +++ b/src/storage/src/hummock/error.rs @@ -190,7 +190,7 @@ impl std::fmt::Debug for HummockError { write!(f, "{}", self.inner)?; writeln!(f)?; - if let Some(backtrace) = (&self.inner as &dyn Error).request_ref::<Backtrace>() { + if let Some(backtrace) = std::error::request_ref::<Backtrace>(&self.inner as &dyn Error) { write!(f, " backtrace of inner error:\n{}", backtrace)?; } else { write!(f, " backtrace of `HummockError`:\n{}", self.backtrace)?; diff --git a/src/storage/src/hummock/event_handler/hummock_event_handler.rs b/src/storage/src/hummock/event_handler/hummock_event_handler.rs index 366f8af391428..d9e25ebe46555 100644 --- a/src/storage/src/hummock/event_handler/hummock_event_handler.rs +++ b/src/storage/src/hummock/event_handler/hummock_event_handler.rs @@ -373,7 +373,7 @@ impl HummockEventHandler { ); self.uploader.clear(); - for (epoch, result_sender) in self.pending_sync_requests.drain_filter(|_, _| true) { + for (epoch, result_sender) in self.pending_sync_requests.extract_if(|_, _| true) { send_sync_result( result_sender, Err(HummockError::other(format!( @@ -511,7 +511,7 @@ impl HummockEventHandler { UploaderEvent::ImmMerged(merge_output) => { // update read version for corresponding table shards let read_guard =
self.read_version_mapping.read(); - read_guard.get(&merge_output.table_id).map_or((), |shards| { + if let Some(shards) = read_guard.get(&merge_output.table_id) { shards.get(&merge_output.instance_id).map_or_else( || { warn!( @@ -525,7 +525,7 @@ impl HummockEventHandler { )); }, ) - }); + } } } } diff --git a/src/storage/src/hummock/event_handler/uploader.rs b/src/storage/src/hummock/event_handler/uploader.rs index ea0013bcd20c2..f57ac33bfe6a2 100644 --- a/src/storage/src/hummock/event_handler/uploader.rs +++ b/src/storage/src/hummock/event_handler/uploader.rs @@ -436,7 +436,7 @@ impl SealedData { .rev() // in `imms`, newer data comes first .flat_map(|(_epoch, imms)| imms) - .chain(merged_imms.into_iter()) + .chain(merged_imms) .collect_vec(); if !payload.is_empty() { @@ -1333,11 +1333,9 @@ mod tests { assert_eq!(epoch, uploader.max_sealed_epoch); // check sealed data has two imms let imms_by_epoch = uploader.sealed_data.imms_by_epoch(); - imms_by_epoch.last_key_value().map_or((), |(e, imms)| { - if *e == epoch { - assert_eq!(2, imms.len()); - } - }); + if let Some((e, imms)) = imms_by_epoch.last_key_value() && *e == epoch { + assert_eq!(2, imms.len()); + } let epoch_cnt = (epoch - INITIAL_EPOCH) as usize; if epoch_cnt < imm_merge_threshold { @@ -1355,18 +1353,14 @@ mod tests { let imms_by_shard = &mut uploader.sealed_data.imms_by_table_shard; // check shard 1 - imms_by_shard - .get(&(TEST_TABLE_ID, 1 as LocalInstanceId)) - .map_or((), |imms| { - assert_eq!(imm_merge_threshold, imms.len()); - }); + if let Some(imms) = imms_by_shard.get(&(TEST_TABLE_ID, 1 as LocalInstanceId)) { + assert_eq!(imm_merge_threshold, imms.len()); + } // check shard 2 - imms_by_shard - .get(&(TEST_TABLE_ID, 2 as LocalInstanceId)) - .map_or((), |imms| { - assert_eq!(imm_merge_threshold, imms.len()); - }); + if let Some(imms) = imms_by_shard.get(&(TEST_TABLE_ID, 2 as LocalInstanceId)) { + assert_eq!(imm_merge_threshold, imms.len()); + } // we have enough sealed imms, start merging task println!("start merging task for epoch {}", epoch); @@ -1375,20 +1369,20 @@ mod tests { assert!(uploader.sealed_data.spilled_data.is_empty()); // check after generate merging task - uploader + if let Some(imms) = uploader .sealed_data .imms_by_table_shard .get(&(TEST_TABLE_ID, 1 as LocalInstanceId)) - .map_or((), |imms| { - assert_eq!(0, imms.len()); - }); + { + assert_eq!(0, imms.len()); + } - uploader + if let Some(imms) = uploader .sealed_data .imms_by_table_shard .get(&(TEST_TABLE_ID, 2 as LocalInstanceId)) - .map_or((), |imms| { - assert_eq!(0, imms.len()); - }); + { + assert_eq!(0, imms.len()); + } // poll the merging task and check the result match uploader.next_event().await { @@ -1647,6 +1641,9 @@ mod tests { (buffer_tracker, uploader, new_task_notifier) } + // This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380. + // TODO: remove `allow` here after the issue is closed.
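// ---------------------------------------------------------------------------
// Editorial sketch (not from the patch): the `map_or((), ...)` cleanups above.
// The new toolchain's clippy flags `map_or((), f)` as an obfuscated `if let`,
// and with `let_chains` an extra condition folds into the same binding, as in
// the `imms_by_epoch.last_key_value()` test above. Data is illustrative only;
// this compiles on the nightly this commit pins.
#![feature(let_chains)]

fn main() {
    let shards: Option<Vec<u32>> = Some(vec![1, 2, 3]);
    let epoch_of_interest = 42u64;
    let last: Option<(u64, usize)> = Some((42, 2));

    // Before: shards.map_or((), |s| assert_eq!(3, s.len()));
    if let Some(s) = &shards {
        assert_eq!(3, s.len());
    }

    // Let-chain: bind and compare in a single condition.
    if let Some((e, len)) = last && e == epoch_of_interest {
        assert_eq!(2, len);
    }
}
// ---------------------------------------------------------------------------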
+ #[expect(clippy::needless_pass_by_ref_mut)] async fn assert_uploader_pending(uploader: &mut HummockUploader) { for _ in 0..10 { yield_now().await; diff --git a/src/storage/src/hummock/file_cache/store.rs b/src/storage/src/hummock/file_cache/store.rs index 6866a016afc4c..da47c1562eaee 100644 --- a/src/storage/src/hummock/file_cache/store.rs +++ b/src/storage/src/hummock/file_cache/store.rs @@ -141,7 +141,7 @@ impl Value for Box { fn read(mut buf: &[u8]) -> Self { let id = buf.get_u64(); - let meta = SstableMeta::decode(&mut buf).unwrap(); + let meta = SstableMeta::decode(buf).unwrap(); Box::new(Sstable::new(id, meta)) } } diff --git a/src/storage/src/hummock/iterator/merge_inner.rs b/src/storage/src/hummock/iterator/merge_inner.rs index ec4e1d2dc840f..cf92df72abcdf 100644 --- a/src/storage/src/hummock/iterator/merge_inner.rs +++ b/src/storage/src/hummock/iterator/merge_inner.rs @@ -42,33 +42,33 @@ pub struct Node { } impl Eq for Node where Self: PartialEq {} -impl Ord for Node +impl PartialOrd for Node where - Self: PartialOrd, + Self: Ord, { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.partial_cmp(other).unwrap() + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) } } -/// Implement `PartialOrd` for unordered iter node. Only compare the key. -impl PartialOrd for Node { - fn partial_cmp(&self, other: &Self) -> Option { +/// Implement `Ord` for unordered iter node. Only compare the key. +impl Ord for Node { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { // Note: to implement min-heap by using max-heap internally, the comparing // order should be reversed. - Some(match I::Direction::direction() { + match I::Direction::direction() { DirectionEnum::Forward => other.iter.key().cmp(&self.iter.key()), DirectionEnum::Backward => self.iter.key().cmp(&other.iter.key()), - }) + } } } -/// Implement `PartialOrd` for ordered iter node. Compare key and use order index as tie breaker. -impl PartialOrd for Node { - fn partial_cmp(&self, other: &Self) -> Option { +/// Implement `Ord` for ordered iter node. Compare key and use order index as tie breaker. +impl Ord for Node { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { // The `extra_info` is used as a tie-breaker when the keys are equal. 
- Some(match I::Direction::direction() { + match I::Direction::direction() { DirectionEnum::Forward => other .iter .key() @@ -79,7 +79,7 @@ impl PartialOrd for Node { .key() .cmp(&other.iter.key()) .then_with(|| self.extra_order_info.cmp(&other.extra_order_info)), - }) + } } } @@ -203,7 +203,7 @@ where self.heap = self .unused_iters - .drain_filter(|i| i.iter.is_valid()) + .extract_if(|i| i.iter.is_valid()) .collect(); } } diff --git a/src/storage/src/hummock/shared_buffer/shared_buffer_batch.rs b/src/storage/src/hummock/shared_buffer/shared_buffer_batch.rs index 1ac5e6119f78b..7e67b87434e4a 100644 --- a/src/storage/src/hummock/shared_buffer/shared_buffer_batch.rs +++ b/src/storage/src/hummock/shared_buffer/shared_buffer_batch.rs @@ -590,7 +590,7 @@ impl SharedBufferBatch { SharedBufferBatch { inner: Arc::new(inner), table_id, - instance_id: instance_id.unwrap_or(LocalInstanceId::default()), + instance_id: instance_id.unwrap_or_default(), } } diff --git a/src/storage/src/hummock/sstable/builder.rs b/src/storage/src/hummock/sstable/builder.rs index e661f7aaa9add..c35417b9d5c04 100644 --- a/src/storage/src/hummock/sstable/builder.rs +++ b/src/storage/src/hummock/sstable/builder.rs @@ -670,7 +670,7 @@ pub(super) mod tests { let (data, meta) = output.writer_output; assert_eq!(info.file_size, meta.estimated_size as u64); let offset = info.meta_offset as usize; - let meta2 = SstableMeta::decode(&mut &data[offset..]).unwrap(); + let meta2 = SstableMeta::decode(&data[offset..]).unwrap(); assert_eq!(meta2, meta); } diff --git a/src/storage/src/hummock/sstable/delete_range_aggregator.rs b/src/storage/src/hummock/sstable/delete_range_aggregator.rs index a80c8dbb7f3b5..2b82e1f3c17f9 100644 --- a/src/storage/src/hummock/sstable/delete_range_aggregator.rs +++ b/src/storage/src/hummock/sstable/delete_range_aggregator.rs @@ -41,11 +41,7 @@ impl PartialEq for SortedBoundary { impl PartialOrd for SortedBoundary { fn partial_cmp(&self, other: &Self) -> Option { - let ret = other - .user_key - .cmp(&self.user_key) - .then_with(|| other.sequence.cmp(&self.sequence)); - Some(ret) + Some(self.cmp(other)) } } diff --git a/src/storage/src/hummock/sstable/mod.rs b/src/storage/src/hummock/sstable/mod.rs index de2ec1f1bcffb..e7e11ace8a2fb 100644 --- a/src/storage/src/hummock/sstable/mod.rs +++ b/src/storage/src/hummock/sstable/mod.rs @@ -433,7 +433,7 @@ impl SstableMeta { buf.put_u32_le(MAGIC); } - pub fn decode(buf: &mut &[u8]) -> HummockResult { + pub fn decode(buf: &[u8]) -> HummockResult { let mut cursor = buf.len(); cursor -= 4; @@ -561,7 +561,7 @@ mod tests { let sz = meta.encoded_size(); let buf = meta.encode_to_bytes(); assert_eq!(sz, buf.len()); - let decoded_meta = SstableMeta::decode(&mut &buf[..]).unwrap(); + let decoded_meta = SstableMeta::decode(&buf[..]).unwrap(); assert_eq!(decoded_meta, meta); } } diff --git a/src/storage/src/hummock/sstable_store.rs b/src/storage/src/hummock/sstable_store.rs index 480e1767cd5b1..987d9080ae563 100644 --- a/src/storage/src/hummock/sstable_store.rs +++ b/src/storage/src/hummock/sstable_store.rs @@ -427,7 +427,7 @@ impl SstableStore { .read(&meta_path, Some(loc)) .await .map_err(HummockError::object_io_error)?; - let meta = SstableMeta::decode(&mut &buf[..])?; + let meta = SstableMeta::decode(&buf[..])?; let sst = Sstable::new(object_id, meta); let charge = sst.estimate_size(); let add = (now.elapsed().as_secs_f64() * 1000.0).ceil(); @@ -451,6 +451,9 @@ impl SstableStore { }) } + // This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380. 
+ // TODO: remove `allow` here after the issue is closed. + #[expect(clippy::needless_pass_by_ref_mut)] pub async fn sstable( &self, sst: &SstableInfo, diff --git a/src/storage/src/hummock/state_store.rs b/src/storage/src/hummock/state_store.rs index 43b081a33aa66..13947802c4d9b 100644 --- a/src/storage/src/hummock/state_store.rs +++ b/src/storage/src/hummock/state_store.rs @@ -130,7 +130,7 @@ impl HummockStorage { .cloned() .collect_vec() }) - .unwrap_or(Vec::new()) + .unwrap_or_default() }; // When the system has just started and no state has been created, the memory state diff --git a/src/storage/src/hummock/store/memtable.rs b/src/storage/src/hummock/store/memtable.rs index 1167beed46ddf..b21b55e8bb5d1 100644 --- a/src/storage/src/hummock/store/memtable.rs +++ b/src/storage/src/hummock/store/memtable.rs @@ -79,7 +79,7 @@ pub struct BTreeMapMemtable { mem: BTreeMap, } -#[expect(unused_variables, dead_code)] +#[expect(unused_variables)] impl BTreeMapMemtable { fn insert(&mut self, key: Bytes, val: Bytes, epoch: u64) { unimplemented!() } diff --git a/src/storage/src/hummock/utils.rs b/src/storage/src/hummock/utils.rs index f91e57af486cb..d24ff4ab09ec9 100644 --- a/src/storage/src/hummock/utils.rs +++ b/src/storage/src/hummock/utils.rs @@ -603,6 +603,9 @@ mod tests { use crate::hummock::utils::MemoryLimiter; + // This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380. + // TODO: remove `allow` here after the issue is closed. + #[expect(clippy::needless_pass_by_ref_mut)] async fn assert_pending(future: &mut (impl Future + Unpin)) { for _ in 0..10 { assert!(poll_fn(|cx| Poll::Ready(future.poll_unpin(cx))) diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs index baab967610e60..5ebcb4fe78b00 100644 --- a/src/storage/src/lib.rs +++ b/src/storage/src/lib.rs @@ -18,9 +18,9 @@ #![feature(bound_as_ref)] #![feature(bound_map)] #![feature(custom_test_frameworks)] -#![feature(drain_filter)] +#![feature(extract_if)] #![feature(generators)] -#![feature(hash_drain_filter)] +#![feature(hash_extract_if)] #![feature(lint_reasons)] #![feature(proc_macro_hygiene)] #![feature(result_option_inspect)] @@ -33,16 +33,14 @@ #![test_runner(risingwave_test_runner::test_runner::run_failpont_tests)] #![feature(assert_matches)] #![feature(is_sorted)] -#![feature(btree_drain_filter)] +#![feature(btree_extract_if)] #![feature(exact_size_is_empty)] #![feature(lazy_cell)] #![cfg_attr(coverage, feature(no_coverage))] #![recursion_limit = "256"] #![feature(error_generic_member_access)] -#![feature(provide_any)] #![feature(let_chains)] #![feature(associated_type_bounds)] -#![feature(local_key_cell_methods)] #![feature(exclusive_range_pattern)] #![feature(impl_trait_in_assoc_type)] #![feature(async_fn_in_trait)] diff --git a/src/storage/src/monitor/local_metrics.rs b/src/storage/src/monitor/local_metrics.rs index b5cc3bd36279b..aacc71050034f 100644 --- a/src/storage/src/monitor/local_metrics.rs +++ b/src/storage/src/monitor/local_metrics.rs @@ -143,7 +143,7 @@ impl StoreLocalStatistic { } } - fn report_bloom_filter_metrics(&self, metrics: &mut BloomFilterLocalMetrics) { + fn report_bloom_filter_metrics(&self, metrics: &BloomFilterLocalMetrics) { if self.bloom_filter_check_counts == 0 { return; } @@ -171,7 +171,7 @@ impl StoreLocalStatistic { pub fn flush_all() { LOCAL_METRICS.with_borrow_mut(|local_metrics| { - for (_, metrics) in local_metrics.iter_mut() { + for metrics in local_metrics.values_mut() { if metrics.collect_count > 0 { metrics.flush(); metrics.collect_count = 0; @@ -514,7 +514,7
@@ impl Drop for GetLocalMetricsGuard { }); self.local_stats.report(table_metrics); self.local_stats - .report_bloom_filter_metrics(&mut table_metrics.get_filter_metrics); + .report_bloom_filter_metrics(&table_metrics.get_filter_metrics); }); } } @@ -552,7 +552,7 @@ impl Drop for IterLocalMetricsGuard { }); self.local_stats.report(table_metrics); self.local_stats - .report_bloom_filter_metrics(&mut table_metrics.iter_filter_metrics); + .report_bloom_filter_metrics(&table_metrics.iter_filter_metrics); }); } } @@ -586,7 +586,7 @@ impl Drop for MayExistLocalMetricsGuard { }); self.local_stats.report(table_metrics); self.local_stats - .report_bloom_filter_metrics(&mut table_metrics.may_exist_filter_metrics); + .report_bloom_filter_metrics(&table_metrics.may_exist_filter_metrics); }); } } diff --git a/src/storage/src/monitor/monitored_store.rs b/src/storage/src/monitor/monitored_store.rs index 19a7055a657a0..77924a999709d 100644 --- a/src/storage/src/monitor/monitored_store.rs +++ b/src/storage/src/monitor/monitored_store.rs @@ -79,19 +79,18 @@ pub(crate) fn identity(input: impl StateStoreIterItemStream) -> impl StateStoreIterItemStream { input } -pub type MonitoredStateStoreIterStream<'s, S: StateStoreIterItemStream + 's> = - impl StateStoreIterItemStream + 's; +pub type MonitoredStateStoreIterStream = impl StateStoreIterItemStream; // Note: it is important to define the `MonitoredStateStoreIterStream` type alias, as it marks that // the return type of `monitored_iter` only captures the lifetime `'s` and has nothing to do with // `'a`. If we simply use `impl StateStoreIterItemStream + 's`, the Rust compiler will also capture // the lifetime `'a` that is in scope. impl MonitoredStateStore { - async fn monitored_iter<'a, 's, St: StateStoreIterItemStream + 's>( + async fn monitored_iter<'a, St: StateStoreIterItemStream + 'a>( &'a self, table_id: TableId, iter_stream_future: impl Future> + 'a, - ) -> StorageResult> { + ) -> StorageResult> { // start time takes iterator build time into account let start_time = Instant::now(); let table_id_label = table_id.to_string(); @@ -391,7 +390,7 @@ impl MonitoredStateStoreIter { drop(stats); } - fn into_stream(self) -> impl StateStoreIterItemStream { + fn into_stream(self) -> MonitoredStateStoreIterStream { Self::into_stream_inner(self).instrument(tracing::trace_span!("store_iter")) } } diff --git a/src/storage/src/monitor/traced_store.rs b/src/storage/src/monitor/traced_store.rs index 79f43876ac2e0..505c0460552a7 100644 --- a/src/storage/src/monitor/traced_store.rs +++ b/src/storage/src/monitor/traced_store.rs @@ -65,11 +65,11 @@ impl TracedStateStore { } } - async fn traced_iter<'a, 's, St: StateStoreIterItemStream + 's>( + async fn traced_iter<'a, St: StateStoreIterItemStream>( &'a self, iter_stream_future: impl Future> + 'a, span: MayTraceSpan, - ) -> StorageResult> { + ) -> StorageResult> { let res = iter_stream_future.await; if res.is_ok() { span.may_send_result(OperationResult::Iter(TraceResult::Ok(()))); @@ -104,8 +104,7 @@ impl TracedStateStore { } } -type TracedStateStoreIterStream<'s, S: StateStoreIterItemStream + 's> = - impl StateStoreIterItemStream + 's; +type TracedStateStoreIterStream = impl StateStoreIterItemStream; impl LocalStateStore for TracedStateStore { type IterStream<'a> = impl StateStoreIterItemStream + 'a; @@ -349,7 +348,7 @@ impl TracedStateStoreIter { } } - fn into_stream(self) -> impl StateStoreIterItemStream { + fn into_stream(self) -> TracedStateStoreIterStream { Self::into_stream_inner(self) } } diff --git
a/src/storage/src/table/batch_table/storage_table.rs b/src/storage/src/table/batch_table/storage_table.rs index b98336d37b1e5..dc386a719ec22 100644 --- a/src/storage/src/table/batch_table/storage_table.rs +++ b/src/storage/src/table/batch_table/storage_table.rs @@ -449,7 +449,6 @@ impl StorageTableInner { // For each key range, construct an iterator. let iterators: Vec<_> = try_join_all(raw_key_ranges.map(|raw_key_range| { let prefix_hint = prefix_hint.clone(); - let wait_epoch = wait_epoch; let read_backup = matches!(wait_epoch, HummockReadEpoch::Backup(_)); async move { let read_options = ReadOptions { diff --git a/src/stream/Cargo.toml b/src/stream/Cargo.toml index 940b5191a4914..79db63474cfd4 100644 --- a/src/stream/Cargo.toml +++ b/src/stream/Cargo.toml @@ -95,3 +95,6 @@ harness = false [[bench]] name = "bench_state_table" harness = false + +[lints] +workspace = true diff --git a/src/stream/benches/stream_hash_agg.rs b/src/stream/benches/stream_hash_agg.rs index 77315a9556ee3..a5392f011afbb 100644 --- a/src/stream/benches/stream_hash_agg.rs +++ b/src/stream/benches/stream_hash_agg.rs @@ -37,7 +37,7 @@ fn bench_hash_agg(c: &mut Criterion) { group.bench_function("benchmark_hash_agg", |b| { b.to_async(&rt).iter_batched( || setup_bench_hash_agg(MemoryStateStore::new()), - |e| execute_executor(e), + execute_executor, BatchSize::SmallInput, ) }); diff --git a/src/stream/src/common/log_store/in_mem.rs b/src/stream/src/common/log_store/in_mem.rs index f387d971e8ffe..7d136a64a8257 100644 --- a/src/stream/src/common/log_store/in_mem.rs +++ b/src/stream/src/common/log_store/in_mem.rs @@ -95,7 +95,6 @@ impl LogStoreFactory for BoundedInMemLogStoreFactory { type Reader = BoundedInMemLogStoreReader; type Writer = BoundedInMemLogStoreWriter; - #[expect(clippy::unused_async)] async fn build(self) -> (Self::Reader, Self::Writer) { let (init_epoch_tx, init_epoch_rx) = oneshot::channel(); let (item_tx, item_rx) = channel(self.bound); @@ -164,7 +163,6 @@ impl LogReader for BoundedInMemLogStoreReader { } } - #[expect(clippy::unused_async)] async fn truncate(&mut self) -> LogStoreResult<()> { let sealed_epoch = match self.epoch_progress { Consuming(_) => unreachable!("should be awaiting truncate"), @@ -184,7 +182,6 @@ impl LogReader for BoundedInMemLogStoreReader { } impl LogWriter for BoundedInMemLogStoreWriter { - #[expect(clippy::unused_async)] async fn init(&mut self, epoch: EpochPair) -> LogStoreResult<()> { let init_epoch_tx = self.init_epoch_tx.take().expect("cannot be init for twice"); init_epoch_tx diff --git a/src/stream/src/common/log_store/kv_log_store/reader.rs b/src/stream/src/common/log_store/kv_log_store/reader.rs index ffed40d9ce47b..f31235c42da0b 100644 --- a/src/stream/src/common/log_store/kv_log_store/reader.rs +++ b/src/stream/src/common/log_store/kv_log_store/reader.rs @@ -195,7 +195,6 @@ impl LogReader for KvLogStoreReader { }) } - #[expect(clippy::unused_async)] async fn truncate(&mut self) -> LogStoreResult<()> { self.rx.truncate(); Ok(()) diff --git a/src/stream/src/common/log_store/kv_log_store/serde.rs b/src/stream/src/common/log_store/kv_log_store/serde.rs index 472451e007475..15825e9f275e6 100644 --- a/src/stream/src/common/log_store/kv_log_store/serde.rs +++ b/src/stream/src/common/log_store/kv_log_store/serde.rs @@ -757,7 +757,7 @@ mod tests { let (tx, rx) = oneshot::channel(); let row_data = ops .into_iter() - .zip_eq(rows.into_iter()) + .zip_eq(rows) .map(|(op, row)| { let (_, key, value) = serde.serialize_data_row(epoch, *seq_id, op, row); *seq_id += 1; diff --git 
a/src/stream/src/common/log_store/kv_log_store/test_utils.rs b/src/stream/src/common/log_store/kv_log_store/test_utils.rs index 6961bb805a45f..8eb3a82fb742d 100644 --- a/src/stream/src/common/log_store/kv_log_store/test_utils.rs +++ b/src/stream/src/common/log_store/kv_log_store/test_utils.rs @@ -61,7 +61,7 @@ pub(crate) fn test_log_store_table_schema() -> Vec { ColumnDesc::unnamed(ColumnId::from(1), DataType::Int32), // Seq id ColumnDesc::unnamed(ColumnId::from(2), DataType::Int16), // op code ]; - column_descs.extend(test_payload_schema().into_iter()); + column_descs.extend(test_payload_schema()); column_descs } diff --git a/src/stream/src/common/log_store/kv_log_store/writer.rs b/src/stream/src/common/log_store/kv_log_store/writer.rs index 2b46177be3985..54d7db38b8570 100644 --- a/src/stream/src/common/log_store/kv_log_store/writer.rs +++ b/src/stream/src/common/log_store/kv_log_store/writer.rs @@ -133,7 +133,6 @@ impl LogWriter for KvLogStoreWriter { Ok(()) } - #[expect(clippy::unused_async)] async fn update_vnode_bitmap(&mut self, new_vnodes: Arc) -> LogStoreResult<()> { self.serde.update_vnode_bitmap(new_vnodes.clone()); self.tx.update_vnode(self.state_store.epoch(), new_vnodes); diff --git a/src/stream/src/common/table/test_state_table.rs b/src/stream/src/common/table/test_state_table.rs index 7131d5f4b6029..c3e5759a47ae6 100644 --- a/src/stream/src/common/table/test_state_table.rs +++ b/src/stream/src/common/table/test_state_table.rs @@ -213,7 +213,7 @@ async fn test_state_table_iter_with_prefix() { // let pk_columns = vec![0, 1]; leave a message to indicate pk columns let order_types = vec![OrderType::ascending(), OrderType::descending()]; - let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; + let column_ids = [ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -341,7 +341,7 @@ async fn test_state_table_iter_with_pk_range() { // let pk_columns = vec![0, 1]; leave a message to indicate pk columns let order_types = vec![OrderType::ascending(), OrderType::descending()]; - let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; + let column_ids = [ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), // This is the range prefix key ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -516,7 +516,7 @@ async fn test_state_table_iter_with_value_indices() { let test_env = prepare_hummock_test_env().await; let order_types = vec![OrderType::ascending(), OrderType::descending()]; - let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; + let column_ids = [ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -677,7 +677,7 @@ async fn test_state_table_iter_with_shuffle_value_indices() { let test_env = prepare_hummock_test_env().await; let order_types = vec![OrderType::ascending(), OrderType::descending()]; - let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; + let column_ids = [ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -1278,7 +1278,7 @@ async fn test_state_table_may_exist() { // let 
pk_columns = vec![0, 1]; leave a message to indicate pk columns let order_types = vec![OrderType::ascending(), OrderType::descending()]; - let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; + let column_ids = [ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), diff --git a/src/stream/src/common/table/test_storage_table.rs b/src/stream/src/common/table/test_storage_table.rs index 77cb3708489f1..dc72e33ed5033 100644 --- a/src/stream/src/common/table/test_storage_table.rs +++ b/src/stream/src/common/table/test_storage_table.rs @@ -35,7 +35,7 @@ async fn test_storage_table_value_indices() { const TEST_TABLE_ID: TableId = TableId { table_id: 233 }; let test_env = prepare_hummock_test_env().await; - let column_ids = vec![ + let column_ids = [ ColumnId::from(0), ColumnId::from(1), ColumnId::from(2), @@ -172,7 +172,7 @@ async fn test_shuffled_column_id_for_storage_table_get_row() { const TEST_TABLE_ID: TableId = TableId { table_id: 233 }; let test_env = prepare_hummock_test_env().await; - let column_ids = vec![ColumnId::from(3), ColumnId::from(2), ColumnId::from(1)]; + let column_ids = [ColumnId::from(3), ColumnId::from(2), ColumnId::from(1)]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -274,7 +274,7 @@ async fn test_row_based_storage_table_point_get_in_batch_mode() { const TEST_TABLE_ID: TableId = TableId { table_id: 233 }; let test_env = prepare_hummock_test_env().await; - let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; + let column_ids = [ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -377,7 +377,7 @@ async fn test_batch_scan_with_value_indices() { let test_env = prepare_hummock_test_env().await; let order_types = vec![OrderType::ascending(), OrderType::descending()]; - let column_ids = vec![ + let column_ids = [ ColumnId::from(0), ColumnId::from(1), ColumnId::from(2), diff --git a/src/stream/src/error.rs b/src/stream/src/error.rs index c2b2e79cf0f51..b737de4d2560b 100644 --- a/src/stream/src/error.rs +++ b/src/stream/src/error.rs @@ -68,7 +68,9 @@ impl std::fmt::Debug for StreamError { write!(f, "{}", self.inner.kind)?; writeln!(f)?; - if let Some(backtrace) = (&self.inner.kind as &dyn Error).request_ref::() { + if let Some(backtrace) = + std::error::request_ref::(&self.inner.kind as &dyn Error) + { write!(f, " backtrace of inner error:\n{}", backtrace)?; } else { write!(f, " backtrace of `StreamError`:\n{}", self.inner.backtrace)?; diff --git a/src/stream/src/executor/aggregation/distinct.rs b/src/stream/src/executor/aggregation/distinct.rs index 71b827b546604..dd5905c342710 100644 --- a/src/stream/src/executor/aggregation/distinct.rs +++ b/src/stream/src/executor/aggregation/distinct.rs @@ -60,7 +60,6 @@ impl ColumnDeduplicater { group_key: Option<&GroupKey>, ctx: ActorContextRef, ) -> StreamExecutorResult<()> { - let column = column; let n_calls = visibilities.len(); let mut prev_counts_map = HashMap::new(); // also serves as changeset @@ -189,7 +188,7 @@ impl ColumnDeduplicater { } /// Flush the deduplication table. 
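// ---------------------------------------------------------------------------
// Editorial sketch (not from the patch): the error-backtrace change repeated
// in the `Debug` impls of `StreamError`, `ObjectError`, `HummockError`, etc.
// On this nightly the `Provider` machinery (`provide_any`) was folded into
// `std::error`, so `(&err as &dyn Error).request_ref::<Backtrace>()` becomes
// the free function `std::error::request_ref::<Backtrace>(&err)`. `MyError`
// is illustrative; the API is unstable and may differ on other nightlies.
#![feature(error_generic_member_access)]

use std::backtrace::Backtrace;
use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct MyError {
    backtrace: Backtrace,
}

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("my error")
    }
}

impl Error for MyError {
    // Expose the backtrace through the generic member-access hook.
    fn provide<'a>(&'a self, request: &mut std::error::Request<'a>) {
        request.provide_ref::<Backtrace>(&self.backtrace);
    }
}

fn main() {
    let err = MyError { backtrace: Backtrace::capture() };
    // The new spelling used throughout this commit:
    assert!(std::error::request_ref::<Backtrace>(&err as &dyn Error).is_some());
}
// ---------------------------------------------------------------------------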
- fn flush(&mut self, dedup_table: &mut StateTable, ctx: ActorContextRef) { + fn flush(&mut self, dedup_table: &StateTable, ctx: ActorContextRef) { // TODO(rc): now we flush the table in `dedup` method. // WARN: if you want to change to batching the write to table. please remember to change // `self.cache.evict()` too. diff --git a/src/stream/src/executor/backfill/arrangement_backfill.rs b/src/stream/src/executor/backfill/arrangement_backfill.rs index 8adb82e06c539..d33aed6d6c441 100644 --- a/src/stream/src/executor/backfill/arrangement_backfill.rs +++ b/src/stream/src/executor/backfill/arrangement_backfill.rs @@ -428,7 +428,7 @@ where barrier.epoch, &mut self.state_table, false, - &mut backfill_state, + &backfill_state, &mut committed_progress, &mut temporary_state, ) @@ -468,7 +468,7 @@ where barrier.epoch, &mut self.state_table, false, - &mut backfill_state, + &backfill_state, &mut committed_progress, &mut temporary_state, ).await?; diff --git a/src/stream/src/executor/backfill/utils.rs b/src/stream/src/executor/backfill/utils.rs index 80f65e28aa8d5..6c49be9e607a1 100644 --- a/src/stream/src/executor/backfill/utils.rs +++ b/src/stream/src/executor/backfill/utils.rs @@ -335,6 +335,9 @@ pub(crate) async fn check_all_vnode_finished( table: &mut StateTableInner, epoch: EpochPair, @@ -502,7 +505,7 @@ pub(crate) async fn persist_state_per_vnode, is_finished: bool, - backfill_state: &mut BackfillState, + backfill_state: &BackfillState, committed_progress: &mut HashMap>, temporary_state: &mut [Datum], ) -> StreamExecutorResult<()> { diff --git a/src/stream/src/executor/dispatch.rs b/src/stream/src/executor/dispatch.rs index 2e810b9064642..17b8866543c6f 100644 --- a/src/stream/src/executor/dispatch.rs +++ b/src/stream/src/executor/dispatch.rs @@ -528,12 +528,12 @@ impl Dispatcher for RoundRobinDataDispatcher { } fn add_outputs(&mut self, outputs: impl IntoIterator) { - self.outputs.extend(outputs.into_iter()); + self.outputs.extend(outputs); } fn remove_outputs(&mut self, actor_ids: &HashSet) { self.outputs - .drain_filter(|output| actor_ids.contains(&output.actor_id())) + .extract_if(|output| actor_ids.contains(&output.actor_id())) .count(); self.cur = self.cur.min(self.outputs.len() - 1); } @@ -589,7 +589,7 @@ impl Dispatcher for HashDataDispatcher { define_dispatcher_associated_types!(); fn add_outputs(&mut self, outputs: impl IntoIterator) { - self.outputs.extend(outputs.into_iter()); + self.outputs.extend(outputs); } fn dispatch_barrier(&mut self, barrier: Barrier) -> Self::BarrierFuture<'_> { @@ -696,7 +696,7 @@ impl Dispatcher for HashDataDispatcher { fn remove_outputs(&mut self, actor_ids: &HashSet) { self.outputs - .drain_filter(|output| actor_ids.contains(&output.actor_id())) + .extract_if(|output| actor_ids.contains(&output.actor_id())) .count(); } @@ -779,7 +779,7 @@ impl Dispatcher for BroadcastDispatcher { fn remove_outputs(&mut self, actor_ids: &HashSet) { self.outputs - .drain_filter(|actor_id, _| actor_ids.contains(actor_id)) + .extract_if(|actor_id, _| actor_ids.contains(actor_id)) .count(); } @@ -838,7 +838,7 @@ impl Dispatcher for SimpleDispatcher { fn dispatch_barrier(&mut self, barrier: Barrier) -> Self::BarrierFuture<'_> { async move { // Only barrier is allowed to be dispatched to multiple outputs during migration. 
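// ---------------------------------------------------------------------------
// Editorial sketch (not from the patch): the iteration cleanups here and in
// the `iter_mut` loop that follows. `Extend::extend` already takes any
// `IntoIterator`, so `.extend(v.into_iter())` says nothing that `.extend(v)`
// does not; likewise `for x in &mut v` replaces an explicit `.iter_mut()` in
// `for` loops. Data is illustrative only.
fn main() {
    let mut outputs: Vec<String> = vec!["a".to_string()];
    let new_outputs = vec!["b".to_string(), "c".to_string()];

    outputs.extend(new_outputs); // no `.into_iter()` needed

    for output in &mut outputs {
        output.push('!'); // mutable borrow without `.iter_mut()`
    }
    assert_eq!(outputs, ["a!", "b!", "c!"]);
}
// ---------------------------------------------------------------------------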
- for output in self.output.iter_mut() { + for output in &mut self.output { output.send(Message::Barrier(barrier.clone())).await?; } Ok(()) @@ -1222,7 +1222,7 @@ mod tests { let hash_builder = Crc32FastBuilder; let mut hasher = hash_builder.build_hasher(); let one_row = (0..dimension).map(|_| start.next().unwrap()).collect_vec(); - for key_idx in key_indices.iter() { + for key_idx in key_indices { let val = one_row[*key_idx]; let bytes = val.to_le_bytes(); hasher.update(&bytes); diff --git a/src/stream/src/executor/error.rs b/src/stream/src/executor/error.rs index 8f3dc84f8e752..32d7ee8479110 100644 --- a/src/stream/src/executor/error.rs +++ b/src/stream/src/executor/error.rs @@ -124,7 +124,9 @@ impl std::fmt::Debug for StreamExecutorError { write!(f, "{}", self.inner.kind)?; writeln!(f)?; - if let Some(backtrace) = (&self.inner.kind as &dyn Error).request_ref::() { + if let Some(backtrace) = + std::error::request_ref::(&self.inner.kind as &dyn Error) + { write!(f, " backtrace of inner error:\n{}", backtrace)?; } else { write!( diff --git a/src/stream/src/executor/hash_agg.rs b/src/stream/src/executor/hash_agg.rs index f3bb799865376..610c2857728b3 100644 --- a/src/stream/src/executor/hash_agg.rs +++ b/src/stream/src/executor/hash_agg.rs @@ -271,7 +271,7 @@ impl HashAggExecutor { } async fn ensure_keys_in_cache( - this: &mut ExecutorInner, + this: &ExecutorInner, cache: &mut AggGroupCache, keys: impl IntoIterator, stats: &mut ExecutionStats, diff --git a/src/stream/src/executor/hop_window.rs b/src/stream/src/executor/hop_window.rs index ae67237dbb971..aa1840aa832ce 100644 --- a/src/stream/src/executor/hop_window.rs +++ b/src/stream/src/executor/hop_window.rs @@ -195,7 +195,7 @@ impl HopWindowExecutor { let data_types = chunks[0].data_types(); let mut chunk_builder = StreamChunkBuilder::new(chunk_size, data_types); - for &op in ops.iter() { + for &op in &*ops { // Since there could be multiple rows for the same input row, we need to // transform the `U-`/`U+` into `-`/`+` and then duplicate it. let op = match op { diff --git a/src/stream/src/executor/lookup/cache.rs b/src/stream/src/executor/lookup/cache.rs index 48d01f2755a22..eed16d52aad40 100644 --- a/src/stream/src/executor/lookup/cache.rs +++ b/src/stream/src/executor/lookup/cache.rs @@ -117,7 +117,7 @@ impl LookupEntryState { fn new(value: VecWithKvSize) -> Self { let kv_heap_size = value.get_kv_size(); Self { - inner: HashSet::from_iter(value.into_iter()), + inner: HashSet::from_iter(value), kv_heap_size: KvSize::with_size(kv_heap_size), } } diff --git a/src/stream/src/executor/managed_state/join/mod.rs b/src/stream/src/executor/managed_state/join/mod.rs index f811d661d364a..7ee23c06a5631 100644 --- a/src/stream/src/executor/managed_state/join/mod.rs +++ b/src/stream/src/executor/managed_state/join/mod.rs @@ -131,7 +131,7 @@ impl EstimateSize for HashValueWrapper { } impl HashValueWrapper { - const MESSAGE: &str = "the state should always be `Some`"; + const MESSAGE: &'static str = "the state should always be `Some`"; /// Take the value out of the wrapper. Panic if the value is `None`. 
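// ---------------------------------------------------------------------------
// Editorial sketch (not from the patch): the `partial_cmp` rewrites applied to
// `ActorSplitsAssignment`, `SortedBoundary`, the merge-iterator `Node`, and
// `Watermark` below. A newer clippy lint wants `PartialOrd` to delegate to
// `Ord` so the two can never disagree; the ordering logic itself moves into
// `cmp`. `Version` here is an illustrative stand-in type.
use std::cmp::Ordering;

#[derive(PartialEq, Eq)]
struct Version(u64);

impl Ord for Version {
    fn cmp(&self, other: &Self) -> Ordering {
        // Reversed order, e.g. to drive a min-heap through a max-heap,
        // mirroring the comparator in merge_inner.rs.
        other.0.cmp(&self.0)
    }
}

impl PartialOrd for Version {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other)) // delegate; never restate the logic here
    }
}

fn main() {
    assert!(Version(1) > Version(2));
}
// ---------------------------------------------------------------------------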
pub fn take(&mut self) -> HashValueType { diff --git a/src/stream/src/executor/mod.rs b/src/stream/src/executor/mod.rs index 653c2c471a30d..8fa7a5d818cc4 100644 --- a/src/stream/src/executor/mod.rs +++ b/src/stream/src/executor/mod.rs @@ -26,7 +26,7 @@ use risingwave_common::array::StreamChunk; use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::Schema; use risingwave_common::row::OwnedRow; -use risingwave_common::types::{DataType, DefaultOrd, DefaultPartialOrd, ScalarImpl}; +use risingwave_common::types::{DataType, DefaultOrd, ScalarImpl}; use risingwave_common::util::epoch::{Epoch, EpochPair}; use risingwave_common::util::tracing::TracingContext; use risingwave_common::util::value_encoding::{deserialize_datum, serialize_datum}; @@ -627,11 +627,7 @@ pub struct Watermark { impl PartialOrd for Watermark { fn partial_cmp(&self, other: &Self) -> Option { - if self.col_idx == other.col_idx { - self.val.default_partial_cmp(&other.val) - } else { - None - } + Some(self.cmp(other)) } } diff --git a/src/stream/src/executor/mview/test_utils.rs b/src/stream/src/executor/mview/test_utils.rs index 72dd393cb25cd..215ba837a8d44 100644 --- a/src/stream/src/executor/mview/test_utils.rs +++ b/src/stream/src/executor/mview/test_utils.rs @@ -26,7 +26,7 @@ pub async fn gen_basic_table(row_count: usize) -> StorageTable let state_store = MemoryStateStore::new(); let order_types = vec![OrderType::ascending(), OrderType::descending()]; - let column_ids = vec![0.into(), 1.into(), 2.into()]; + let column_ids = [0.into(), 1.into(), 2.into()]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), diff --git a/src/stream/src/executor/over_window/eowc.rs b/src/stream/src/executor/over_window/eowc.rs index 1c955f9b0ce5a..22553641369c1 100644 --- a/src/stream/src/executor/over_window/eowc.rs +++ b/src/stream/src/executor/over_window/eowc.rs @@ -181,7 +181,7 @@ impl EowcOverWindowExecutor { } async fn ensure_key_in_cache( - this: &mut ExecutorInner, + this: &ExecutorInner, cache: &mut PartitionCache, partition_key: impl Row, encoded_partition_key: &MemcmpEncoded, diff --git a/src/stream/src/executor/over_window/general.rs b/src/stream/src/executor/over_window/general.rs index ba995f7e7c3cb..091e199d7b52a 100644 --- a/src/stream/src/executor/over_window/general.rs +++ b/src/stream/src/executor/over_window/general.rs @@ -307,7 +307,7 @@ impl OverWindowExecutor { match record { Record::Insert { new_row } => { let part_key = this.get_partition_key(new_row).into(); - let part_delta = deltas.entry(part_key).or_insert(PartitionDelta::new()); + let part_delta = deltas.entry(part_key).or_default(); part_delta.insert( this.row_to_cache_key(new_row)?, Change::Insert(new_row.into_owned_row()), @@ -315,7 +315,7 @@ impl OverWindowExecutor { } Record::Delete { old_row } => { let part_key = this.get_partition_key(old_row).into(); - let part_delta = deltas.entry(part_key).or_insert(PartitionDelta::new()); + let part_delta = deltas.entry(part_key).or_default(); part_delta.insert(this.row_to_cache_key(old_row)?, Change::Delete); } Record::Update { old_row, new_row } => { @@ -325,15 +325,13 @@ impl OverWindowExecutor { let new_state_key = this.row_to_cache_key(new_row)?; if old_part_key == new_part_key && old_state_key == new_state_key { // not a key-change update - let part_delta = - deltas.entry(old_part_key).or_insert(PartitionDelta::new()); + let part_delta = deltas.entry(old_part_key).or_default(); part_delta.insert(old_state_key, 
                     } else if old_part_key == new_part_key {
                         // order-change update, split into delete + insert, will be merged after
                         // building changes
                         key_change_updated_pks.insert(this.get_input_pk(old_row));
-                        let part_delta =
-                            deltas.entry(old_part_key).or_insert(PartitionDelta::new());
+                        let part_delta = deltas.entry(old_part_key).or_default();
                         part_delta.insert(old_state_key, Change::Delete);
                         part_delta.insert(new_state_key, Change::Insert(new_row.into_owned_row()));
                     } else {
@@ -341,11 +339,9 @@
                         // NOTE(rc): Since we append partition key to logical pk, we can't merge the
                         // delete + insert back to update later.
                         // TODO: IMO this behavior is problematic. Deep discussion is needed.
-                        let old_part_delta =
-                            deltas.entry(old_part_key).or_insert(PartitionDelta::new());
+                        let old_part_delta = deltas.entry(old_part_key).or_default();
                         old_part_delta.insert(old_state_key, Change::Delete);
-                        let new_part_delta =
-                            deltas.entry(new_part_key).or_insert(PartitionDelta::new());
+                        let new_part_delta = deltas.entry(new_part_key).or_default();
                         new_part_delta
                             .insert(new_state_key, Change::Insert(new_row.into_owned_row()));
                     }
diff --git a/src/stream/src/executor/over_window/over_partition.rs b/src/stream/src/executor/over_window/over_partition.rs
index 28d7ce7d2ef99..ab785acd9b681 100644
--- a/src/stream/src/executor/over_window/over_partition.rs
+++ b/src/stream/src/executor/over_window/over_partition.rs
@@ -1083,7 +1083,7 @@ mod find_affected_ranges_tests {
     ) {
         result
             .into_iter()
-            .zip_eq(expected.into_iter())
+            .zip_eq(expected)
             .for_each(|(result, expected)| {
                 assert_eq!(
                     result.0.as_normal_expect().pk.0,
diff --git a/src/stream/src/executor/source/source_executor.rs b/src/stream/src/executor/source/source_executor.rs
index c833b3179182d..7f8cd9a66c6a9 100644
--- a/src/stream/src/executor/source/source_executor.rs
+++ b/src/stream/src/executor/source/source_executor.rs
@@ -257,7 +257,7 @@ impl<S: StateStore> SourceExecutor<S> {
         // fetch the newest offset, either it's in cache (before barrier)
         // or in state table (just after barrier)
         let target_state = if core.state_cache.is_empty() {
-            for ele in split_info.iter_mut() {
+            for ele in &mut *split_info {
                 if let Some(recover_state) = core
                     .split_state_store
                     .try_recover_from_state_store(ele)
@@ -322,7 +322,7 @@
         let dropped_splits = core
             .stream_source_splits
-            .drain_filter(|split_id, _| !target_split_ids.contains(split_id))
+            .extract_if(|split_id, _| !target_split_ids.contains(split_id))
             .map(|(_, split)| split)
             .collect_vec();
diff --git a/src/stream/src/executor/temporal_join.rs b/src/stream/src/executor/temporal_join.rs
index 6f82ca6e75adf..1372ad808eab0 100644
--- a/src/stream/src/executor/temporal_join.rs
+++ b/src/stream/src/executor/temporal_join.rs
@@ -115,7 +115,7 @@ impl EstimateSize for JoinEntryWrapper {
 }
 
 impl JoinEntryWrapper {
-    const MESSAGE: &str = "the state should always be `Some`";
+    const MESSAGE: &'static str = "the state should always be `Some`";
 
     /// Take the value out of the wrapper. Panic if the value is `None`.
     pub fn take(&mut self) -> JoinEntry {
diff --git a/src/stream/src/executor/top_n/top_n_cache.rs b/src/stream/src/executor/top_n/top_n_cache.rs
index 88f5b06ac2a04..b8275eba52b16 100644
--- a/src/stream/src/executor/top_n/top_n_cache.rs
+++ b/src/stream/src/executor/top_n/top_n_cache.rs
@@ -527,8 +527,7 @@ impl<const WITH_TIES: bool> TopNCacheTrait for TopNCache<WITH_TIES> {
             // We need to trigger insert for all rows with prefix `high_first_order_by`
             // in high cache.
-            for (ordered_pk_row, row) in
-                self.high.drain_filter(|k, _| k.0 == high_first_order_by)
+            for (ordered_pk_row, row) in self.high.extract_if(|k, _| k.0 == high_first_order_by)
             {
                 if ordered_pk_row.0 != high_first_order_by {
                     break;
diff --git a/src/stream/src/executor/top_n/topn_cache_state.rs b/src/stream/src/executor/top_n/topn_cache_state.rs
index 19c32ac3dd4f6..c6d5eeb363dd5 100644
--- a/src/stream/src/executor/top_n/topn_cache_state.rs
+++ b/src/stream/src/executor/top_n/topn_cache_state.rs
@@ -14,8 +14,7 @@
 
 use core::fmt;
 use std::alloc::Global;
-use std::collections::btree_map::{DrainFilter, OccupiedEntry, Range};
-use std::collections::BTreeMap;
+use std::collections::btree_map::{BTreeMap, ExtractIf, OccupiedEntry, Range};
 use std::ops::RangeBounds;
 
 use risingwave_common::estimate_size::{EstimateSize, KvSize};
@@ -109,11 +108,11 @@ impl TopNCacheState {
         self.inner.range(range)
     }
 
-    pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, CacheKey, CompactedRow, F, Global>
+    pub fn extract_if<F>(&mut self, pred: F) -> ExtractIf<'_, CacheKey, CompactedRow, F, Global>
     where
         F: FnMut(&CacheKey, &mut CompactedRow) -> bool,
     {
-        self.inner.drain_filter(pred)
+        self.inner.extract_if(pred)
     }
 
     pub fn retain<F>(&mut self, f: F)
diff --git a/src/stream/src/lib.rs b/src/stream/src/lib.rs
index e57a87d059710..db1a3fe7819b6 100644
--- a/src/stream/src/lib.rs
+++ b/src/stream/src/lib.rs
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#![allow(rustdoc::private_intra_doc_links)]
 #![allow(clippy::derive_partial_eq_without_eq)]
 #![feature(iterator_try_collect)]
 #![feature(trait_alias)]
@@ -21,8 +20,8 @@
 #![feature(lint_reasons)]
 #![feature(binary_heap_drain_sorted)]
 #![feature(let_chains)]
-#![feature(hash_drain_filter)]
-#![feature(drain_filter)]
+#![feature(hash_extract_if)]
+#![feature(extract_if)]
 #![feature(generators)]
 #![feature(iter_from_generator)]
 #![feature(proc_macro_hygiene)]
@@ -34,8 +33,7 @@
 #![feature(btreemap_alloc)]
 #![feature(lazy_cell)]
 #![feature(error_generic_member_access)]
-#![feature(provide_any)]
-#![feature(btree_drain_filter)]
+#![feature(btree_extract_if)]
 #![feature(bound_map)]
 #![feature(iter_order_by)]
 #![feature(exact_size_is_empty)]
diff --git a/src/stream/src/task/stream_manager.rs b/src/stream/src/task/stream_manager.rs
index a537244965fb3..2abc8212e2984 100644
--- a/src/stream/src/task/stream_manager.rs
+++ b/src/stream/src/task/stream_manager.rs
@@ -462,6 +462,9 @@ impl LocalStreamManagerCore {
     }
 
     /// Create a chain(tree) of nodes, with given `store`.
+    // This is a clippy bug, see https://github.com/rust-lang/rust-clippy/issues/11380.
+    // TODO: remove `allow` here after the issue is closed.
+    #[expect(clippy::needless_pass_by_ref_mut)]
     #[allow(clippy::too_many_arguments)]
     #[async_recursion]
     async fn create_nodes_inner(
diff --git a/src/test_runner/Cargo.toml b/src/test_runner/Cargo.toml
index 8e9ee88194e18..f5ed8b05dc03a 100644
--- a/src/test_runner/Cargo.toml
+++ b/src/test_runner/Cargo.toml
@@ -18,3 +18,6 @@ normal = ["workspace-hack"]
 fail = "0.5"
 sync-point = { path = "../utils/sync-point" }
 workspace-hack = { path = "../workspace-hack" }
+
+[lints]
+workspace = true
diff --git a/src/tests/compaction_test/Cargo.toml b/src/tests/compaction_test/Cargo.toml
index 447902073c0e7..dd3e5d0a53699 100644
--- a/src/tests/compaction_test/Cargo.toml
+++ b/src/tests/compaction_test/Cargo.toml
@@ -53,3 +53,6 @@ path = "src/bin/compaction.rs"
 [[bin]]
 name = "delete-range-test"
 path = "src/bin/delete_range.rs"
+
+[lints]
+workspace = true
diff --git a/src/tests/compaction_test/src/compaction_test_runner.rs b/src/tests/compaction_test/src/compaction_test_runner.rs
index 410bafc05be24..db248ad788fba 100644
--- a/src/tests/compaction_test/src/compaction_test_runner.rs
+++ b/src/tests/compaction_test/src/compaction_test_runner.rs
@@ -410,7 +410,7 @@ async fn start_replay(
         replayed_epochs.pop();
         let mut epochs = vec![max_committed_epoch];
         epochs.extend(
-            pin_old_snapshots(&meta_client, &mut replayed_epochs, 1)
+            pin_old_snapshots(&meta_client, &replayed_epochs, 1)
                 .await
                 .into_iter(),
         );
@@ -521,7 +521,7 @@ async fn start_replay(
 
 async fn pin_old_snapshots(
     meta_client: &MetaClient,
-    replayed_epochs: &mut [HummockEpoch],
+    replayed_epochs: &[HummockEpoch],
     num: usize,
 ) -> Vec<HummockEpoch> {
     let mut old_epochs = vec![];
@@ -625,7 +625,7 @@ async fn open_hummock_iters(
         ))),
     );
 
-    for &epoch in snapshots.iter() {
+    for &epoch in snapshots {
         let iter = hummock
             .iter(
                 range.clone(),
diff --git a/src/tests/e2e_extended_mode/Cargo.toml b/src/tests/e2e_extended_mode/Cargo.toml
index 7e89105287602..ea83c5069c774 100644
--- a/src/tests/e2e_extended_mode/Cargo.toml
+++ b/src/tests/e2e_extended_mode/Cargo.toml
@@ -27,3 +27,6 @@ tracing-subscriber = "0.3.17"
 [[bin]]
 name = "risingwave_e2e_extended_mode_test"
 path = "src/main.rs"
+
+[lints]
+workspace = true
diff --git a/src/tests/e2e_extended_mode/src/test.rs b/src/tests/e2e_extended_mode/src/test.rs
index 46f6bff8aec05..21fcf1f146787 100644
--- a/src/tests/e2e_extended_mode/src/test.rs
+++ b/src/tests/e2e_extended_mode/src/test.rs
@@ -512,7 +512,7 @@ impl TestSuite {
         let rows = new_client
             .query(&format!("{} LIMIT 10", query_sql), &[])
             .await?;
-        let expect_ans = vec![
+        let expect_ans = [
             (1, 1, 1),
             (10, 10, 10),
             (100, 100, 100),
diff --git a/src/tests/libpq_test/Cargo.toml b/src/tests/libpq_test/Cargo.toml
index 846a2e1059d66..813cd37ca90b6 100644
--- a/src/tests/libpq_test/Cargo.toml
+++ b/src/tests/libpq_test/Cargo.toml
@@ -10,3 +10,6 @@ edition = "2021"
 anyhow = "1"
 libpq = "3.0"
 clap = { version = "4", features = ["derive"] }
+
+[lints]
+workspace = true
diff --git a/src/tests/regress/Cargo.toml b/src/tests/regress/Cargo.toml
index ea00e445e4be1..97b33bb0f2794 100644
--- a/src/tests/regress/Cargo.toml
+++ b/src/tests/regress/Cargo.toml
@@ -28,3 +28,6 @@ workspace-hack = { path = "../../workspace-hack" }
 [[bin]]
 name = "risingwave_regress_test"
 path = "src/bin/main.rs"
+
+[lints]
+workspace = true
diff --git a/src/tests/simulation/Cargo.toml b/src/tests/simulation/Cargo.toml
index dd21ac0ac6949..e4f0d5c99bc82 100644
--- a/src/tests/simulation/Cargo.toml
+++ b/src/tests/simulation/Cargo.toml
@@ -51,3 +51,6 @@ tokio = { version = "0.2.23", package = "madsim-tokio" }
"madsim-tokio" } tokio-postgres = "0.7" tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/src/tests/simulation/tests/integration_tests/main.rs b/src/tests/simulation/tests/integration_tests/main.rs index 82ed948b39a51..d7c79a0ca37f3 100644 --- a/src/tests/simulation/tests/integration_tests/main.rs +++ b/src/tests/simulation/tests/integration_tests/main.rs @@ -19,7 +19,7 @@ #![feature(stmt_expr_attributes)] #![feature(lazy_cell)] -#![feature(drain_filter)] +#![feature(extract_if)] mod backfill_tests; mod batch; diff --git a/src/tests/simulation/tests/integration_tests/scale/plan.rs b/src/tests/simulation/tests/integration_tests/scale/plan.rs index d60991c2246c2..c7244dc826b42 100644 --- a/src/tests/simulation/tests/integration_tests/scale/plan.rs +++ b/src/tests/simulation/tests/integration_tests/scale/plan.rs @@ -131,7 +131,7 @@ async fn test_resize_single() -> Result<()> { .collect(); let prev_workers = workers - .drain_filter(|worker| { + .extract_if(|worker| { worker .parallel_units .iter() diff --git a/src/tests/sqlsmith/Cargo.toml b/src/tests/sqlsmith/Cargo.toml index 3bdcc91b9c6b3..57acbc8d94cca 100644 --- a/src/tests/sqlsmith/Cargo.toml +++ b/src/tests/sqlsmith/Cargo.toml @@ -53,3 +53,6 @@ enable_sqlsmith_unit_test = [] [[test]] name = "test_runner" harness = false + +[lints] +workspace = true diff --git a/src/tests/sqlsmith/src/reducer.rs b/src/tests/sqlsmith/src/reducer.rs index 5ee0afba5fc6b..4f5d6ff7f9c4f 100644 --- a/src/tests/sqlsmith/src/reducer.rs +++ b/src/tests/sqlsmith/src/reducer.rs @@ -13,8 +13,8 @@ // limitations under the License. //! Provides E2E Test runner functionality. - use std::collections::HashSet; +use std::fmt::Write; use anyhow::anyhow; use itertools::Itertools; @@ -86,8 +86,10 @@ fn shrink(sql: &str) -> Result { let sql = reduced_statements .iter() - .map(|s| format!("{s};\n")) - .collect::(); + .fold(String::new(), |mut output, s| { + let _ = writeln!(output, "{s};"); + output + }); Ok(sql) } diff --git a/src/tests/sqlsmith/src/runner.rs b/src/tests/sqlsmith/src/runner.rs index 63db1d72fdcb1..5efc793cdd95c 100644 --- a/src/tests/sqlsmith/src/runner.rs +++ b/src/tests/sqlsmith/src/runner.rs @@ -108,7 +108,7 @@ pub async fn generate( tracing::error!("Unrecoverable error encountered."); return; } - Ok(skipped) if skipped == 0 => { + Ok(0) => { generated_queries += 1; } _ => {} @@ -129,7 +129,7 @@ pub async fn generate( tracing::error!("Unrecoverable error encountered."); return; } - Ok(skipped) if skipped == 0 => { + Ok(0) => { generated_queries += 1; } _ => {} @@ -385,7 +385,7 @@ async fn test_stream_queries( } fn get_seed_table_sql(testdata: &str) -> String { - let seed_files = vec!["tpch.sql", "nexmark.sql", "alltypes.sql"]; + let seed_files = ["tpch.sql", "nexmark.sql", "alltypes.sql"]; seed_files .iter() .map(|filename| read_file_contents(format!("{}/{}", testdata, filename)).unwrap()) @@ -454,7 +454,7 @@ async fn drop_tables(mviews: &[Table], testdata: &str, client: &Client) { drop_mview_table(mview, client).await; } - let seed_files = vec!["drop_tpch.sql", "drop_nexmark.sql", "drop_alltypes.sql"]; + let seed_files = ["drop_tpch.sql", "drop_nexmark.sql", "drop_alltypes.sql"]; let sql = seed_files .iter() .map(|filename| read_file_contents(format!("{}/{}", testdata, filename)).unwrap()) diff --git a/src/tests/sqlsmith/src/sql_gen/dml.rs b/src/tests/sqlsmith/src/sql_gen/dml.rs index 7fc79e2660aee..740054df8b447 100644 --- a/src/tests/sqlsmith/src/sql_gen/dml.rs +++ 
@@ -109,7 +109,7 @@ impl<'a, R: Rng + 'a> SqlGenerator<'a, R> {
         };
         delete_statements
             .into_iter()
-            .chain(insert_statements.into_iter())
+            .chain(insert_statements)
             .collect()
     } else {
         let value_indices = (0..table.columns.len())
diff --git a/src/tests/sqlsmith/tests/frontend/mod.rs b/src/tests/sqlsmith/tests/frontend/mod.rs
index ddce2783158df..a0ab1d59cf58e 100644
--- a/src/tests/sqlsmith/tests/frontend/mod.rs
+++ b/src/tests/sqlsmith/tests/frontend/mod.rs
@@ -53,7 +53,7 @@ async fn handle(session: Arc<SessionImpl>, stmt: Statement, sql: &str) -> Result<()> {
 }
 
 fn get_seed_table_sql() -> String {
-    let seed_files = vec![
+    let seed_files = [
         "tests/testdata/tpch.sql",
         "tests/testdata/nexmark.sql",
         "tests/testdata/alltypes.sql",
diff --git a/src/tests/state_cleaning_test/Cargo.toml b/src/tests/state_cleaning_test/Cargo.toml
index db9b0ae342790..2116e1d58659a 100644
--- a/src/tests/state_cleaning_test/Cargo.toml
+++ b/src/tests/state_cleaning_test/Cargo.toml
@@ -34,3 +34,6 @@ workspace-hack = { path = "../../workspace-hack" }
 [[bin]]
 name = "risingwave_state_cleaning_test"
 path = "src/bin/main.rs"
+
+[lints]
+workspace = true
diff --git a/src/udf/Cargo.toml b/src/udf/Cargo.toml
index b82fad73cc60c..bad8f46a4c62d 100644
--- a/src/udf/Cargo.toml
+++ b/src/udf/Cargo.toml
@@ -20,3 +20,6 @@ static_assertions = "1"
 thiserror = "1"
 tokio = { version = "0.2", package = "madsim-tokio", features = ["rt", "macros"] }
 tonic = { workspace = true }
+
+[lints]
+workspace = true
diff --git a/src/utils/local_stats_alloc/Cargo.toml b/src/utils/local_stats_alloc/Cargo.toml
index 42a2118a94e73..d80d3db38109c 100644
--- a/src/utils/local_stats_alloc/Cargo.toml
+++ b/src/utils/local_stats_alloc/Cargo.toml
@@ -19,3 +19,6 @@ ignored = ["workspace-hack"]
 
 [package.metadata.cargo-udeps.ignore]
 normal = ["workspace-hack"]
+
+[lints]
+workspace = true
diff --git a/src/utils/pgwire/Cargo.toml b/src/utils/pgwire/Cargo.toml
index 4ca6b556bf281..cfa82c1393de8 100644
--- a/src/utils/pgwire/Cargo.toml
+++ b/src/utils/pgwire/Cargo.toml
@@ -34,3 +34,6 @@ workspace-hack = { path = "../../workspace-hack" }
 
 [dev-dependencies]
 tokio-postgres = "0.7"
+
+[lints]
+workspace = true
diff --git a/src/utils/pgwire/src/lib.rs b/src/utils/pgwire/src/lib.rs
index 1d403314f5f4b..1cda373ee9568 100644
--- a/src/utils/pgwire/src/lib.rs
+++ b/src/utils/pgwire/src/lib.rs
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#![feature(io_error_other)]
 #![feature(lint_reasons)]
 #![feature(trait_alias)]
 #![feature(result_option_inspect)]
diff --git a/src/utils/pgwire/src/pg_extended.rs b/src/utils/pgwire/src/pg_extended.rs
index 7a095bcded6ed..100c1828eb4f2 100644
--- a/src/utils/pgwire/src/pg_extended.rs
+++ b/src/utils/pgwire/src/pg_extended.rs
@@ -90,7 +90,7 @@ where
             .values_stream()
             .try_next()
             .await
-            .map_err(|err| PsqlError::ExecuteError(err))?
+            .map_err(PsqlError::ExecuteError)?
         {
             rows.into_iter()
         } else {
diff --git a/src/utils/pgwire/src/pg_message.rs b/src/utils/pgwire/src/pg_message.rs
index a02c68c06382e..408330a2df6ae 100644
--- a/src/utils/pgwire/src/pg_message.rs
+++ b/src/utils/pgwire/src/pg_message.rs
@@ -544,7 +544,7 @@ impl<'a> BeMessage<'a> {
                 buf.put_u8(b'T');
                 write_body(buf, |buf| {
                     buf.put_i16(row_descs.len() as i16); // # of fields
-                    for pg_field in row_descs.iter() {
+                    for pg_field in *row_descs {
                         write_cstr(buf, pg_field.get_name().as_bytes())?;
                         buf.put_i32(pg_field.get_table_oid()); // table oid
                         buf.put_i16(pg_field.get_col_attr_num()); // attnum
@@ -598,7 +598,7 @@ impl<'a> BeMessage<'a> {
                 buf.put_u8(b't');
                 write_body(buf, |buf| {
                     buf.put_i16(para_descs.len() as i16);
-                    for oid in para_descs.iter() {
+                    for oid in *para_descs {
                         buf.put_i32(*oid);
                     }
                     Ok(())
diff --git a/src/utils/pgwire/src/pg_protocol.rs b/src/utils/pgwire/src/pg_protocol.rs
index 66f61b6b5228d..ff705025a0d64 100644
--- a/src/utils/pgwire/src/pg_protocol.rs
+++ b/src/utils/pgwire/src/pg_protocol.rs
@@ -497,7 +497,7 @@ where
             self.stream
                 .write_no_flush(&BeMessage::NoticeResponse(&notice))?;
         }
-        let mut res = res.map_err(|err| PsqlError::QueryError(err))?;
+        let mut res = res.map_err(PsqlError::QueryError)?;
 
         for notice in res.notices() {
             self.stream
@@ -518,7 +518,7 @@ where
         let mut rows_cnt = 0;
 
         while let Some(row_set) = res.values_stream().next().await {
-            let row_set = row_set.map_err(|err| PsqlError::QueryError(err))?;
+            let row_set = row_set.map_err(PsqlError::QueryError)?;
             for row in row_set {
                 self.stream.write_no_flush(&BeMessage::DataRow(&row))?;
                 rows_cnt += 1;
@@ -629,7 +629,7 @@ where
 
         self.statement_portal_dependency
             .entry(statement_name)
-            .or_insert_with(Vec::new)
+            .or_default()
            .clear();
 
         self.stream.write_no_flush(&BeMessage::ParseComplete)?;
@@ -786,7 +786,7 @@ where
         for portal_name in self
             .statement_portal_dependency
             .remove(&name)
-            .unwrap_or(vec![])
+            .unwrap_or_default()
         {
             self.remove_portal(&portal_name);
         }
diff --git a/src/utils/pgwire/src/pg_server.rs b/src/utils/pgwire/src/pg_server.rs
index e7cb351ab0c3c..ba52215e4d34a 100644
--- a/src/utils/pgwire/src/pg_server.rs
+++ b/src/utils/pgwire/src/pg_server.rs
@@ -232,7 +232,6 @@ mod tests {
     type PreparedStatement = String;
     type ValuesStream = BoxStream<'static, RowSetResult>;
 
-    #[expect(clippy::unused_async)]
     async fn run_one_query(
         self: Arc<Self>,
         _sql: Statement,
@@ -270,7 +269,6 @@ mod tests {
             Ok(String::new())
         }
 
-        #[expect(clippy::unused_async)]
         async fn execute(
             self: Arc<Self>,
             _portal: String,
diff --git a/src/utils/runtime/Cargo.toml b/src/utils/runtime/Cargo.toml
index 84c194341e141..6a11fce54ffb6 100644
--- a/src/utils/runtime/Cargo.toml
+++ b/src/utils/runtime/Cargo.toml
@@ -49,3 +49,6 @@ tracing-subscriber = { version = "0.3", features = ["fmt", "parking_lot", "std",
 [target.'cfg(not(madsim))'.dependencies]
 opentelemetry = { version = "0.20", default-features = false, features = ["rt-tokio"] }
 workspace-hack = { path = "../../workspace-hack" }
+
+[lints]
+workspace = true
diff --git a/src/utils/sync-point/Cargo.toml b/src/utils/sync-point/Cargo.toml
index 050085ab47852..d228bfbe79a35 100644
--- a/src/utils/sync-point/Cargo.toml
+++ b/src/utils/sync-point/Cargo.toml
@@ -18,3 +18,6 @@ tokio = { version = "0.2", package = "madsim-tokio", features = ["sync", "time"]
 
 [features]
 sync_point = []
+
+[lints]
+workspace = true
diff --git a/src/utils/task_stats_alloc/Cargo.toml b/src/utils/task_stats_alloc/Cargo.toml
index a8442fc9c09d6..20d5ceed86729 100644
--- a/src/utils/task_stats_alloc/Cargo.toml
+++ b/src/utils/task_stats_alloc/Cargo.toml
@@ -27,3 +27,6 @@ tokio = { version = "0.2", package = "madsim-tokio", features = [
 
 [target.'cfg(loom)'.dependencies]
 loom = {version = "0.5", features = ["futures", "checkpoint"]}
+
+[lints]
+workspace = true
diff --git a/src/utils/variables/Cargo.toml b/src/utils/variables/Cargo.toml
index f7c88f9ed5e4e..7bcc1b2d963c0 100644
--- a/src/utils/variables/Cargo.toml
+++ b/src/utils/variables/Cargo.toml
@@ -22,3 +22,6 @@ chrono = { version = "0.4", default-features = false, features = [
 
 [target.'cfg(not(madsim))'.dependencies]
 workspace-hack = { path = "../../workspace-hack" }
+
+[lints]
+workspace = true
diff --git a/src/utils/workspace-config/Cargo.toml b/src/utils/workspace-config/Cargo.toml
index fb7334ac03ae2..2159f5508a75b 100644
--- a/src/utils/workspace-config/Cargo.toml
+++ b/src/utils/workspace-config/Cargo.toml
@@ -22,3 +22,6 @@ tracing = { version = "0.1", features = ["release_max_level_debug"] }
 zstd-sys = { version = "2", optional = true, default-features = false, features = ["pkg-config"] }
 
 # workspace-hack = { path = "../../workspace-hack" } # Don't add workspace-hack into this crate!
+
+[lints]
+workspace = true
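
Two nightly API migrations recur throughout this patch: the `drain_filter` family was renamed to `extract_if` (behind the `extract_if`, `hash_extract_if`, and `btree_extract_if` feature gates), and the `provide_any` machinery was folded into `std::error` under `error_generic_member_access`, so `(&err as &dyn Error).request_ref::<Backtrace>()` becomes the free function `std::error::request_ref::<Backtrace>(&err)`. The two sketches below are minimal, self-contained illustrations of those standard-library APIs against a nightly of this vintage; the names in them (`cache`, `workers`, `InnerError`) are hypothetical stand-ins, not RisingWave code.

    // extract_if is drain_filter under its new name: the iterator lazily
    // removes and yields exactly the entries matching the predicate,
    // keeping the rest of the collection in place.
    #![feature(extract_if)] // Vec::extract_if
    #![feature(btree_extract_if)] // BTreeMap::extract_if

    use std::collections::BTreeMap;

    fn main() {
        let mut cache = BTreeMap::from([(1, "a"), (2, "b"), (3, "c")]);
        let evicted: Vec<_> = cache.extract_if(|k, _v| *k < 3).collect();
        assert_eq!(evicted, [(1, "a"), (2, "b")]);
        assert_eq!(cache.len(), 1);

        // Vec::extract_if is the same idea; its predicate takes &mut T.
        let mut workers = vec![1, 2, 3, 4];
        let removed: Vec<_> = workers.extract_if(|w| *w % 2 == 0).collect();
        assert_eq!(removed, [2, 4]);
        assert_eq!(workers, [1, 3]);
    }

Later toolchains changed `Vec::extract_if` to also take a range argument, so the single-argument form above is tied to a nightly from this period. For the error side, exercised by the `StreamExecutorError` hunk above, a sketch assuming the post-`provide_any` `std::error::Request` API:

    #![feature(error_generic_member_access)]

    use std::backtrace::Backtrace;
    use std::error::{self, Error};
    use std::fmt;

    // A hypothetical error type that captures and provides a backtrace.
    #[derive(Debug)]
    struct InnerError {
        backtrace: Backtrace,
    }

    impl fmt::Display for InnerError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("inner error")
        }
    }

    impl Error for InnerError {
        // Backtrace provision now goes through `Error::provide` and
        // `std::error::Request` instead of the removed `Provider` trait.
        fn provide<'a>(&'a self, request: &mut error::Request<'a>) {
            request.provide_ref::<Backtrace>(&self.backtrace);
        }
    }

    fn main() {
        let err = InnerError { backtrace: Backtrace::capture() };
        // Old: (&err as &dyn Error).request_ref::<Backtrace>()
        if let Some(bt) = error::request_ref::<Backtrace>(&err as &dyn Error) {
            println!("{bt}");
        }
    }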