From 6b8dbcfb5450efb244eb11ca884cab46a6324d09 Mon Sep 17 00:00:00 2001
From: Ruihang Xia
Date: Tue, 19 Dec 2023 15:24:08 +0800
Subject: [PATCH] chore: update toolchain to 20231219 (#2932)

* update toolchain file, remove unused feature gates

Signed-off-by: Ruihang Xia

* fix clippy

Signed-off-by: Ruihang Xia

* fix format

Signed-off-by: Ruihang Xia

* update action file

Signed-off-by: Ruihang Xia

* update to 12-19

Signed-off-by: Ruihang Xia

---------

Signed-off-by: Ruihang Xia
---
 .github/workflows/apidoc.yml                  |  2 +-
 .github/workflows/develop.yml                 |  2 +-
 .github/workflows/nightly-ci.yml              |  2 +-
 .github/workflows/release.yml                 |  2 +-
 rust-toolchain.toml                           |  2 +-
 src/catalog/src/lib.rs                        |  1 -
 src/common/meta/src/kv_backend/etcd.rs        |  6 ++--
 src/common/meta/src/rpc/store.rs              |  8 +++---
 src/common/substrait/src/lib.rs               |  1 -
 src/datanode/src/lib.rs                       |  1 -
 src/datatypes/src/prelude.rs                  |  1 -
 src/datatypes/src/schema.rs                   |  4 ++-
 src/file-engine/src/lib.rs                    |  1 -
 src/frontend/src/lib.rs                       |  1 -
 src/meta-srv/src/cluster.rs                   |  2 +-
 .../src/handler/persist_stats_handler.rs      |  4 +--
 src/meta-srv/src/keys.rs                      |  6 ++--
 src/meta-srv/src/service/admin/heartbeat.rs   |  6 ++--
 src/mito2/src/compaction/twcs.rs              | 28 ++++++++++++++-----
 src/mito2/src/memtable/time_series.rs         |  4 +--
 src/mito2/src/request.rs                      |  2 +-
 src/object-store/tests/object_store_test.rs   |  2 +-
 src/partition/src/splitter.rs                 |  4 +--
 .../src/extension_plan/histogram_fold.rs      |  4 ++-
 .../src/extension_plan/instant_manipulate.rs  | 11 ++++++--
 src/promql/src/planner.rs                     |  2 +-
 src/query/src/dist_plan/analyzer.rs           | 12 ++++----
 src/script/src/python/engine.rs               |  2 +-
 src/script/src/python/ffi_types/copr/parse.rs |  2 +-
 src/servers/src/http/opentsdb.rs              |  6 +---
 src/sql/src/parser.rs                         |  2 +-
 src/sql/src/parsers/copy_parser.rs            |  4 ++-
 src/sql/src/parsers/create_parser.rs          |  2 +-
 src/sql/src/statements/insert.rs              |  2 +-
 src/table/src/predicate.rs                    |  1 -
 tests-integration/src/otlp.rs                 |  2 +-
 tests-integration/src/prom_store.rs           |  2 +-
 tests-integration/tests/region_failover.rs    |  2 +-
 38 files changed, 82 insertions(+), 66 deletions(-)

diff --git a/.github/workflows/apidoc.yml b/.github/workflows/apidoc.yml
index 97203eafaffb..44cd6e7579fa 100644
--- a/.github/workflows/apidoc.yml
+++ b/.github/workflows/apidoc.yml
@@ -13,7 +13,7 @@ on:
 name: Build API docs

 env:
-  RUST_TOOLCHAIN: nightly-2023-10-21
+  RUST_TOOLCHAIN: nightly-2023-12-19

 jobs:
   apidoc:
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index a978c7f23c51..438cb3336ba4 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -29,7 +29,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  RUST_TOOLCHAIN: nightly-2023-10-21
+  RUST_TOOLCHAIN: nightly-2023-12-19

 jobs:
   typos:
diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml
index bd0326b512e4..38be4294ee09 100644
--- a/.github/workflows/nightly-ci.yml
+++ b/.github/workflows/nightly-ci.yml
@@ -12,7 +12,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  RUST_TOOLCHAIN: nightly-2023-10-21
+  RUST_TOOLCHAIN: nightly-2023-12-19

 jobs:
   sqlness:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 95bef056d06a..d3b5181981cf 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -82,7 +82,7 @@ on:
 # Use env variables to control all the release process.
 env:
   # The arguments of building greptime.
-  RUST_TOOLCHAIN: nightly-2023-10-21
+  RUST_TOOLCHAIN: nightly-2023-12-19
   CARGO_PROFILE: nightly

   # Controls whether to run tests, include unit-test, integration-test and sqlness.
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index fe2a026f6e40..81fce2619416 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2023-10-21"
+channel = "nightly-2023-12-19"
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index 25e1f10d1753..98a0bdfd1320 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#![feature(trait_upcasting)]
 #![feature(assert_matches)]
 #![feature(try_blocks)]

diff --git a/src/common/meta/src/kv_backend/etcd.rs b/src/common/meta/src/kv_backend/etcd.rs
index 806b90150b8e..5ca2ba62891d 100644
--- a/src/common/meta/src/kv_backend/etcd.rs
+++ b/src/common/meta/src/kv_backend/etcd.rs
@@ -549,7 +549,7 @@ mod tests {
         let batch_get: BatchGet = req.try_into().unwrap();

         let keys = batch_get.keys;
-        assert_eq!(b"k1".to_vec(), keys.get(0).unwrap().clone());
+        assert_eq!(b"k1".to_vec(), keys.first().unwrap().clone());
         assert_eq!(b"k2".to_vec(), keys.get(1).unwrap().clone());
         assert_eq!(b"k3".to_vec(), keys.get(2).unwrap().clone());
     }
@@ -566,7 +566,7 @@ mod tests {

         let batch_put: BatchPut = req.try_into().unwrap();

-        let kv = batch_put.kvs.get(0).unwrap();
+        let kv = batch_put.kvs.first().unwrap();
         assert_eq!(b"test_key", kv.key());
         assert_eq!(b"test_value", kv.value());
         let _ = batch_put.options.unwrap();
@@ -582,7 +582,7 @@ mod tests {
         let batch_delete: BatchDelete = req.try_into().unwrap();

         assert_eq!(batch_delete.keys.len(), 3);
-        assert_eq!(b"k1".to_vec(), batch_delete.keys.get(0).unwrap().clone());
+        assert_eq!(b"k1".to_vec(), batch_delete.keys.first().unwrap().clone());
         assert_eq!(b"k2".to_vec(), batch_delete.keys.get(1).unwrap().clone());
         assert_eq!(b"k3".to_vec(), batch_delete.keys.get(2).unwrap().clone());
         let _ = batch_delete.options.unwrap();
diff --git a/src/common/meta/src/rpc/store.rs b/src/common/meta/src/rpc/store.rs
index 73b7d0de7cc0..e4873945ab59 100644
--- a/src/common/meta/src/rpc/store.rs
+++ b/src/common/meta/src/rpc/store.rs
@@ -916,7 +916,7 @@ mod tests {
         let into_req: PbBatchGetRequest = req.into();

         assert!(into_req.header.is_none());
-        assert_eq!(b"test_key1".as_slice(), into_req.keys.get(0).unwrap());
+        assert_eq!(b"test_key1".as_slice(), into_req.keys.first().unwrap());
         assert_eq!(b"test_key2".as_slice(), into_req.keys.get(1).unwrap());
         assert_eq!(b"test_key3".as_slice(), into_req.keys.get(2).unwrap());
     }
@@ -946,10 +946,10 @@ mod tests {
         let into_req: PbBatchPutRequest = req.into();

         assert!(into_req.header.is_none());
-        assert_eq!(b"test_key1".to_vec(), into_req.kvs.get(0).unwrap().key);
+        assert_eq!(b"test_key1".to_vec(), into_req.kvs.first().unwrap().key);
         assert_eq!(b"test_key2".to_vec(), into_req.kvs.get(1).unwrap().key);
         assert_eq!(b"test_key3".to_vec(), into_req.kvs.get(2).unwrap().key);
-        assert_eq!(b"test_value1".to_vec(), into_req.kvs.get(0).unwrap().value);
+        assert_eq!(b"test_value1".to_vec(), into_req.kvs.first().unwrap().value);
         assert_eq!(b"test_value2".to_vec(), into_req.kvs.get(1).unwrap().value);
         assert_eq!(b"test_value3".to_vec(), into_req.kvs.get(2).unwrap().value);
         assert!(into_req.prev_kv);
@@ -981,7 +981,7 @@ mod tests {
         let into_req: PbBatchDeleteRequest = req.into();

         assert!(into_req.header.is_none());
-        assert_eq!(&b"test_key1".to_vec(), into_req.keys.get(0).unwrap());
+        assert_eq!(&b"test_key1".to_vec(), into_req.keys.first().unwrap());
         assert_eq!(&b"test_key2".to_vec(), into_req.keys.get(1).unwrap());
         assert_eq!(&b"test_key3".to_vec(), into_req.keys.get(2).unwrap());
         assert!(into_req.prev_kv);
diff --git a/src/common/substrait/src/lib.rs b/src/common/substrait/src/lib.rs
index 87e50efda29f..e0c3046b0868 100644
--- a/src/common/substrait/src/lib.rs
+++ b/src/common/substrait/src/lib.rs
@@ -13,7 +13,6 @@
 // limitations under the License.

 #![feature(let_chains)]
-#![feature(trait_upcasting)]

 mod df_substrait;
 pub mod error;
diff --git a/src/datanode/src/lib.rs b/src/datanode/src/lib.rs
index 43e8ee8c2e55..fc7126c2e532 100644
--- a/src/datanode/src/lib.rs
+++ b/src/datanode/src/lib.rs
@@ -13,7 +13,6 @@
 // limitations under the License.

 #![feature(assert_matches)]
-#![feature(trait_upcasting)]

 pub mod alive_keeper;
 pub mod config;
diff --git a/src/datatypes/src/prelude.rs b/src/datatypes/src/prelude.rs
index 2f67d2113a6e..0fdc9b4f729f 100644
--- a/src/datatypes/src/prelude.rs
+++ b/src/datatypes/src/prelude.rs
@@ -13,7 +13,6 @@
 // limitations under the License.

 pub use crate::data_type::{ConcreteDataType, DataType, DataTypeRef};
-pub use crate::macros::*;
 pub use crate::scalars::{Scalar, ScalarRef, ScalarVector, ScalarVectorBuilder};
 pub use crate::type_id::LogicalTypeId;
 pub use crate::types::{LogicalPrimitiveType, WrapperType};
diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs
index 6b170e3abb41..2e58b87cd817 100644
--- a/src/datatypes/src/schema.rs
+++ b/src/datatypes/src/schema.rs
@@ -144,7 +144,9 @@ impl Schema {
         let mut column_schemas = Vec::with_capacity(indices.len());
         let mut timestamp_index = None;
         for index in indices {
-            if let Some(ts_index) = self.timestamp_index && ts_index == *index {
+            if let Some(ts_index) = self.timestamp_index
+                && ts_index == *index
+            {
                 timestamp_index = Some(column_schemas.len());
             }
             column_schemas.push(self.column_schemas[*index].clone());
diff --git a/src/file-engine/src/lib.rs b/src/file-engine/src/lib.rs
index 34fe0f2914f4..cc9bac8c6fdb 100644
--- a/src/file-engine/src/lib.rs
+++ b/src/file-engine/src/lib.rs
@@ -13,7 +13,6 @@
 // limitations under the License.

 #![feature(assert_matches)]
-#![feature(result_option_inspect)]

 pub mod config;
 pub mod engine;
diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs
index 2ad221e6b971..8761cc098c9e 100644
--- a/src/frontend/src/lib.rs
+++ b/src/frontend/src/lib.rs
@@ -13,7 +13,6 @@
 // limitations under the License.

 #![feature(assert_matches)]
-#![feature(trait_upcasting)]

 pub mod error;
 pub mod frontend;
diff --git a/src/meta-srv/src/cluster.rs b/src/meta-srv/src/cluster.rs
index 0b3bcc9b98ec..83a5dd984cd0 100644
--- a/src/meta-srv/src/cluster.rs
+++ b/src/meta-srv/src/cluster.rs
@@ -308,7 +308,7 @@ mod tests {
         let _ = kv_map.get(&stat_key).unwrap();

         let stat_val = kv_map.get(&stat_key).unwrap();
-        let stat = stat_val.stats.get(0).unwrap();
+        let stat = stat_val.stats.first().unwrap();
         assert_eq!(0, stat.cluster_id);
         assert_eq!(100, stat.id);

diff --git a/src/meta-srv/src/handler/persist_stats_handler.rs b/src/meta-srv/src/handler/persist_stats_handler.rs
index c5c42a96bba1..41b9b3922862 100644
--- a/src/meta-srv/src/handler/persist_stats_handler.rs
+++ b/src/meta-srv/src/handler/persist_stats_handler.rs
@@ -190,7 +190,7 @@ mod tests {
             cluster_id: 3,
             node_id: 101,
         };
-        let key: Vec<u8> = key.try_into().unwrap();
+        let key: Vec<u8> = key.into();
         let res = ctx.in_memory.get(&key).await.unwrap();
         let kv = res.unwrap();
         let key: StatKey = kv.key.clone().try_into().unwrap();
@@ -203,7 +203,7 @@ mod tests {

         handle_request_many_times(ctx.clone(), &handler, 10).await;

-        let key: Vec<u8> = key.try_into().unwrap();
+        let key: Vec<u8> = key.into();
         let res = ctx.in_memory.get(&key).await.unwrap();
         let kv = res.unwrap();
         let val: StatValue = kv.value.try_into().unwrap();
diff --git a/src/meta-srv/src/keys.rs b/src/meta-srv/src/keys.rs
index cb67af41cbd8..2e4321937265 100644
--- a/src/meta-srv/src/keys.rs
+++ b/src/meta-srv/src/keys.rs
@@ -314,7 +314,7 @@ mod tests {
             node_id: 1,
         };

-        let key_bytes: Vec<u8> = key.try_into().unwrap();
+        let key_bytes: Vec<u8> = key.into();
         let new_key: StatKey = key_bytes.try_into().unwrap();

         assert_eq!(0, new_key.cluster_id);
@@ -338,7 +338,7 @@ mod tests {

         assert_eq!(1, stats.len());

-        let stat = stats.get(0).unwrap();
+        let stat = stats.first().unwrap();
         assert_eq!(0, stat.cluster_id);
         assert_eq!(101, stat.id);
         assert_eq!(100, stat.region_num);
@@ -452,7 +452,7 @@ mod tests {
             region_id: 2,
         };

-        let key_bytes: Vec<u8> = key.try_into().unwrap();
+        let key_bytes: Vec<u8> = key.into();
         let new_key: InactiveRegionKey = key_bytes.try_into().unwrap();

         assert_eq!(new_key, key);
diff --git a/src/meta-srv/src/service/admin/heartbeat.rs b/src/meta-srv/src/service/admin/heartbeat.rs
index e17fa9cbb4dc..3b7c42d729d1 100644
--- a/src/meta-srv/src/service/admin/heartbeat.rs
+++ b/src/meta-srv/src/service/admin/heartbeat.rs
@@ -128,13 +128,13 @@ mod tests {
         let mut stat_vals = vec![stat_value1, stat_value2];
         stat_vals = filter_by_addr(stat_vals, "127.0.0.1:3002");
         assert_eq!(stat_vals.len(), 1);
-        assert_eq!(stat_vals.get(0).unwrap().stats.len(), 3);
+        assert_eq!(stat_vals.first().unwrap().stats.len(), 3);
         assert_eq!(
             stat_vals
-                .get(0)
+                .first()
                 .unwrap()
                 .stats
-                .get(0)
+                .first()
                 .unwrap()
                 .timestamp_millis,
             3
diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs
index 95b1eee3f1f6..6b853cc98313 100644
--- a/src/mito2/src/compaction/twcs.rs
+++ b/src/mito2/src/compaction/twcs.rs
@@ -83,11 +83,13 @@ impl TwcsPicker {
     ) -> Vec<CompactionOutput> {
         let mut output = vec![];
         for (window, files) in time_windows {
-            if let Some(active_window) = active_window && *window == active_window {
+            if let Some(active_window) = active_window
+                && *window == active_window
+            {
                 if files.len() > self.max_active_window_files {
                     output.push(CompactionOutput {
                         output_file_id: FileId::random(),
-                        output_level: 1, // we only have two levels and always compact to l1 
+                        output_level: 1, // we only have two levels and always compact to l1
                         inputs: files.clone(),
                     });
                 } else {
@@ -102,7 +104,11 @@ impl TwcsPicker {
                         inputs: files.clone(),
                     });
                 } else {
-                    debug!("No enough files, current: {}, max_inactive_window_files: {}", files.len(), self.max_inactive_window_files)
+                    debug!(
+                        "No enough files, current: {}, max_inactive_window_files: {}",
+                        files.len(),
+                        self.max_inactive_window_files
+                    )
                 }
             }
         }
@@ -207,7 +213,9 @@ fn find_latest_window_in_seconds<'a>(
     let mut latest_timestamp = None;
     for f in files {
         let (_, end) = f.time_range();
-        if let Some(latest) = latest_timestamp && end > latest {
+        if let Some(latest) = latest_timestamp
+            && end > latest
+        {
             latest_timestamp = Some(end);
         } else {
             latest_timestamp = Some(end);
@@ -542,11 +550,17 @@ mod tests {
             .iter(),
             3,
         );
-        assert_eq!(files[0], windows.get(&0).unwrap().get(0).unwrap().file_id());
-        assert_eq!(files[1], windows.get(&3).unwrap().get(0).unwrap().file_id());
+        assert_eq!(
+            files[0],
+            windows.get(&0).unwrap().first().unwrap().file_id()
+        );
+        assert_eq!(
+            files[1],
+            windows.get(&3).unwrap().first().unwrap().file_id()
+        );
         assert_eq!(
             files[2],
-            windows.get(&12).unwrap().get(0).unwrap().file_id()
+            windows.get(&12).unwrap().first().unwrap().file_id()
         );
     }

diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs
index 4dd590212c33..3b9b713a10ea 100644
--- a/src/mito2/src/memtable/time_series.rs
+++ b/src/mito2/src/memtable/time_series.rs
@@ -1041,7 +1041,7 @@ mod tests {
         v0.extend(
             values
                 .fields
-                .get(0)
+                .first()
                 .unwrap()
                 .as_any()
                 .downcast_ref::()
@@ -1125,7 +1125,7 @@ mod tests {
         assert_eq!(1, batch.fields().len());
         let v0 = batch
             .fields()
-            .get(0)
+            .first()
             .unwrap()
             .data
             .as_any()
diff --git a/src/mito2/src/request.rs b/src/mito2/src/request.rs
index 47241d6bf999..311638792d99 100644
--- a/src/mito2/src/request.rs
+++ b/src/mito2/src/request.rs
@@ -119,7 +119,7 @@ impl WriteRequest {
         let row_size = self
             .rows
             .rows
-            .get(0)
+            .first()
             .map(|row| row.encoded_len())
             .unwrap_or(0);
         row_size * self.rows.rows.len()
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index 2937defc419f..166be33e923a 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -74,7 +74,7 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> {
     // Only o2 is exists
     let entries = store.list("/").await?;
     assert_eq!(1, entries.len());
-    assert_eq!(p2, entries.get(0).unwrap().path());
+    assert_eq!(p2, entries.first().unwrap().path());

     let content = store.read(p2).await?;
     assert_eq!("Hello, object2!", String::from_utf8(content)?);
diff --git a/src/partition/src/splitter.rs b/src/partition/src/splitter.rs
index ed0eaa2cc571..7904355e64b5 100644
--- a/src/partition/src/splitter.rs
+++ b/src/partition/src/splitter.rs
@@ -202,7 +202,7 @@ mod tests {
     }

     fn find_region(&self, values: &[Value]) -> Result<RegionNumber> {
-        let val = values.get(0).unwrap().clone();
+        let val = values.first().unwrap().clone();
         let val = match val {
             Value::String(v) => v.as_utf8().to_string(),
             _ => unreachable!(),
@@ -229,7 +229,7 @@ mod tests {
     }

     fn find_region(&self, values: &[Value]) -> Result<RegionNumber> {
-        let val = values.get(0).unwrap().clone();
+        let val = values.first().unwrap().clone();
         let val = match val {
             Value::Null => 1,
             _ => 0,
diff --git a/src/promql/src/extension_plan/histogram_fold.rs b/src/promql/src/extension_plan/histogram_fold.rs
index 6badd2f2d99b..c5fabd4a0f84 100644
--- a/src/promql/src/extension_plan/histogram_fold.rs
+++ b/src/promql/src/extension_plan/histogram_fold.rs
@@ -624,7 +624,9 @@ impl HistogramFoldStream {
             ))
         })?;
         for (i, v) in le_as_f64_array.iter().enumerate() {
-            if let Some(v) = v && v == f64::INFINITY {
+            if let Some(v) = v
+                && v == f64::INFINITY
+            {
                 return Ok(i);
             }
         }
diff --git a/src/promql/src/extension_plan/instant_manipulate.rs b/src/promql/src/extension_plan/instant_manipulate.rs
index 3b8e6a910a40..ba155627d2c5 100644
--- a/src/promql/src/extension_plan/instant_manipulate.rs
+++ b/src/promql/src/extension_plan/instant_manipulate.rs
@@ -360,7 +360,9 @@ impl InstantManipulateStream {
             let curr = ts_column.value(cursor);
             match curr.cmp(&expected_ts) {
                 Ordering::Equal => {
-                    if let Some(field_column) = &field_column && field_column.value(cursor).is_nan() {
+                    if let Some(field_column) = &field_column
+                        && field_column.value(cursor).is_nan()
+                    {
                         // ignore the NaN value
                     } else {
                         take_indices.push(cursor as u64);
@@ -393,7 +395,8 @@ impl InstantManipulateStream {
                 if prev_ts + self.lookback_delta >= expected_ts {
                     // only use the point in the time range
                     if let Some(field_column) = &field_column
-                        && field_column.value(prev_cursor).is_nan() {
+                        && field_column.value(prev_cursor).is_nan()
+                    {
                         // if the newest value is NaN, it means the value is stale, so we should not use it
                         continue;
                     }
@@ -402,7 +405,9 @@ impl InstantManipulateStream {
                     take_indices.push(prev_cursor as u64);
                     aligned_ts.push(expected_ts);
                 }
             }
-        } else if let Some(field_column) = &field_column && field_column.value(cursor).is_nan() {
+        } else if let Some(field_column) = &field_column
+            && field_column.value(cursor).is_nan()
+        {
             // if the newest value is NaN, it means the value is stale, so we should not use it
         } else {
             // use this point
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index 5ba138dc5ecd..137035755bd5 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -419,7 +419,7 @@ impl PromPlanner {
                 .time_index_column
                 .clone()
                 .expect("time index should be set in `setup_context`"),
-            self.ctx.field_columns.get(0).cloned(),
+            self.ctx.field_columns.first().cloned(),
             normalize,
         );
         LogicalPlan::Extension(Extension {
diff --git a/src/query/src/dist_plan/analyzer.rs b/src/query/src/dist_plan/analyzer.rs
index a7f4fa5b4c26..10c2c2e9e25b 100644
--- a/src/query/src/dist_plan/analyzer.rs
+++ b/src/query/src/dist_plan/analyzer.rs
@@ -158,17 +158,19 @@ impl PlanRewriter {
             }
             Commutativity::ConditionalCommutative(transformer) => {
                 if let Some(transformer) = transformer
-                    && let Some(plan) = transformer(plan) {
+                    && let Some(plan) = transformer(plan)
+                {
                     self.stage.push(plan)
                 }
-            },
+            }
             Commutativity::TransformedCommutative(transformer) => {
                 if let Some(transformer) = transformer
-                    && let Some(plan) = transformer(plan) {
+                    && let Some(plan) = transformer(plan)
+                {
                     self.stage.push(plan)
                 }
-            },
-            | Commutativity::NonCommutative
+            }
+            Commutativity::NonCommutative
             | Commutativity::Unimplemented
             | Commutativity::Unsupported => {
                 return true;
diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs
index 30c7fbca193f..4311c0d0e388 100644
--- a/src/script/src/python/engine.rs
+++ b/src/script/src/python/engine.rs
@@ -114,7 +114,7 @@ impl Function for PyUDF {
         _input_types: &[datatypes::prelude::ConcreteDataType],
     ) -> common_query::error::Result<ConcreteDataType> {
         // TODO(discord9): use correct return annotation if exist
-        match self.copr.return_types.get(0) {
+        match self.copr.return_types.first() {
             Some(Some(AnnotationInfo {
                 datatype: Some(ty),
                 ..
             })) => Ok(ty.clone()),
diff --git a/src/script/src/python/ffi_types/copr/parse.rs b/src/script/src/python/ffi_types/copr/parse.rs
index 2c27b8bbd681..291659e1aacf 100644
--- a/src/script/src/python/ffi_types/copr/parse.rs
+++ b/src/script/src/python/ffi_types/copr/parse.rs
@@ -272,7 +272,7 @@ fn parse_keywords(keywords: &Vec>) -> Result {
                 "Expect between {len_min} and {len_max} keyword argument, found {}.",
                 keywords.len()
             ),
-            loc: keywords.get(0).map(|s| s.location)
+            loc: keywords.first().map(|s| s.location)
         }
     );
     let mut ret_args = DecoratorArgs::default();
diff --git a/src/servers/src/http/opentsdb.rs b/src/servers/src/http/opentsdb.rs
index 054595252ad3..a963fe81efd7 100644
--- a/src/servers/src/http/opentsdb.rs
+++ b/src/servers/src/http/opentsdb.rs
@@ -55,11 +55,7 @@ impl From<DataPointRequest> for DataPoint {
     fn from(request: DataPointRequest) -> Self {
         let ts_millis = DataPoint::timestamp_to_millis(request.timestamp);

-        let tags = request
-            .tags
-            .into_iter()
-            .map(|(k, v)| (k, v))
-            .collect::<Vec<_>>();
+        let tags = request.tags.into_iter().collect::<Vec<_>>();

         DataPoint::new(request.metric, ts_millis, request.value, tags)
     }
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index 3dc5ce1983e7..2720ee5a9522 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -211,7 +211,7 @@ mod tests {
             .unwrap()
         {
             Statement::CreateTable(CreateTable { columns, .. }) => {
-                let ts_col = columns.get(0).unwrap();
+                let ts_col = columns.first().unwrap();
                 assert_eq!(
                     expected_type,
                     sql_data_type_to_concrete_data_type(&ts_col.data_type).unwrap()
diff --git a/src/sql/src/parsers/copy_parser.rs b/src/sql/src/parsers/copy_parser.rs
index 17ee3df152c9..82f50dcf585b 100644
--- a/src/sql/src/parsers/copy_parser.rs
+++ b/src/sql/src/parsers/copy_parser.rs
@@ -33,7 +33,9 @@ impl<'a> ParserContext<'a> {
     pub(crate) fn parse_copy(&mut self) -> Result<Statement> {
         let _ = self.parser.next_token();
         let next = self.parser.peek_token();
-        let copy = if let Word(word) = next.token && word.keyword == Keyword::DATABASE {
+        let copy = if let Word(word) = next.token
+            && word.keyword == Keyword::DATABASE
+        {
             let _ = self.parser.next_token();
             let copy_database = self.parser_copy_database()?;
             crate::statements::copy::Copy::CopyDatabase(copy_database)
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index d53b512fe736..66318c91f012 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -783,7 +783,7 @@ fn ensure_value_lists_strictly_increased<'a>(
 /// Ensure that value list's length matches the column list.
 fn ensure_value_list_len_matches_columns(
     partitions: &Partitions,
-    partition_columns: &Vec<&ColumnDef>,
+    partition_columns: &[&ColumnDef],
 ) -> Result<()> {
     for entry in partitions.entries.iter() {
         ensure!(
diff --git a/src/sql/src/statements/insert.rs b/src/sql/src/statements/insert.rs
index eaf9785f520e..e08d7247d9c1 100644
--- a/src/sql/src/statements/insert.rs
+++ b/src/sql/src/statements/insert.rs
@@ -107,7 +107,7 @@ impl Insert {
     }
 }

-fn sql_exprs_to_values(exprs: &Vec<Vec<Expr>>) -> Result<Vec<Vec<Value>>> {
+fn sql_exprs_to_values(exprs: &[Vec<Expr>]) -> Result<Vec<Vec<Value>>> {
     let mut values = Vec::with_capacity(exprs.len());
     for es in exprs.iter() {
         let mut vs = Vec::with_capacity(es.len());
diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs
index c47b3dded219..2e1c04d5dfae 100644
--- a/src/table/src/predicate.rs
+++ b/src/table/src/predicate.rs
@@ -433,7 +433,6 @@ mod tests {

     use common_test_util::temp_dir::{create_temp_dir, TempDir};
     use datafusion::parquet::arrow::ArrowWriter;
-    pub use datafusion::parquet::schema::types::BasicTypeInfo;
     use datafusion_common::{Column, ScalarValue};
     use datafusion_expr::{col, lit, BinaryExpr, Literal, Operator};
     use datatypes::arrow::array::Int32Array;
diff --git a/tests-integration/src/otlp.rs b/tests-integration/src/otlp.rs
index 942fad14a551..9afbe1b9c3e9 100644
--- a/tests-integration/src/otlp.rs
+++ b/tests-integration/src/otlp.rs
@@ -61,7 +61,7 @@ mod test {
             ctx.clone(),
         )
         .await
-        .get(0)
+        .first()
         .unwrap()
         .is_ok());

diff --git a/tests-integration/src/prom_store.rs b/tests-integration/src/prom_store.rs
index c3186c9af199..83cdba88e21b 100644
--- a/tests-integration/src/prom_store.rs
+++ b/tests-integration/src/prom_store.rs
@@ -63,7 +63,7 @@ mod tests {
             ctx.clone(),
        )
         .await
-        .get(0)
+        .first()
         .unwrap()
         .is_ok());

diff --git a/tests-integration/tests/region_failover.rs b/tests-integration/tests/region_failover.rs
index 47dac1ca315f..be1ea3ba92f3 100644
--- a/tests-integration/tests/region_failover.rs
+++ b/tests-integration/tests/region_failover.rs
@@ -240,7 +240,7 @@ CREATE TABLE my_table (
   PARTITION r3 VALUES LESS THAN (MAXVALUE),
 )";
     let result = cluster.frontend.do_query(sql, QueryContext::arc()).await;
-    result.get(0).unwrap().as_ref().unwrap();
+    result.first().unwrap().as_ref().unwrap();

     let table = cluster
         .frontend