diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index c06c1c4955..2d4a9e92d0 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -40,6 +40,8 @@ jobs: run: cargo clippy --verbose --all-targets - name: Clippy check with all features run: cargo clippy --verbose --all-targets --all-features + - name: Cargo check with cpp_rust_unstable cfg + run: RUSTFLAGS="--cfg cpp_rust_unstable" cargo clippy --verbose --all-targets --all-features - name: Cargo check without features run: cargo check --all-targets --manifest-path "scylla/Cargo.toml" --features "" - name: Cargo check with all serialization features diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f152583b2e..05d68cbfd8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -49,6 +49,18 @@ The above commands will leave a running ScyllaDB cluster in the background. To stop it, use `make down`.\ Starting a cluster without running any test is possible with `make up`. +### Writing tests that need to connect to Scylla + +If your test requires connecting to Scylla, there are a few things you should consider. + +1. Such tests are considered integration tests and should be placed in `scylla/tests/integration`. +2. To avoid name conflicts while creating a keyspace, use the `unique_keyspace_name` function from the `utils` module. +3. This `utils` module (`scylla/tests/integration/utils.rs`) contains other functions that may be helpful for writing tests. + For example, `create_new_session_builder` or `test_with_3_node_cluster`. +4. To perform DDL queries (creating / altering / dropping a keyspace / table / type), use the `ddl` method from the `utils` module. + To do this, import the `PerformDDL` trait (`use crate::utils::PerformDDL;`). Then you can call the `ddl` method on a + `Session`. + ### Tracing in tests By default cargo captures `print!` macro's output from tests and prints them for failed tests.
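For illustration, here is a minimal sketch of an integration test that follows the CONTRIBUTING.md steps added above. The helper names (`create_new_session_builder`, `unique_keyspace_name`, and the `PerformDDL` trait with its `ddl` method) come from that list; the exact signatures, the `#[tokio::test]` scaffolding, and the CQL statements are assumptions for illustration, not part of this diff.

```rust
// Hypothetical test in scylla/tests/integration/, assuming the `utils` helpers
// described in CONTRIBUTING.md above; exact signatures are illustrative.
use crate::utils::{create_new_session_builder, unique_keyspace_name, PerformDDL};

#[tokio::test]
async fn example_test_that_connects_to_scylla() {
    // Connect to the local cluster started in the background with `make up`.
    let session = create_new_session_builder().build().await.unwrap();

    // A unique keyspace name avoids conflicts with other tests.
    let ks = unique_keyspace_name();

    // DDL statements (creating / altering / dropping a keyspace / table / type)
    // go through the `ddl` method provided by the `PerformDDL` trait.
    session
        .ddl(format!(
            "CREATE KEYSPACE {ks} WITH REPLICATION = \
            {{'class': 'NetworkTopologyStrategy', 'replication_factor': 1}}"
        ))
        .await
        .unwrap();
    session
        .ddl(format!("CREATE TABLE {ks}.t (a int PRIMARY KEY, b text)"))
        .await
        .unwrap();

    // Regular (non-DDL) statements use the normal Session API.
    session
        .query_unpaged(format!("INSERT INTO {ks}.t (a, b) VALUES (1, 'one')"), &[])
        .await
        .unwrap();
}
```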
diff --git a/Cargo.lock.msrv b/Cargo.lock.msrv index 0c816faa25..531302ff82 100644 --- a/Cargo.lock.msrv +++ b/Cargo.lock.msrv @@ -91,7 +91,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -397,7 +397,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -408,7 +408,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -627,7 +627,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -1097,7 +1097,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -1241,18 +1241,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.33" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -1354,7 +1354,7 @@ checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom", "redox_syscall 0.2.16", - "thiserror", + "thiserror 1.0.48", ] [[package]] @@ -1477,7 +1477,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scylla" -version = "0.15.0" +version = "0.15.1" dependencies = [ "arc-swap", "assert_matches", @@ -1510,7 +1510,7 @@ dependencies = [ "smallvec", "snap", "socket2", - "thiserror", + "thiserror 2.0.6", "time", "tokio", "tokio-openssl", @@ -1522,7 +1522,7 @@ dependencies = [ [[package]] name = "scylla-cql" -version = "0.4.0" +version = "0.4.1" dependencies = [ "assert_matches", "async-trait", @@ -1540,7 +1540,7 @@ dependencies = [ "serde", "snap", "stable_deref_trait", - "thiserror", + "thiserror 2.0.6", "time", "tokio", "uuid", @@ -1549,12 +1549,12 @@ dependencies = [ [[package]] name = "scylla-macros" -version = "0.7.0" +version = "0.7.1" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -1571,7 +1571,7 @@ dependencies = [ "num-bigint 0.3.3", "rand", "scylla-cql", - "thiserror", + "thiserror 2.0.6", "tokio", "tracing", "tracing-subscriber", @@ -1604,7 +1604,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -1729,9 +1729,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.32" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -1746,7 +1746,7 @@ checksum = 
"c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -1770,7 +1770,16 @@ version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.48", +] + +[[package]] +name = "thiserror" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +dependencies = [ + "thiserror-impl 2.0.6", ] [[package]] @@ -1781,7 +1790,18 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", ] [[package]] @@ -1872,7 +1892,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -1948,7 +1968,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -2121,7 +2141,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", "wasm-bindgen-shared", ] @@ -2143,7 +2163,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2365,7 +2385,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", "synstructure", ] @@ -2386,7 +2406,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", ] [[package]] @@ -2406,7 +2426,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.90", "synstructure", ] diff --git a/docs/source/conf.py b/docs/source/conf.py index 9e87cf1e79..b3ae513de1 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -13,10 +13,10 @@ # -- Global variables # Build documentation for the following tags and branches -TAGS = ['v0.14.0', 'v0.15.0'] +TAGS = ['v0.14.0', 'v0.15.1'] BRANCHES = ['main'] # Set the latest version. -LATEST_VERSION = 'v0.15.0' +LATEST_VERSION = 'v0.15.1' # Set which versions are not released yet. UNSTABLE_VERSIONS = ['main'] # Set which versions are deprecated diff --git a/docs/source/data-types/date.md b/docs/source/data-types/date.md index a0166db41e..cebbcb4803 100644 --- a/docs/source/data-types/date.md +++ b/docs/source/data-types/date.md @@ -42,7 +42,7 @@ while let Some((date_value,)) = iter.try_next().await? { ## chrono::NaiveDate -If full range is not required and `chrono` feature is enabled, +If full range is not required and `chrono-04` feature is enabled, [`chrono::NaiveDate`](https://docs.rs/chrono/0.4/chrono/naive/struct.NaiveDate.html) can be used. 
[`chrono::NaiveDate`](https://docs.rs/chrono/0.4/chrono/naive/struct.NaiveDate.html) supports dates from -262145-01-01 to 262143-12-31. @@ -78,7 +78,7 @@ while let Some((date_value,)) = iter.try_next().await? { ## time::Date -Alternatively, `time` feature can be used to enable support of +Alternatively, the `time-03` feature can be used to enable support of [`time::Date`](https://docs.rs/time/0.3/time/struct.Date.html). [`time::Date`](https://docs.rs/time/0.3/time/struct.Date.html)'s value range depends on feature flags, see its documentation to get more info. diff --git a/docs/source/data-types/time.md b/docs/source/data-types/time.md index 03c4a524bf..bcc1c2ddfc 100644 --- a/docs/source/data-types/time.md +++ b/docs/source/data-types/time.md @@ -42,7 +42,7 @@ while let Some((value,)) = iter.try_next().await? { ## chrono::NaiveTime -If `chrono` feature is enabled, [`chrono::NaiveTime`](https://docs.rs/chrono/0.4/chrono/naive/struct.NaiveDate.html) +If the `chrono-04` feature is enabled, [`chrono::NaiveTime`](https://docs.rs/chrono/0.4/chrono/naive/struct.NaiveTime.html) can be used to interact with the database. Although chrono can represent leap seconds, they are not supported. Attempts to convert [`chrono::NaiveTime`](https://docs.rs/chrono/0.4/chrono/naive/struct.NaiveDate.html) with leap second to `CqlTime` or write it to the database will return an error. @@ -78,7 +78,7 @@ while let Some((time_value,)) = iter.try_next().await? { ## time::Time -If `time` feature is enabled, [`time::Time`](https://docs.rs/time/0.3/time/struct.Time.html) can be used to interact +If the `time-03` feature is enabled, [`time::Time`](https://docs.rs/time/0.3/time/struct.Time.html) can be used to interact with the database. ```rust diff --git a/docs/source/data-types/timestamp.md b/docs/source/data-types/timestamp.md index 0ddbf118d0..d34a48058a 100644 --- a/docs/source/data-types/timestamp.md +++ b/docs/source/data-types/timestamp.md @@ -43,7 +43,7 @@ while let Some((value,)) = iter.try_next().await? { ## chrono::DateTime -If full value range is not required, `chrono` feature can be used to enable support of +If the full value range is not required, the `chrono-04` feature can be used to enable support of [`chrono::DateTime`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html). All values are expected to be converted to UTC timezone explicitly, as [timestamp](https://docs.scylladb.com/stable/cql/types.html#timestamps) doesn't store timezone information. Any precision finer than 1ms will be lost. @@ -83,7 +83,7 @@ while let Some((timestamp_value,)) = iter.try_next().await? { ## time::OffsetDateTime -Alternatively, `time` feature can be used to enable support of +Alternatively, the `time-03` feature can be used to enable support of [`time::OffsetDateTime`](https://docs.rs/time/0.3/time/struct.OffsetDateTime.html). As [timestamp](https://docs.scylladb.com/stable/cql/types.html#timestamps) doesn't support timezone information, time will be corrected to UTC and timezone info will be erased on write. On read, UTC timestamp is returned.
Any precision finer than 1ms will be lost. diff --git a/examples/cqlsh-rs.rs b/examples/cqlsh-rs.rs index 04e303d255..1a2941c900 100644 --- a/examples/cqlsh-rs.rs +++ b/examples/cqlsh-rs.rs @@ -196,7 +196,10 @@ fn print_result(result: QueryResult) -> Result<(), IntoRowsResultError> { } Ok(()) } - Err(IntoRowsResultError::ResultNotRows(_)) => Ok(println!("OK")), + Err(IntoRowsResultError::ResultNotRows(_)) => { + println!("OK"); + Ok(()) + } Err(e) => Err(e), } } diff --git a/scylla-cql/Cargo.toml b/scylla-cql/Cargo.toml index e80fa716d2..4703ceba0a 100644 --- a/scylla-cql/Cargo.toml +++ b/scylla-cql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "scylla-cql" -version = "0.4.0" +version = "0.4.1" edition = "2021" rust-version = "1.70" description = "CQL data types and primitives, for interacting with Scylla." @@ -11,14 +11,14 @@ categories = ["database"] license = "MIT OR Apache-2.0" [dependencies] -scylla-macros = { version = "0.7.0", path = "../scylla-macros" } +scylla-macros = { version = "0.7.1", path = "../scylla-macros" } byteorder = "1.3.4" bytes = "1.0.1" tokio = { version = "1.34", features = ["io-util", "time"] } secrecy-08 = { package = "secrecy", version = "0.8", optional = true } snap = "1.0" uuid = "1.0" -thiserror = "1.0" +thiserror = "2.0.6" num-bigint-03 = { package = "num-bigint", version = "0.3", optional = true } num-bigint-04 = { package = "num-bigint", version = "0.4", optional = true } bigdecimal-04 = { package = "bigdecimal", version = "0.4", optional = true } diff --git a/scylla-cql/src/frame/frame_errors.rs b/scylla-cql/src/frame/frame_errors.rs index 1f12a6008e..2eac4c50f6 100644 --- a/scylla-cql/src/frame/frame_errors.rs +++ b/scylla-cql/src/frame/frame_errors.rs @@ -425,6 +425,7 @@ pub struct ColumnSpecParseError { pub enum ColumnSpecParseErrorKind { #[error("Invalid table spec: {0}")] TableSpecParseError(#[from] TableSpecParseError), + // TODO: remove this variant before the next major release. #[error("Table spec differs across columns - got specs: {0:?} and {1:?}")] TableSpecDiffersAcrossColumns(TableSpec<'static>, TableSpec<'static>), #[error("Malformed column name: {0}")] diff --git a/scylla-cql/src/frame/request/batch.rs b/scylla-cql/src/frame/request/batch.rs index e193fbbfda..815d78c022 100644 --- a/scylla-cql/src/frame/request/batch.rs +++ b/scylla-cql/src/frame/request/batch.rs @@ -228,6 +228,9 @@ impl BatchStatement<'_> { } } +// Disable the lint if there is more than one lifetime included. +// Can be removed once https://github.com/rust-lang/rust-clippy/issues/12495 is fixed.
+#[allow(clippy::needless_lifetimes)] impl<'s, 'b> From<&'s BatchStatement<'b>> for BatchStatement<'s> { fn from(value: &'s BatchStatement) -> Self { match value { diff --git a/scylla-cql/src/frame/request/execute.rs b/scylla-cql/src/frame/request/execute.rs index e04ec1b205..ee758b01c5 100644 --- a/scylla-cql/src/frame/request/execute.rs +++ b/scylla-cql/src/frame/request/execute.rs @@ -36,7 +36,7 @@ impl SerializableRequest for Execute<'_> { } } -impl<'e> DeserializableRequest for Execute<'e> { +impl DeserializableRequest for Execute<'_> { fn deserialize(buf: &mut &[u8]) -> Result { let id = types::read_short_bytes(buf)?.to_vec().into(); let parameters = QueryParameters::deserialize(buf)?; diff --git a/scylla-cql/src/frame/request/mod.rs b/scylla-cql/src/frame/request/mod.rs index feef653b97..f2f8837da2 100644 --- a/scylla-cql/src/frame/request/mod.rs +++ b/scylla-cql/src/frame/request/mod.rs @@ -140,7 +140,7 @@ pub enum Request<'r> { Batch(Batch<'r, BatchStatement<'r>, Vec<SerializedValues>>), } -impl<'r> Request<'r> { +impl Request<'_> { pub fn deserialize( buf: &mut &[u8], opcode: RequestOpcode, diff --git a/scylla-cql/src/frame/request/prepare.rs b/scylla-cql/src/frame/request/prepare.rs index 5d209263e7..b4f2d26f09 100644 --- a/scylla-cql/src/frame/request/prepare.rs +++ b/scylla-cql/src/frame/request/prepare.rs @@ -13,7 +13,7 @@ pub struct Prepare<'a> { pub query: &'a str, } -impl<'a> SerializableRequest for Prepare<'a> { +impl SerializableRequest for Prepare<'_> { const OPCODE: RequestOpcode = RequestOpcode::Prepare; fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), CqlRequestSerializationError> { diff --git a/scylla-cql/src/frame/request/query.rs b/scylla-cql/src/frame/request/query.rs index 8567cbd419..c0974b700e 100644 --- a/scylla-cql/src/frame/request/query.rs +++ b/scylla-cql/src/frame/request/query.rs @@ -49,7 +49,7 @@ impl SerializableRequest for Query<'_> { } } -impl<'q> DeserializableRequest for Query<'q> { +impl DeserializableRequest for Query<'_> { fn deserialize(buf: &mut &[u8]) -> Result { let contents = Cow::Owned(types::read_long_string(buf)?.to_owned()); let parameters = QueryParameters::deserialize(buf)?; @@ -146,7 +146,7 @@ impl QueryParameters<'_> { } } -impl<'q> QueryParameters<'q> { +impl QueryParameters<'_> { pub fn deserialize(buf: &mut &[u8]) -> Result { let consistency = types::read_consistency(buf)?; diff --git a/scylla-cql/src/frame/response/cql_to_rust.rs b/scylla-cql/src/frame/response/cql_to_rust.rs index a2a450ea9e..89330c6e40 100644 --- a/scylla-cql/src/frame/response/cql_to_rust.rs +++ b/scylla-cql/src/frame/response/cql_to_rust.rs @@ -10,6 +10,11 @@ use std::net::IpAddr; use thiserror::Error; use uuid::Uuid; +#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] +#[allow(deprecated)] #[derive(Error, Debug, Clone, PartialEq, Eq)] pub enum FromRowError { #[error("{err} in the column with index {column}")] @@ -29,6 +34,10 @@ pub trait FromCqlVal<T>: Sized { fn from_cql(cql_val: T) -> Result<Self, FromCqlValError>; } +#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] #[derive(Error, Debug, Clone, PartialEq, Eq)] pub enum FromCqlValError { #[error("Bad CQL type")] @@ -99,6 +108,10 @@ impl<T: FromCqlVal<CqlValue>> FromCqlVal<Option<CqlValue>> for Option<T> { /// /// impl_from_cql_value_from_method!(MyBytes, into_my_bytes); /// ``` +#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] #[macro_export] macro_rules!
impl_from_cql_value_from_method { ($T:ty, $convert_func:ident) => { diff --git a/scylla-cql/src/frame/response/result.rs b/scylla-cql/src/frame/response/result.rs index 4858d0efc1..02620de536 100644 --- a/scylla-cql/src/frame/response/result.rs +++ b/scylla-cql/src/frame/response/result.rs @@ -83,7 +83,7 @@ pub enum ColumnType<'frame> { Varint, } -impl<'frame> ColumnType<'frame> { +impl ColumnType<'_> { pub fn into_owned(self) -> ColumnType<'static> { match self { ColumnType::Custom(cow) => ColumnType::Custom(cow.into_owned().into()), @@ -977,61 +977,6 @@ fn mk_col_spec_parse_error( } } -/// Deserializes table spec of a column spec in the borrowed form. -/// -/// Checks for equality of table specs across columns, because the protocol -/// does not guarantee that and we want to be sure that the assumption -/// of them being all the same is correct. -/// To this end, the first column's table spec is written to `known_table_spec` -/// and compared with remaining columns' table spec. -/// -/// To avoid needless allocations, it is advised to pass `known_table_spec` -/// in the borrowed form, so that cloning it is cheap. -fn deser_table_spec_for_col_spec<'frame>( - buf: &'_ mut &'frame [u8], - global_table_spec_provided: bool, - known_table_spec: &'_ mut Option<TableSpec<'frame>>, - col_idx: usize, -) -> StdResult<TableSpec<'frame>, ColumnSpecParseError> { - let table_spec = match known_table_spec { - // If global table spec was provided, we simply clone it to each column spec. - Some(ref known_spec) if global_table_spec_provided => known_spec.clone(), - - // Else, we deserialize the table spec for a column and, if we already know some - // previous spec (i.e. that of the first column), we perform equality check - // against it. - Some(_) | None => { - let table_spec = - deser_table_spec(buf).map_err(|err| mk_col_spec_parse_error(col_idx, err))?; - - if let Some(ref known_spec) = known_table_spec { - // We assume that for each column, table spec is the same. - // As this is not guaranteed by the CQL protocol specification but only by how - // Cassandra and ScyllaDB work (no support for joins), we perform a sanity check here. - if known_spec.table_name != table_spec.table_name - || known_spec.ks_name != table_spec.ks_name - { - return Err(mk_col_spec_parse_error( - col_idx, - ColumnSpecParseErrorKind::TableSpecDiffersAcrossColumns( - known_spec.clone().into_owned(), - table_spec.into_owned(), - ), - )); - } - } else { - // Once we have read the first column spec, we save its table spec - // in order to verify its equality with other columns'. - *known_table_spec = Some(table_spec.clone()); - } - - table_spec - } - }; - - Ok(table_spec) -} - fn deser_col_specs_generic<'frame, 'result>( buf: &mut &'frame [u8], global_table_spec: Option<TableSpec<'frame>>, col_count: usize, make_col_spec: fn(&'frame str, ColumnType<'result>, TableSpec<'frame>) -> ColumnSpec<'result>, deser_type: fn(&mut &'frame [u8]) -> StdResult<ColumnType<'result>, CqlTypeParseError>, ) -> StdResult<Vec<ColumnSpec<'result>>, ColumnSpecParseError> { - let global_table_spec_provided = global_table_spec.is_some(); - let mut known_table_spec = global_table_spec; - let mut col_specs = Vec::with_capacity(col_count); for col_idx in 0..col_count { - let table_spec = deser_table_spec_for_col_spec( - buf, - global_table_spec_provided, - &mut known_table_spec, - col_idx, - )?; + let table_spec = match global_table_spec { + // If global table spec was provided, we simply clone it to each column spec.
+ Some(ref known_spec) => known_spec.clone(), + + // Else, we deserialize the table spec for a column. + None => deser_table_spec(buf).map_err(|err| mk_col_spec_parse_error(col_idx, err))?, + }; let name = types::read_string(buf).map_err(|err| mk_col_spec_parse_error(col_idx, err))?; let typ = deser_type(buf).map_err(|err| mk_col_spec_parse_error(col_idx, err))?; @@ -1062,10 +1005,6 @@ /// Deserializes col specs (part of ResultMetadata or PreparedMetadata) /// in the borrowed form. /// -/// Checks for equality of table specs across columns, because the protocol -/// does not guarantee that and we want to be sure that the assumption -/// of them being all the same is correct. -/// /// To avoid needless allocations, it is advised to pass `global_table_spec` /// in the borrowed form, so that cloning it is cheap. fn deser_col_specs_borrowed<'frame>( @@ -1085,10 +1024,6 @@ /// Deserializes col specs (part of ResultMetadata or PreparedMetadata) /// in the owned form. /// -/// Checks for equality of table specs across columns, because the protocol -/// does not guarantee that and we want to be sure that the assumption -/// of them being all the same is correct. -/// /// To avoid needless allocations, it is advised to pass `global_table_spec` /// in the borrowed form, so that cloning it is cheap. fn deser_col_specs_owned<'frame>( @@ -2475,6 +2410,7 @@ mod tests { ); } + #[allow(deprecated)] #[test] fn test_serialize_empty() { use crate::frame::value::Value; diff --git a/scylla-cql/src/frame/value.rs b/scylla-cql/src/frame/value.rs index c479b64d01..036511e4e3 100644 --- a/scylla-cql/src/frame/value.rs +++ b/scylla-cql/src/frame/value.rs @@ -12,16 +12,6 @@ use super::response::result::CqlValue; use super::types::vint_encode; use super::types::RawValue; -/// Every value being sent in a query must implement this trait -/// serialize() should write the Value as [bytes] to the provided buffer -pub trait Value { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig>; -} - -#[derive(Debug, Error, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -#[error("Value too big to be sent in a request - max 2GiB allowed")] -pub struct ValueTooBig; #[derive(Debug, Error, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] #[error("Value is too large to fit in the CQL type")] pub struct ValueOverflow; @@ -650,15 +640,6 @@ impl TryInto for CqlTime { } } -/// Keeps a buffer with serialized Values -/// Allows adding new Values and iterating over serialized ones -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct LegacySerializedValues { - serialized_values: Vec<u8>, - values_num: u16, - contains_names: bool, -} /// Represents a CQL Duration value #[derive(Clone, Debug, Copy, PartialEq, Eq)] pub struct CqlDuration { @@ -667,754 +648,784 @@ pub struct CqlDuration { pub nanoseconds: i64, } -#[derive(Debug, Error, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub enum SerializeValuesError { - #[error("Too many values to add, max 65,535 values can be sent in a request")] - TooManyValues, - #[error("Mixing named and not named values is not allowed")] - MixingNamedAndNotNamedValues, - #[error(transparent)] - ValueTooBig(#[from] ValueTooBig), - #[error("Parsing serialized values failed")] - ParseError, -} +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] +mod legacy { + #![allow(deprecated)] -pub type SerializedResult<'a> = Result<Cow<'a, LegacySerializedValues>, SerializeValuesError>; +
use super::*; -/// Represents list of values to be sent in a query -/// gets serialized and but into request -pub trait ValueList { - /// Provides a view of ValueList as LegacySerializedValues - /// returns `Cow` to make impl ValueList for LegacySerializedValues efficient - fn serialized(&self) -> SerializedResult<'_>; + /// Every value being sent in a query must implement this trait + /// serialize() should write the Value as [bytes] to the provided buffer + pub trait Value { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig>; + } - fn write_to_request(&self, buf: &mut impl BufMut) -> Result<(), SerializeValuesError> { - let serialized = self.serialized()?; - LegacySerializedValues::write_to_request(&serialized, buf); + #[derive(Debug, Error, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] + #[error("Value too big to be sent in a request - max 2GiB allowed")] + pub struct ValueTooBig; - Ok(()) + #[derive(Debug, Error, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] + pub enum SerializeValuesError { + #[error("Too many values to add, max 65,535 values can be sent in a request")] + TooManyValues, + #[error("Mixing named and not named values is not allowed")] + MixingNamedAndNotNamedValues, + #[error(transparent)] + ValueTooBig(#[from] ValueTooBig), + #[error("Parsing serialized values failed")] + ParseError, } -} -impl Default for LegacySerializedValues { - fn default() -> Self { - Self::new() + /// Keeps a buffer with serialized Values + /// Allows adding new Values and iterating over serialized ones + #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] + pub struct LegacySerializedValues { + serialized_values: Vec<u8>, + values_num: u16, + contains_names: bool, } -} -impl LegacySerializedValues { - /// Creates empty value list - pub const fn new() -> Self { - LegacySerializedValues { - serialized_values: Vec::new(), - values_num: 0, - contains_names: false, - } - } + pub type SerializedResult<'a> = Result<Cow<'a, LegacySerializedValues>, SerializeValuesError>; - pub fn with_capacity(capacity: usize) -> Self { - LegacySerializedValues { - serialized_values: Vec::with_capacity(capacity), - values_num: 0, - contains_names: false, + /// Represents list of values to be sent in a query + /// gets serialized and put into request + pub trait ValueList { + /// Provides a view of ValueList as LegacySerializedValues + /// returns `Cow` to make impl ValueList for LegacySerializedValues efficient + fn serialized(&self) -> SerializedResult<'_>; + + fn write_to_request(&self, buf: &mut impl BufMut) -> Result<(), SerializeValuesError> { + let serialized = self.serialized()?; + LegacySerializedValues::write_to_request(&serialized, buf); + + Ok(()) } } - pub fn has_names(&self) -> bool { - self.contains_names + impl Default for LegacySerializedValues { + fn default() -> Self { + Self::new() + } } - /// A const empty instance, useful for taking references - pub const EMPTY: &'static LegacySerializedValues = &LegacySerializedValues::new(); + impl LegacySerializedValues { + /// Creates empty value list + pub const fn new() -> Self { + LegacySerializedValues { + serialized_values: Vec::new(), + values_num: 0, + contains_names: false, + } + } - /// Serializes value and appends it to the list - pub fn add_value(&mut self, val: &impl Value) -> Result<(), SerializeValuesError> { - if self.contains_names { - return Err(SerializeValuesError::MixingNamedAndNotNamedValues); + pub fn with_capacity(capacity: usize) -> Self { + LegacySerializedValues { + serialized_values: Vec::with_capacity(capacity), + values_num: 0, + contains_names: false,
+ } } - if self.values_num == u16::MAX { - return Err(SerializeValuesError::TooManyValues); + + pub fn has_names(&self) -> bool { + self.contains_names } - let len_before_serialize: usize = self.serialized_values.len(); + /// A const empty instance, useful for taking references + pub const EMPTY: &'static LegacySerializedValues = &LegacySerializedValues::new(); - if let Err(e) = val.serialize(&mut self.serialized_values) { - self.serialized_values.resize(len_before_serialize, 0); - return Err(SerializeValuesError::from(e)); - } + /// Serializes value and appends it to the list + pub fn add_value(&mut self, val: &impl Value) -> Result<(), SerializeValuesError> { + if self.contains_names { + return Err(SerializeValuesError::MixingNamedAndNotNamedValues); + } + if self.values_num == u16::MAX { + return Err(SerializeValuesError::TooManyValues); + } - self.values_num += 1; - Ok(()) - } + let len_before_serialize: usize = self.serialized_values.len(); - pub fn add_named_value( - &mut self, - name: &str, - val: &impl Value, - ) -> Result<(), SerializeValuesError> { - if self.values_num > 0 && !self.contains_names { - return Err(SerializeValuesError::MixingNamedAndNotNamedValues); - } - self.contains_names = true; - if self.values_num == u16::MAX { - return Err(SerializeValuesError::TooManyValues); + if let Err(e) = val.serialize(&mut self.serialized_values) { + self.serialized_values.resize(len_before_serialize, 0); + return Err(SerializeValuesError::from(e)); + } + + self.values_num += 1; + Ok(()) } - let len_before_serialize: usize = self.serialized_values.len(); + pub fn add_named_value( + &mut self, + name: &str, + val: &impl Value, + ) -> Result<(), SerializeValuesError> { + if self.values_num > 0 && !self.contains_names { + return Err(SerializeValuesError::MixingNamedAndNotNamedValues); + } + self.contains_names = true; + if self.values_num == u16::MAX { + return Err(SerializeValuesError::TooManyValues); + } + + let len_before_serialize: usize = self.serialized_values.len(); + + types::write_string(name, &mut self.serialized_values) + .map_err(|_| SerializeValuesError::ParseError)?; - types::write_string(name, &mut self.serialized_values) - .map_err(|_| SerializeValuesError::ParseError)?; + if let Err(e) = val.serialize(&mut self.serialized_values) { + self.serialized_values.resize(len_before_serialize, 0); + return Err(SerializeValuesError::from(e)); + } - if let Err(e) = val.serialize(&mut self.serialized_values) { - self.serialized_values.resize(len_before_serialize, 0); - return Err(SerializeValuesError::from(e)); + self.values_num += 1; + Ok(()) } - self.values_num += 1; - Ok(()) - } + pub fn iter(&self) -> impl Iterator<Item = RawValue> { + LegacySerializedValuesIterator { + serialized_values: &self.serialized_values, + contains_names: self.contains_names, + } + } - pub fn iter(&self) -> impl Iterator<Item = RawValue> { - LegacySerializedValuesIterator { - serialized_values: &self.serialized_values, - contains_names: self.contains_names, + pub fn write_to_request(&self, buf: &mut impl BufMut) { + buf.put_u16(self.values_num); + buf.put(&self.serialized_values[..]); } - } - pub fn write_to_request(&self, buf: &mut impl BufMut) { - buf.put_u16(self.values_num); - buf.put(&self.serialized_values[..]); - } + pub fn is_empty(&self) -> bool { + self.values_num == 0 + } - pub fn is_empty(&self) -> bool { - self.values_num == 0 - } + pub fn len(&self) -> u16 { + self.values_num + } - pub fn len(&self) -> u16 { - self.values_num - } + pub fn size(&self) -> usize { + self.serialized_values.len() + } - pub fn size(&self) ->
usize { - self.serialized_values.len() + pub fn iter_name_value_pairs(&self) -> impl Iterator<Item = (Option<&str>, RawValue)> { + let mut buf = &self.serialized_values[..]; + (0..self.values_num).map(move |_| { + // `unwrap()`s here are safe, as we assume type-safety: if `LegacySerializedValues` exits, + // we have a guarantee that the layout of the serialized values is valid. + let name = self + .contains_names + .then(|| types::read_string(&mut buf).unwrap()); + let serialized = types::read_value(&mut buf).unwrap(); + (name, serialized) + }) + } } - pub fn iter_name_value_pairs(&self) -> impl Iterator<Item = (Option<&str>, RawValue)> { - let mut buf = &self.serialized_values[..]; - (0..self.values_num).map(move |_| { - // `unwrap()`s here are safe, as we assume type-safety: if `LegacySerializedValues` exits, - // we have a guarantee that the layout of the serialized values is valid. - let name = self - .contains_names - .then(|| types::read_string(&mut buf).unwrap()); - let serialized = types::read_value(&mut buf).unwrap(); - (name, serialized) - }) + #[derive(Clone, Copy)] + pub struct LegacySerializedValuesIterator<'a> { + serialized_values: &'a [u8], + contains_names: bool, } -} -#[derive(Clone, Copy)] -pub struct LegacySerializedValuesIterator<'a> { - serialized_values: &'a [u8], - contains_names: bool, -} + impl<'a> Iterator for LegacySerializedValuesIterator<'a> { + type Item = RawValue<'a>; -impl<'a> Iterator for LegacySerializedValuesIterator<'a> { - type Item = RawValue<'a>; + fn next(&mut self) -> Option<Self::Item> { + if self.serialized_values.is_empty() { + return None; + } - fn next(&mut self) -> Option<Self::Item> { - if self.serialized_values.is_empty() { - return None; - } + // In case of named values, skip names + if self.contains_names { + types::read_short_bytes(&mut self.serialized_values) + .expect("badly encoded value name"); + } - // In case of named values, skip names - if self.contains_names { - types::read_short_bytes(&mut self.serialized_values).expect("badly encoded value name"); + Some(types::read_value(&mut self.serialized_values).expect("badly encoded value")) } + } - Some(types::read_value(&mut self.serialized_values).expect("badly encoded value")) + /// Represents List of ValueList for Batch statement + pub trait LegacyBatchValues { + /// For some unknown reason, this type, when not resolved to a concrete type for a given async function, + /// cannot live across await boundaries while maintaining the corresponding future `Send`, unless `'r: 'static` + /// + /// See for more details + type LegacyBatchValuesIter<'r>: LegacyBatchValuesIterator<'r> + where + Self: 'r; + fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_>; } -} -/// Represents List of ValueList for Batch statement -pub trait LegacyBatchValues { - /// For some unknown reason, this type, when not resolved to a concrete type for a given async function, - /// cannot live across await boundaries while maintaining the corresponding future `Send`, unless `'r: 'static` + /// An iterator-like for `ValueList` /// - /// See for more details - type LegacyBatchValuesIter<'r>: LegacyBatchValuesIterator<'r> - where - Self: 'r; - fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_>; } - -/// An iterator-like for `ValueList` -/// -/// An instance of this can be easily obtained from `IT: Iterator`: that would be -/// `BatchValuesIteratorFromIterator` -/// -/// It's just essentially making methods from `ValueList` accessible instead of being an actual iterator because of -/// compiler limitations that would otherwise be very complex to overcome.
-/// (specifically, types being different would require yielding enums for tuple impls) -pub trait LegacyBatchValuesIterator<'a> { - fn next_serialized(&mut self) -> Option<SerializedResult<'a>>; - fn write_next_to_request( - &mut self, - buf: &mut impl BufMut, - ) -> Option<Result<(), SerializeValuesError>>; - fn skip_next(&mut self) -> Option<()>; - fn count(mut self) -> usize - where - Self: Sized, - { - let mut count = 0; - while self.skip_next().is_some() { - count += 1; + /// An instance of this can be easily obtained from `IT: Iterator`: that would be + /// `BatchValuesIteratorFromIterator` + /// + /// It's just essentially making methods from `ValueList` accessible instead of being an actual iterator because of + /// compiler limitations that would otherwise be very complex to overcome. + /// (specifically, types being different would require yielding enums for tuple impls) + pub trait LegacyBatchValuesIterator<'a> { + fn next_serialized(&mut self) -> Option<SerializedResult<'a>>; + fn write_next_to_request( + &mut self, + buf: &mut impl BufMut, + ) -> Option<Result<(), SerializeValuesError>>; + fn skip_next(&mut self) -> Option<()>; + fn count(mut self) -> usize + where + Self: Sized, + { + let mut count = 0; + while self.skip_next().is_some() { + count += 1; + } + count } - count } -} -/// Implements `BatchValuesIterator` from an `Iterator` over references to things that implement `ValueList` -/// -/// Essentially used internally by this lib to provide implementers of `BatchValuesIterator` for cases -/// that always serialize the same concrete `ValueList` type -pub struct LegacyBatchValuesIteratorFromIterator<IT> { - it: IT, -} - -impl<'r, 'a: 'r, IT, VL> LegacyBatchValuesIterator<'r> for LegacyBatchValuesIteratorFromIterator<IT> -where - IT: Iterator<Item = &'a VL>, - VL: ValueList + 'a, -{ - fn next_serialized(&mut self) -> Option<SerializedResult<'r>> { - self.it.next().map(|vl| vl.serialized()) - } - fn write_next_to_request( - &mut self, - buf: &mut impl BufMut, - ) -> Option<Result<(), SerializeValuesError>> { - self.it.next().map(|vl| vl.write_to_request(buf)) + /// Implements `BatchValuesIterator` from an `Iterator` over references to things that implement `ValueList` + /// + /// Essentially used internally by this lib to provide implementers of `BatchValuesIterator` for cases + /// that always serialize the same concrete `ValueList` type + pub struct LegacyBatchValuesIteratorFromIterator<IT> { + it: IT, } - fn skip_next(&mut self) -> Option<()> { - self.it.next().map(|_| ()) + + impl<'r, 'a: 'r, IT, VL> LegacyBatchValuesIterator<'r> for LegacyBatchValuesIteratorFromIterator<IT> + where + IT: Iterator<Item = &'a VL>, + VL: ValueList + 'a, + { + fn next_serialized(&mut self) -> Option<SerializedResult<'r>> { + self.it.next().map(|vl| vl.serialized()) + } + fn write_next_to_request( + &mut self, + buf: &mut impl BufMut, + ) -> Option<Result<(), SerializeValuesError>> { + self.it.next().map(|vl| vl.write_to_request(buf)) + } + fn skip_next(&mut self) -> Option<()> { + self.it.next().map(|_| ()) + } } -} -impl<IT> From<IT> for LegacyBatchValuesIteratorFromIterator<IT> -where - IT: Iterator, - IT::Item: ValueList, -{ - fn from(it: IT) -> Self { - LegacyBatchValuesIteratorFromIterator { it } + impl<IT> From<IT> for LegacyBatchValuesIteratorFromIterator<IT> + where + IT: Iterator, + IT::Item: ValueList, + { + fn from(it: IT) -> Self { + LegacyBatchValuesIteratorFromIterator { it } + } } -} -// -// Value impls -// + // + // Value impls + // -// Implement Value for primitive types -impl Value for i8 { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(1); - buf.put_i8(*self); - Ok(()) + // Implement Value for primitive types + impl Value for i8 { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(1); +
buf.put_i8(*self); + Ok(()) + } } -impl Value for i16 { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(2); - buf.put_i16(*self); - Ok(()) + impl Value for i16 { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(2); + buf.put_i16(*self); + Ok(()) + } } -impl Value for i32 { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(4); - buf.put_i32(*self); - Ok(()) + impl Value for i32 { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(4); + buf.put_i32(*self); + Ok(()) + } } -impl Value for i64 { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(8); - buf.put_i64(*self); - Ok(()) + impl Value for i64 { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(8); + buf.put_i64(*self); + Ok(()) + } } -impl Value for CqlDecimal { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - let (bytes, scale) = self.as_signed_be_bytes_slice_and_exponent(); + impl Value for CqlDecimal { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + let (bytes, scale) = self.as_signed_be_bytes_slice_and_exponent(); - if bytes.len() > (i32::MAX - 4) as usize { - return Err(ValueTooBig); - } - let serialized_len: i32 = bytes.len() as i32 + 4; + if bytes.len() > (i32::MAX - 4) as usize { + return Err(ValueTooBig); + } + let serialized_len: i32 = bytes.len() as i32 + 4; - buf.put_i32(serialized_len); - buf.put_i32(scale); - buf.extend_from_slice(bytes); + buf.put_i32(serialized_len); + buf.put_i32(scale); + buf.extend_from_slice(bytes); - Ok(()) + Ok(()) + } } -} -#[cfg(feature = "bigdecimal-04")] -impl Value for bigdecimal_04::BigDecimal { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - let (value, scale) = self.as_bigint_and_exponent(); + #[cfg(feature = "bigdecimal-04")] + impl Value for bigdecimal_04::BigDecimal { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + let (value, scale) = self.as_bigint_and_exponent(); - let serialized = value.to_signed_bytes_be(); + let serialized = value.to_signed_bytes_be(); - if serialized.len() > (i32::MAX - 4) as usize { - return Err(ValueTooBig); - } - let serialized_len: i32 = serialized.len() as i32 + 4; + if serialized.len() > (i32::MAX - 4) as usize { + return Err(ValueTooBig); + } + let serialized_len: i32 = serialized.len() as i32 + 4; - buf.put_i32(serialized_len); - buf.put_i32(scale.try_into().map_err(|_| ValueTooBig)?); - buf.extend_from_slice(&serialized); + buf.put_i32(serialized_len); + buf.put_i32(scale.try_into().map_err(|_| ValueTooBig)?); + buf.extend_from_slice(&serialized); - Ok(()) + Ok(()) + } } -} -#[cfg(feature = "chrono-04")] -impl Value for chrono_04::NaiveDate { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - CqlDate::from(*self).serialize(buf) + #[cfg(feature = "chrono-04")] + impl Value for chrono_04::NaiveDate { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + CqlDate::from(*self).serialize(buf) + } } -} -impl Value for CqlDate { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(4); - buf.put_u32(self.0); - Ok(()) + impl Value for CqlDate { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(4); + buf.put_u32(self.0); + Ok(()) + } } -} -#[cfg(feature = "time-03")] -impl Value for time_03::Date { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - CqlDate::from(*self).serialize(buf) + 
#[cfg(feature = "time-03")] + impl Value for time_03::Date { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + CqlDate::from(*self).serialize(buf) + } } -} -impl Value for CqlTimestamp { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(8); - buf.put_i64(self.0); - Ok(()) + impl Value for CqlTimestamp { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(8); + buf.put_i64(self.0); + Ok(()) + } } -} -impl Value for CqlTime { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(8); - buf.put_i64(self.0); - Ok(()) + impl Value for CqlTime { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(8); + buf.put_i64(self.0); + Ok(()) + } } -} -#[cfg(feature = "chrono-04")] -impl Value for chrono_04::DateTime<Tz> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - CqlTimestamp::from(*self).serialize(buf) + #[cfg(feature = "chrono-04")] + impl<Tz: chrono_04::TimeZone> Value for chrono_04::DateTime<Tz> { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + CqlTimestamp::from(*self).serialize(buf) + } } -} -#[cfg(feature = "time-03")] -impl Value for time_03::OffsetDateTime { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - CqlTimestamp::from(*self).serialize(buf) + #[cfg(feature = "time-03")] + impl Value for time_03::OffsetDateTime { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + CqlTimestamp::from(*self).serialize(buf) + } } -} -#[cfg(feature = "chrono-04")] -impl Value for chrono_04::NaiveTime { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - CqlTime::try_from(*self) - .map_err(|_| ValueTooBig)? - .serialize(buf) + #[cfg(feature = "chrono-04")] + impl Value for chrono_04::NaiveTime { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + CqlTime::try_from(*self) + .map_err(|_| ValueTooBig)?
+ .serialize(buf) + } } -} -#[cfg(feature = "time-03")] -impl Value for time_03::Time { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - CqlTime::from(*self).serialize(buf) + #[cfg(feature = "time-03")] + impl Value for time_03::Time { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + CqlTime::from(*self).serialize(buf) + } } -} -#[cfg(feature = "secrecy-08")] -impl<V: Value + secrecy_08::zeroize::Zeroize> Value for secrecy_08::Secret<V> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - use secrecy_08::ExposeSecret; - self.expose_secret().serialize(buf) + #[cfg(feature = "secrecy-08")] + impl<V: Value + secrecy_08::zeroize::Zeroize> Value for secrecy_08::Secret<V> { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + use secrecy_08::ExposeSecret; + self.expose_secret().serialize(buf) + } } -} -impl Value for bool { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(1); - let false_bytes: &[u8] = &[0x00]; - let true_bytes: &[u8] = &[0x01]; - if *self { - buf.put(true_bytes); - } else { - buf.put(false_bytes); - } + impl Value for bool { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(1); + let false_bytes: &[u8] = &[0x00]; + let true_bytes: &[u8] = &[0x01]; + if *self { + buf.put(true_bytes); + } else { + buf.put(false_bytes); + } - Ok(()) + Ok(()) + } } -} -impl Value for f32 { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(4); - buf.put_f32(*self); - Ok(()) + impl Value for f32 { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(4); + buf.put_f32(*self); + Ok(()) + } } -} -impl Value for f64 { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(8); - buf.put_f64(*self); - Ok(()) + impl Value for f64 { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(8); + buf.put_f64(*self); + Ok(()) + } } -} -impl Value for Uuid { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - buf.put_i32(16); - buf.extend_from_slice(self.as_bytes()); - Ok(()) + impl Value for Uuid { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + buf.put_i32(16); + buf.extend_from_slice(self.as_bytes()); + Ok(()) + } } -} -impl Value for CqlTimeuuid { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - self.0.serialize(buf) + impl Value for CqlTimeuuid { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + self.0.serialize(buf) + } } -} -impl Value for CqlVarint { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - let serialized = &self.0; - let serialized_len: i32 = serialized.len().try_into().map_err(|_| ValueTooBig)?; + impl Value for CqlVarint { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + let serialized = &self.0; + let serialized_len: i32 = serialized.len().try_into().map_err(|_| ValueTooBig)?; - buf.put_i32(serialized_len); - buf.extend_from_slice(serialized); + buf.put_i32(serialized_len); + buf.extend_from_slice(serialized); - Ok(()) + Ok(()) + } } -} -#[cfg(feature = "num-bigint-03")] -impl Value for num_bigint_03::BigInt { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - let serialized = self.to_signed_bytes_be(); - let serialized_len: i32 =
serialized.len().try_into().map_err(|_| ValueTooBig)?; - buf.put_i32(serialized_len); - buf.extend_from_slice(&serialized); + #[cfg(feature = "num-bigint-03")] + impl Value for num_bigint_03::BigInt { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + let serialized = self.to_signed_bytes_be(); + let serialized_len: i32 = serialized.len().try_into().map_err(|_| ValueTooBig)?; - Ok(()) + buf.put_i32(serialized_len); + buf.extend_from_slice(&serialized); + + Ok(()) + } } -} -#[cfg(feature = "num-bigint-04")] -impl Value for num_bigint_04::BigInt { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - let serialized = self.to_signed_bytes_be(); - let serialized_len: i32 = serialized.len().try_into().map_err(|_| ValueTooBig)?; + #[cfg(feature = "num-bigint-04")] + impl Value for num_bigint_04::BigInt { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + let serialized = self.to_signed_bytes_be(); + let serialized_len: i32 = serialized.len().try_into().map_err(|_| ValueTooBig)?; - buf.put_i32(serialized_len); - buf.extend_from_slice(&serialized); + buf.put_i32(serialized_len); + buf.extend_from_slice(&serialized); - Ok(()) + Ok(()) + } } -} -impl Value for &str { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - let str_bytes: &[u8] = self.as_bytes(); - let val_len: i32 = str_bytes.len().try_into().map_err(|_| ValueTooBig)?; + impl Value for &str { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + let str_bytes: &[u8] = self.as_bytes(); + let val_len: i32 = str_bytes.len().try_into().map_err(|_| ValueTooBig)?; - buf.put_i32(val_len); - buf.put(str_bytes); + buf.put_i32(val_len); + buf.put(str_bytes); - Ok(()) + Ok(()) + } } -} -impl Value for Vec<u8> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - <&[u8] as Value>::serialize(&self.as_slice(), buf) + impl Value for Vec<u8> { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + <&[u8] as Value>::serialize(&self.as_slice(), buf) + } } -} -impl Value for &[u8] { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - let val_len: i32 = self.len().try_into().map_err(|_| ValueTooBig)?; - buf.put_i32(val_len); + impl Value for &[u8] { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + let val_len: i32 = self.len().try_into().map_err(|_| ValueTooBig)?; + buf.put_i32(val_len); - buf.extend_from_slice(self); + buf.extend_from_slice(self); - Ok(()) + Ok(()) + } } -} -impl<const N: usize> Value for [u8; N] { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - let val_len: i32 = self.len().try_into().map_err(|_| ValueTooBig)?; - buf.put_i32(val_len); + impl<const N: usize> Value for [u8; N] { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + let val_len: i32 = self.len().try_into().map_err(|_| ValueTooBig)?; + buf.put_i32(val_len); - buf.extend_from_slice(self); + buf.extend_from_slice(self); - Ok(()) + Ok(()) + } } -} -impl Value for IpAddr { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - match self { - IpAddr::V4(addr) => { - buf.put_i32(4); - buf.extend_from_slice(&addr.octets()); - } - IpAddr::V6(addr) => { - buf.put_i32(16); - buf.extend_from_slice(&addr.octets()); + impl Value for IpAddr { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + match self { + IpAddr::V4(addr) => { + buf.put_i32(4); + buf.extend_from_slice(&addr.octets()); + } + IpAddr::V6(addr) => { + buf.put_i32(16); + buf.extend_from_slice(&addr.octets()); + } } - } - Ok(()) + Ok(()) + } } -} -impl Value for String { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - <&str as Value>::serialize(&self.as_str(), buf) + impl Value for String { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + <&str as Value>::serialize(&self.as_str(), buf) + } } -}
-/// Every `Option` can be serialized as None -> NULL, Some(val) -> val.serialize() -impl<T: Value> Value for Option<T> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - match self { - Some(val) => <T as Value>::serialize(val, buf), - None => { - buf.put_i32(-1); - Ok(()) + /// Every `Option` can be serialized as None -> NULL, Some(val) -> val.serialize() + impl<T: Value> Value for Option<T> { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + match self { + Some(val) => <T as Value>::serialize(val, buf), + None => { + buf.put_i32(-1); + Ok(()) + } } } } -} -impl Value for Unset { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - // Unset serializes itself to empty value with length = -2 - buf.put_i32(-2); - Ok(()) + impl Value for Unset { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + // Unset serializes itself to empty value with length = -2 + buf.put_i32(-2); + Ok(()) + } } -} -impl Value for Counter { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - self.0.serialize(buf) + impl Value for Counter { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + self.0.serialize(buf) + } } -} -impl Value for CqlDuration { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - let bytes_num_pos: usize = buf.len(); - buf.put_i32(0); + impl Value for CqlDuration { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + let bytes_num_pos: usize = buf.len(); + buf.put_i32(0); - vint_encode(self.months as i64, buf); - vint_encode(self.days as i64, buf); - vint_encode(self.nanoseconds, buf); + vint_encode(self.months as i64, buf); + vint_encode(self.days as i64, buf); + vint_encode(self.nanoseconds, buf); - let written_bytes: usize = buf.len() - bytes_num_pos - 4; - let written_bytes_i32: i32 = written_bytes.try_into().map_err(|_| ValueTooBig)?; - buf[bytes_num_pos..(bytes_num_pos + 4)].copy_from_slice(&written_bytes_i32.to_be_bytes()); + let written_bytes: usize = buf.len() - bytes_num_pos - 4; + let written_bytes_i32: i32 = written_bytes.try_into().map_err(|_| ValueTooBig)?; + buf[bytes_num_pos..(bytes_num_pos + 4)] + .copy_from_slice(&written_bytes_i32.to_be_bytes()); - Ok(()) + Ok(()) + } } -} -impl<V: Value> Value for MaybeUnset<V> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - match self { - MaybeUnset::Set(v) => v.serialize(buf), - MaybeUnset::Unset => Unset.serialize(buf), + impl<V: Value> Value for MaybeUnset<V> { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + match self { + MaybeUnset::Set(v) => v.serialize(buf), + MaybeUnset::Unset => Unset.serialize(buf), + } } } -} -// Every &impl Value and &dyn Value should also implement Value -impl<T: Value + ?Sized> Value for &T { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - <T as Value>::serialize(*self, buf) + // Every &impl Value and &dyn Value should also implement Value + impl<T: Value + ?Sized> Value for &T { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + <T as Value>::serialize(*self, buf) + } } -} -// Every Boxed Value should also implement Value -impl<T: Value + ?Sized> Value for Box<T> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - <T as Value>::serialize(self.as_ref(), buf) - } -} - -fn serialize_map<K: Value, V: Value>( - kv_iter: impl Iterator<Item = (K, V)>, - kv_count: usize, - buf: &mut Vec<u8>, -) -> Result<(), ValueTooBig> { - let bytes_num_pos: usize = buf.len(); - buf.put_i32(0); - - buf.put_i32(kv_count.try_into().map_err(|_| ValueTooBig)?); - for (key, value) in kv_iter { - <K as Value>::serialize(&key, buf)?; - <V as Value>::serialize(&value, buf)?; + // Every Boxed Value should also implement Value + impl<T: Value + ?Sized> Value for Box<T> {
+ fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + <T as Value>::serialize(self.as_ref(), buf) + } } - let written_bytes: usize = buf.len() - bytes_num_pos - 4; - let written_bytes_i32: i32 = written_bytes.try_into().map_err(|_| ValueTooBig)?; - buf[bytes_num_pos..(bytes_num_pos + 4)].copy_from_slice(&written_bytes_i32.to_be_bytes()); + fn serialize_map<K: Value, V: Value>( + kv_iter: impl Iterator<Item = (K, V)>, + kv_count: usize, + buf: &mut Vec<u8>, + ) -> Result<(), ValueTooBig> { + let bytes_num_pos: usize = buf.len(); + buf.put_i32(0); - Ok(()) -} + buf.put_i32(kv_count.try_into().map_err(|_| ValueTooBig)?); + for (key, value) in kv_iter { + <K as Value>::serialize(&key, buf)?; + <V as Value>::serialize(&value, buf)?; + } -fn serialize_list_or_set<'a, V: 'a + Value>( - elements_iter: impl Iterator<Item = &'a V>, - element_count: usize, - buf: &mut Vec<u8>, -) -> Result<(), ValueTooBig> { - let bytes_num_pos: usize = buf.len(); - buf.put_i32(0); + let written_bytes: usize = buf.len() - bytes_num_pos - 4; + let written_bytes_i32: i32 = written_bytes.try_into().map_err(|_| ValueTooBig)?; + buf[bytes_num_pos..(bytes_num_pos + 4)].copy_from_slice(&written_bytes_i32.to_be_bytes()); - buf.put_i32(element_count.try_into().map_err(|_| ValueTooBig)?); - for value in elements_iter { - <V as Value>::serialize(value, buf)?; + Ok(()) } - let written_bytes: usize = buf.len() - bytes_num_pos - 4; - let written_bytes_i32: i32 = written_bytes.try_into().map_err(|_| ValueTooBig)?; - buf[bytes_num_pos..(bytes_num_pos + 4)].copy_from_slice(&written_bytes_i32.to_be_bytes()); + fn serialize_list_or_set<'a, V: 'a + Value>( + elements_iter: impl Iterator<Item = &'a V>, + element_count: usize, + buf: &mut Vec<u8>, + ) -> Result<(), ValueTooBig> { + let bytes_num_pos: usize = buf.len(); + buf.put_i32(0); + + buf.put_i32(element_count.try_into().map_err(|_| ValueTooBig)?); + for value in elements_iter { + <V as Value>::serialize(value, buf)?; + } - Ok(()) -} + let written_bytes: usize = buf.len() - bytes_num_pos - 4; + let written_bytes_i32: i32 = written_bytes.try_into().map_err(|_| ValueTooBig)?; + buf[bytes_num_pos..(bytes_num_pos + 4)].copy_from_slice(&written_bytes_i32.to_be_bytes()); -impl<V: Value, S: BuildHasher> Value for HashSet<V, S> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - serialize_list_or_set(self.iter(), self.len(), buf) - } -} + Ok(()) + } -impl<K: Value, V: Value, S: BuildHasher> Value for HashMap<K, V, S> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - serialize_map(self.iter(), self.len(), buf) + impl<V: Value, S: BuildHasher> Value for HashSet<V, S> { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + serialize_list_or_set(self.iter(), self.len(), buf) + } } -} -impl<V: Value> Value for BTreeSet<V> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - serialize_list_or_set(self.iter(), self.len(), buf) + impl<K: Value, V: Value, S: BuildHasher> Value for HashMap<K, V, S> { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + serialize_map(self.iter(), self.len(), buf) + } } -} -impl<K: Value, V: Value> Value for BTreeMap<K, V> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - serialize_map(self.iter(), self.len(), buf) + impl<V: Value> Value for BTreeSet<V> { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + serialize_list_or_set(self.iter(), self.len(), buf) + } } -} -impl<T: Value> Value for Vec<T> { - fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { - serialize_list_or_set(self.iter(), self.len(), buf) + impl<K: Value, V: Value> Value for BTreeMap<K, V> { + fn serialize(&self, buf: &mut Vec<u8>) -> Result<(), ValueTooBig> { + serialize_map(self.iter(),
self.len(), buf) + } } -} -fn serialize_tuple( - elem_iter: impl Iterator, - buf: &mut Vec, -) -> Result<(), ValueTooBig> { - let bytes_num_pos: usize = buf.len(); - buf.put_i32(0); - - for elem in elem_iter { - elem.serialize(buf)?; + impl Value for Vec { + fn serialize(&self, buf: &mut Vec) -> Result<(), ValueTooBig> { + serialize_list_or_set(self.iter(), self.len(), buf) + } } - let written_bytes: usize = buf.len() - bytes_num_pos - 4; - let written_bytes_i32: i32 = written_bytes.try_into().map_err(|_| ValueTooBig)?; - buf[bytes_num_pos..(bytes_num_pos + 4)].copy_from_slice(&written_bytes_i32.to_be_bytes()); + impl Value for &[T] { + fn serialize(&self, buf: &mut Vec) -> Result<(), ValueTooBig> { + serialize_list_or_set(self.iter(), self.len(), buf) + } + } - Ok(()) -} + fn serialize_tuple( + elem_iter: impl Iterator, + buf: &mut Vec, + ) -> Result<(), ValueTooBig> { + let bytes_num_pos: usize = buf.len(); + buf.put_i32(0); -fn serialize_empty(buf: &mut Vec) -> Result<(), ValueTooBig> { - buf.put_i32(0); - Ok(()) -} + for elem in elem_iter { + elem.serialize(buf)?; + } -impl Value for CqlValue { - fn serialize(&self, buf: &mut Vec) -> Result<(), ValueTooBig> { - match self { - CqlValue::Map(m) => serialize_map(m.iter().map(|p| (&p.0, &p.1)), m.len(), buf), - CqlValue::Tuple(t) => serialize_tuple(t.iter(), buf), + let written_bytes: usize = buf.len() - bytes_num_pos - 4; + let written_bytes_i32: i32 = written_bytes.try_into().map_err(|_| ValueTooBig)?; + buf[bytes_num_pos..(bytes_num_pos + 4)].copy_from_slice(&written_bytes_i32.to_be_bytes()); - // A UDT value is composed of successive [bytes] values, one for each field of the UDT - // value (in the order defined by the type), so they serialize in a same way tuples do. - CqlValue::UserDefinedType { fields, .. } => { - serialize_tuple(fields.iter().map(|(_, value)| value), buf) - } + Ok(()) + } - CqlValue::Date(d) => d.serialize(buf), - CqlValue::Duration(d) => d.serialize(buf), - CqlValue::Timestamp(t) => t.serialize(buf), - CqlValue::Time(t) => t.serialize(buf), + fn serialize_empty(buf: &mut Vec) -> Result<(), ValueTooBig> { + buf.put_i32(0); + Ok(()) + } - CqlValue::Ascii(s) | CqlValue::Text(s) => s.serialize(buf), - CqlValue::List(v) | CqlValue::Set(v) => v.serialize(buf), + impl Value for CqlValue { + fn serialize(&self, buf: &mut Vec) -> Result<(), ValueTooBig> { + match self { + CqlValue::Map(m) => serialize_map(m.iter().map(|p| (&p.0, &p.1)), m.len(), buf), + CqlValue::Tuple(t) => serialize_tuple(t.iter(), buf), - CqlValue::Blob(b) => b.serialize(buf), - CqlValue::Boolean(b) => b.serialize(buf), - CqlValue::Counter(c) => c.serialize(buf), - CqlValue::Decimal(d) => d.serialize(buf), - CqlValue::Double(d) => d.serialize(buf), - CqlValue::Float(f) => f.serialize(buf), - CqlValue::Int(i) => i.serialize(buf), - CqlValue::BigInt(i) => i.serialize(buf), - CqlValue::Inet(i) => i.serialize(buf), - CqlValue::SmallInt(s) => s.serialize(buf), - CqlValue::TinyInt(t) => t.serialize(buf), - CqlValue::Timeuuid(t) => t.serialize(buf), - CqlValue::Uuid(u) => u.serialize(buf), - CqlValue::Varint(v) => v.serialize(buf), + // A UDT value is composed of successive [bytes] values, one for each field of the UDT + // value (in the order defined by the type), so they serialize in a same way tuples do. + CqlValue::UserDefinedType { fields, .. 
} => { + serialize_tuple(fields.iter().map(|(_, value)| value), buf) + } - CqlValue::Empty => serialize_empty(buf), + CqlValue::Date(d) => d.serialize(buf), + CqlValue::Duration(d) => d.serialize(buf), + CqlValue::Timestamp(t) => t.serialize(buf), + CqlValue::Time(t) => t.serialize(buf), + + CqlValue::Ascii(s) | CqlValue::Text(s) => s.serialize(buf), + CqlValue::List(v) | CqlValue::Set(v) => v.serialize(buf), + + CqlValue::Blob(b) => b.serialize(buf), + CqlValue::Boolean(b) => b.serialize(buf), + CqlValue::Counter(c) => c.serialize(buf), + CqlValue::Decimal(d) => d.serialize(buf), + CqlValue::Double(d) => d.serialize(buf), + CqlValue::Float(f) => f.serialize(buf), + CqlValue::Int(i) => i.serialize(buf), + CqlValue::BigInt(i) => i.serialize(buf), + CqlValue::Inet(i) => i.serialize(buf), + CqlValue::SmallInt(s) => s.serialize(buf), + CqlValue::TinyInt(t) => t.serialize(buf), + CqlValue::Timeuuid(t) => t.serialize(buf), + CqlValue::Uuid(u) => u.serialize(buf), + CqlValue::Varint(v) => v.serialize(buf), + + CqlValue::Empty => serialize_empty(buf), + } } } -} -macro_rules! impl_value_for_tuple { + macro_rules! impl_value_for_tuple { ( $($Ti:ident),* ; $($FieldI:tt),* ) => { impl<$($Ti),+> Value for ($($Ti,)+) where @@ -1437,126 +1448,126 @@ macro_rules! impl_value_for_tuple { } } -impl_value_for_tuple!(T0; 0); -impl_value_for_tuple!(T0, T1; 0, 1); -impl_value_for_tuple!(T0, T1, T2; 0, 1, 2); -impl_value_for_tuple!(T0, T1, T2, T3; 0, 1, 2, 3); -impl_value_for_tuple!(T0, T1, T2, T3, T4; 0, 1, 2, 3, 4); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5; 0, 1, 2, 3, 4, 5); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6; 0, 1, 2, 3, 4, 5, 6); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7; 0, 1, 2, 3, 4, 5, 6, 7); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8; 0, 1, 2, 3, 4, 5, 6, 7, 8); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9; + impl_value_for_tuple!(T0; 0); + impl_value_for_tuple!(T0, T1; 0, 1); + impl_value_for_tuple!(T0, T1, T2; 0, 1, 2); + impl_value_for_tuple!(T0, T1, T2, T3; 0, 1, 2, 3); + impl_value_for_tuple!(T0, T1, T2, T3, T4; 0, 1, 2, 3, 4); + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5; 0, 1, 2, 3, 4, 5); + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6; 0, 1, 2, 3, 4, 5, 6); + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7; 0, 1, 2, 3, 4, 5, 6, 7); + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8; 0, 1, 2, 3, 4, 5, 6, 7, 8); + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10; + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11; + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12; + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13; + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13); -impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14; + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14); -impl_value_for_tuple!(T0, 
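As the comment in the hunk above notes, a UDT cell is its fields' `[bytes]` cells laid out back to back, which is why tuples and UDTs share `serialize_tuple`; unlike lists and maps there is no element count, only the outer length. A small sketch of the resulting layout (names are illustrative):

```rust
// A tuple/UDT cell: outer i32 length, then each field's cell concatenated,
// with no element count in between.
fn write_int_pair_cell(buf: &mut Vec<u8>, a: i32, b: i32) {
    let len_pos = buf.len();
    buf.extend_from_slice(&0i32.to_be_bytes()); // outer length, patched below
    for v in [a, b] {
        buf.extend_from_slice(&4i32.to_be_bytes()); // an int cell is 4 bytes
        buf.extend_from_slice(&v.to_be_bytes());
    }
    let written = (buf.len() - len_pos - 4) as i32;
    buf[len_pos..len_pos + 4].copy_from_slice(&written.to_be_bytes());
}

fn main() {
    let mut buf = Vec::new();
    write_int_pair_cell(&mut buf, 1, 2);
    assert_eq!(
        buf,
        [0, 0, 0, 16, 0, 0, 0, 4, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0, 0, 2]
    );
}
```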
T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15; + impl_value_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); -// -// ValueList impls -// + // + // ValueList impls + // -// Implement ValueList for the unit type -impl ValueList for () { - fn serialized(&self) -> SerializedResult<'_> { - Ok(Cow::Owned(LegacySerializedValues::new())) + // Implement ValueList for the unit type + impl ValueList for () { + fn serialized(&self) -> SerializedResult<'_> { + Ok(Cow::Owned(LegacySerializedValues::new())) + } } -} -// Implement ValueList for &[] - u8 because otherwise rust can't infer type -impl ValueList for [u8; 0] { - fn serialized(&self) -> SerializedResult<'_> { - Ok(Cow::Owned(LegacySerializedValues::new())) + // Implement ValueList for &[] - u8 because otherwise rust can't infer type + impl ValueList for [u8; 0] { + fn serialized(&self) -> SerializedResult<'_> { + Ok(Cow::Owned(LegacySerializedValues::new())) + } } -} -// Implement ValueList for slices of Value types -impl ValueList for &[T] { - fn serialized(&self) -> SerializedResult<'_> { - let size = std::mem::size_of_val(*self); - let mut result = LegacySerializedValues::with_capacity(size); - for val in *self { - result.add_value(val)?; - } + // Implement ValueList for slices of Value types + impl ValueList for &[T] { + fn serialized(&self) -> SerializedResult<'_> { + let size = std::mem::size_of_val(*self); + let mut result = LegacySerializedValues::with_capacity(size); + for val in *self { + result.add_value(val)?; + } - Ok(Cow::Owned(result)) + Ok(Cow::Owned(result)) + } } -} -// Implement ValueList for Vec -impl ValueList for Vec { - fn serialized(&self) -> SerializedResult<'_> { - let slice = self.as_slice(); - let size = std::mem::size_of_val(slice); - let mut result = LegacySerializedValues::with_capacity(size); - for val in self { - result.add_value(val)?; - } + // Implement ValueList for Vec + impl ValueList for Vec { + fn serialized(&self) -> SerializedResult<'_> { + let slice = self.as_slice(); + let size = std::mem::size_of_val(slice); + let mut result = LegacySerializedValues::with_capacity(size); + for val in self { + result.add_value(val)?; + } - Ok(Cow::Owned(result)) + Ok(Cow::Owned(result)) + } } -} -// Implement ValueList for maps, which serializes named values -macro_rules! impl_value_list_for_btree_map { - ($key_type:ty) => { - impl ValueList for BTreeMap<$key_type, T> { - fn serialized(&self) -> SerializedResult<'_> { - let mut result = LegacySerializedValues::with_capacity(self.len()); - for (key, val) in self { - result.add_named_value(key, val)?; - } + // Implement ValueList for maps, which serializes named values + macro_rules! impl_value_list_for_btree_map { + ($key_type:ty) => { + impl ValueList for BTreeMap<$key_type, T> { + fn serialized(&self) -> SerializedResult<'_> { + let mut result = LegacySerializedValues::with_capacity(self.len()); + for (key, val) in self { + result.add_named_value(key, val)?; + } - Ok(Cow::Owned(result)) + Ok(Cow::Owned(result)) + } } - } - }; -} + }; + } -// Implement ValueList for maps, which serializes named values -macro_rules! 
impl_value_list_for_hash_map { - ($key_type:ty) => { - impl ValueList for HashMap<$key_type, T, S> { - fn serialized(&self) -> SerializedResult<'_> { - let mut result = LegacySerializedValues::with_capacity(self.len()); - for (key, val) in self { - result.add_named_value(key, val)?; - } + // Implement ValueList for maps, which serializes named values + macro_rules! impl_value_list_for_hash_map { + ($key_type:ty) => { + impl ValueList for HashMap<$key_type, T, S> { + fn serialized(&self) -> SerializedResult<'_> { + let mut result = LegacySerializedValues::with_capacity(self.len()); + for (key, val) in self { + result.add_named_value(key, val)?; + } - Ok(Cow::Owned(result)) + Ok(Cow::Owned(result)) + } } - } - }; -} + }; + } -impl_value_list_for_hash_map!(String); -impl_value_list_for_hash_map!(&str); -impl_value_list_for_btree_map!(String); -impl_value_list_for_btree_map!(&str); + impl_value_list_for_hash_map!(String); + impl_value_list_for_hash_map!(&str); + impl_value_list_for_btree_map!(String); + impl_value_list_for_btree_map!(&str); -// Implement ValueList for tuples of Values of size up to 16 + // Implement ValueList for tuples of Values of size up to 16 -// Here is an example implementation for (T0, ) -// Further variants are done using a macro -impl ValueList for (T0,) { - fn serialized(&self) -> SerializedResult<'_> { - let size = std::mem::size_of_val(self); - let mut result = LegacySerializedValues::with_capacity(size); - result.add_value(&self.0)?; - Ok(Cow::Owned(result)) + // Here is an example implementation for (T0, ) + // Further variants are done using a macro + impl ValueList for (T0,) { + fn serialized(&self) -> SerializedResult<'_> { + let size = std::mem::size_of_val(self); + let mut result = LegacySerializedValues::with_capacity(size); + result.add_value(&self.0)?; + Ok(Cow::Owned(result)) + } } -} -macro_rules! impl_value_list_for_tuple { + macro_rules! impl_value_list_for_tuple { ( $($Ti:ident),* ; $($FieldI:tt),*) => { impl<$($Ti),+> ValueList for ($($Ti,)+) where @@ -1574,131 +1585,143 @@ macro_rules! 
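The two macros above give map types a `ValueList` impl that adds each entry as a *named* value, so keys are matched to named bind markers rather than to positions. A hypothetical usage sketch (legacy API; the statement string and the commented-out `session` call are assumptions, not taken from this diff):

```rust
use std::collections::HashMap;

// Named bind values via the map-based `ValueList` impls: each map key must
// match a named bind marker in the statement.
fn build_named_values() -> HashMap<&'static str, i32> {
    let mut values = HashMap::new();
    values.insert("a", 7);
    values.insert("b", 42);
    values
    // e.g.: session.query_unpaged("INSERT INTO ks.t (a, b) VALUES (:a, :b)", &values)
}

fn main() {
    assert_eq!(build_named_values().len(), 2);
}
```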
impl_value_list_for_tuple { } } -impl_value_list_for_tuple!(T0, T1; 0, 1); -impl_value_list_for_tuple!(T0, T1, T2; 0, 1, 2); -impl_value_list_for_tuple!(T0, T1, T2, T3; 0, 1, 2, 3); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4; 0, 1, 2, 3, 4); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5; 0, 1, 2, 3, 4, 5); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6; 0, 1, 2, 3, 4, 5, 6); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7; 0, 1, 2, 3, 4, 5, 6, 7); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8; 0, 1, 2, 3, 4, 5, 6, 7, 8); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10; + impl_value_list_for_tuple!(T0, T1; 0, 1); + impl_value_list_for_tuple!(T0, T1, T2; 0, 1, 2); + impl_value_list_for_tuple!(T0, T1, T2, T3; 0, 1, 2, 3); + impl_value_list_for_tuple!(T0, T1, T2, T3, T4; 0, 1, 2, 3, 4); + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5; 0, 1, 2, 3, 4, 5); + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6; 0, 1, 2, 3, 4, 5, 6); + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7; 0, 1, 2, 3, 4, 5, 6, 7); + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8; 0, 1, 2, 3, 4, 5, 6, 7, 8); + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9); + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11; + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12; + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13; + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14; + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14); -impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15; + impl_value_list_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); -// Every &impl ValueList should also implement ValueList -impl ValueList for &T { - fn serialized(&self) -> SerializedResult<'_> { - ::serialized(*self) + // Every &impl ValueList should also implement ValueList + impl ValueList for &T { + fn serialized(&self) -> SerializedResult<'_> { + ::serialized(*self) + } } -} -impl ValueList for LegacySerializedValues { - fn serialized(&self) -> SerializedResult<'_> { - Ok(Cow::Borrowed(self)) + impl ValueList for LegacySerializedValues { + fn serialized(&self) -> SerializedResult<'_> { + Ok(Cow::Borrowed(self)) + } } -} -impl<'b> ValueList for Cow<'b, LegacySerializedValues> { - fn serialized(&self) -> SerializedResult<'_> { - Ok(Cow::Borrowed(self.as_ref())) + impl ValueList for Cow<'_, LegacySerializedValues> { + fn serialized(&self) -> SerializedResult<'_> { + Ok(Cow::Borrowed(self.as_ref())) + } } -} -// -// BatchValues impls -// + // + // BatchValues impls + // -/// 
Implements `BatchValues` from an `Iterator` over references to things that implement `ValueList` -/// -/// This is to avoid requiring allocating a new `Vec` containing all the `ValueList`s directly: -/// with this, one can write: -/// `session.batch(&batch, BatchValuesFromIterator::from(lines_to_insert.iter().map(|l| &l.value_list)))` -/// where `lines_to_insert` may also contain e.g. data to pick the statement... -/// -/// The underlying iterator will always be cloned at least once, once to compute the length if it can't be known -/// in advance, and be re-cloned at every retry. -/// It is consequently expected that the provided iterator is cheap to clone (e.g. `slice.iter().map(...)`). -pub struct LegacyBatchValuesFromIter<'a, IT> { - it: IT, - _spooky: std::marker::PhantomData<&'a ()>, -} + /// Implements `BatchValues` from an `Iterator` over references to things that implement `ValueList` + /// + /// This is to avoid requiring allocating a new `Vec` containing all the `ValueList`s directly: + /// with this, one can write: + /// `session.batch(&batch, BatchValuesFromIterator::from(lines_to_insert.iter().map(|l| &l.value_list)))` + /// where `lines_to_insert` may also contain e.g. data to pick the statement... + /// + /// The underlying iterator will always be cloned at least once, once to compute the length if it can't be known + /// in advance, and be re-cloned at every retry. + /// It is consequently expected that the provided iterator is cheap to clone (e.g. `slice.iter().map(...)`). + pub struct LegacyBatchValuesFromIter<'a, IT> { + it: IT, + _spooky: std::marker::PhantomData<&'a ()>, + } -impl<'a, IT, VL> LegacyBatchValuesFromIter<'a, IT> -where - IT: Iterator + Clone, - VL: ValueList + 'a, -{ - pub fn new(into_iter: impl IntoIterator) -> Self { - Self { - it: into_iter.into_iter(), - _spooky: std::marker::PhantomData, + impl<'a, IT, VL> LegacyBatchValuesFromIter<'a, IT> + where + IT: Iterator + Clone, + VL: ValueList + 'a, + { + pub fn new(into_iter: impl IntoIterator) -> Self { + Self { + it: into_iter.into_iter(), + _spooky: std::marker::PhantomData, + } } } -} -impl<'a, IT, VL> From for LegacyBatchValuesFromIter<'a, IT> -where - IT: Iterator + Clone, - VL: ValueList + 'a, -{ - fn from(it: IT) -> Self { - Self::new(it) + impl<'a, IT, VL> From for LegacyBatchValuesFromIter<'a, IT> + where + IT: Iterator + Clone, + VL: ValueList + 'a, + { + fn from(it: IT) -> Self { + Self::new(it) + } } -} -impl<'a, IT, VL> LegacyBatchValues for LegacyBatchValuesFromIter<'a, IT> -where - IT: Iterator + Clone, - VL: ValueList + 'a, -{ - type LegacyBatchValuesIter<'r> = LegacyBatchValuesIteratorFromIterator where Self: 'r; - fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { - self.it.clone().into() + impl<'a, IT, VL> LegacyBatchValues for LegacyBatchValuesFromIter<'a, IT> + where + IT: Iterator + Clone, + VL: ValueList + 'a, + { + type LegacyBatchValuesIter<'r> + = LegacyBatchValuesIteratorFromIterator + where + Self: 'r; + fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { + self.it.clone().into() + } } -} -// Implement BatchValues for slices of ValueList types -impl LegacyBatchValues for [T] { - type LegacyBatchValuesIter<'r> = LegacyBatchValuesIteratorFromIterator> where Self: 'r; - fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { - self.iter().into() + // Implement BatchValues for slices of ValueList types + impl LegacyBatchValues for [T] { + type LegacyBatchValuesIter<'r> + = LegacyBatchValuesIteratorFromIterator> + where + Self: 'r; + fn 
batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { + self.iter().into() + } } -} -// Implement BatchValues for Vec -impl LegacyBatchValues for Vec { - type LegacyBatchValuesIter<'r> = LegacyBatchValuesIteratorFromIterator> where Self: 'r; - fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { - LegacyBatchValues::batch_values_iter(self.as_slice()) + // Implement BatchValues for Vec + impl LegacyBatchValues for Vec { + type LegacyBatchValuesIter<'r> + = LegacyBatchValuesIteratorFromIterator> + where + Self: 'r; + fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { + LegacyBatchValues::batch_values_iter(self.as_slice()) + } } -} -// Here is an example implementation for (T0, ) -// Further variants are done using a macro -impl LegacyBatchValues for (T0,) { - type LegacyBatchValuesIter<'r> = LegacyBatchValuesIteratorFromIterator> where Self: 'r; - fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { - std::iter::once(&self.0).into() + // Here is an example implementation for (T0, ) + // Further variants are done using a macro + impl LegacyBatchValues for (T0,) { + type LegacyBatchValuesIter<'r> + = LegacyBatchValuesIteratorFromIterator> + where + Self: 'r; + fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { + std::iter::once(&self.0).into() + } } -} -pub struct TupleValuesIter<'a, T> { - tuple: &'a T, - idx: usize, -} + pub struct TupleValuesIter<'a, T> { + tuple: &'a T, + idx: usize, + } -macro_rules! impl_batch_values_for_tuple { + macro_rules! impl_batch_values_for_tuple { ( $($Ti:ident),* ; $($FieldI:tt),* ; $TupleSize:tt) => { impl<$($Ti),+> LegacyBatchValues for ($($Ti,)+) where @@ -1751,98 +1774,114 @@ macro_rules! impl_batch_values_for_tuple { } } -impl_batch_values_for_tuple!(T0, T1; 0, 1; 2); -impl_batch_values_for_tuple!(T0, T1, T2; 0, 1, 2; 3); -impl_batch_values_for_tuple!(T0, T1, T2, T3; 0, 1, 2, 3; 4); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4; 0, 1, 2, 3, 4; 5); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5; 0, 1, 2, 3, 4, 5; 6); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6; 0, 1, 2, 3, 4, 5, 6; 7); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7; 0, 1, 2, 3, 4, 5, 6, 7; 8); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8; 0, 1, 2, 3, 4, 5, 6, 7, 8; 9); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9; + impl_batch_values_for_tuple!(T0, T1; 0, 1; 2); + impl_batch_values_for_tuple!(T0, T1, T2; 0, 1, 2; 3); + impl_batch_values_for_tuple!(T0, T1, T2, T3; 0, 1, 2, 3; 4); + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4; 0, 1, 2, 3, 4; 5); + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5; 0, 1, 2, 3, 4, 5; 6); + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6; 0, 1, 2, 3, 4, 5, 6; 7); + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7; 0, 1, 2, 3, 4, 5, 6, 7; 8); + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8; 0, 1, 2, 3, 4, 5, 6, 7, 8; 9); + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9; 10); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10; + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10; 11); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11; + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11; 12); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, 
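The formatting change repeated across these `LegacyBatchValues` impls (and the `BatchValues` ones later in the diff) moves the `where Self: 'r` bound of a generic associated type after the `=`. Both spellings compile; a minimal illustration of the layout this diff standardizes on:

```rust
// A generic associated type whose `where` clause sits after the `=`,
// matching the formatting adopted throughout this diff.
trait Holder {
    type Iter<'r>
    where
        Self: 'r;

    fn iter(&self) -> Self::Iter<'_>;
}

struct Bytes(Vec<u8>);

impl Holder for Bytes {
    type Iter<'r>
        = std::slice::Iter<'r, u8>
    where
        Self: 'r;

    fn iter(&self) -> Self::Iter<'_> {
        self.0.iter()
    }
}

fn main() {
    assert_eq!(Bytes(vec![1, 2, 3]).iter().count(), 3);
}
```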
T10, T11, T12; + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12; 13); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13; + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13; 14); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14; + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14; 15); -impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15; + impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15; 16); -// Every &impl BatchValues should also implement BatchValues -impl<'a, T: LegacyBatchValues + ?Sized> LegacyBatchValues for &'a T { - type LegacyBatchValuesIter<'r> = ::LegacyBatchValuesIter<'r> where Self: 'r; - fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { - ::batch_values_iter(*self) + // Every &impl BatchValues should also implement BatchValues + impl LegacyBatchValues for &T { + type LegacyBatchValuesIter<'r> + = ::LegacyBatchValuesIter<'r> + where + Self: 'r; + fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { + ::batch_values_iter(*self) + } } -} -/// Allows reusing already-serialized first value -/// -/// We'll need to build a `LegacySerializedValues` for the first ~`ValueList` of a batch to figure out the shard (#448). -/// Once that is done, we can use that instead of re-serializing. -/// -/// This struct implements both `BatchValues` and `BatchValuesIterator` for that purpose -pub struct LegacyBatchValuesFirstSerialized<'f, T> { - first: Option<&'f LegacySerializedValues>, - rest: T, -} - -impl<'f, T: LegacyBatchValues> LegacyBatchValuesFirstSerialized<'f, T> { - pub fn new( - batch_values: T, - already_serialized_first: Option<&'f LegacySerializedValues>, - ) -> Self { - Self { - first: already_serialized_first, - rest: batch_values, + /// Allows reusing already-serialized first value + /// + /// We'll need to build a `LegacySerializedValues` for the first ~`ValueList` of a batch to figure out the shard (#448). + /// Once that is done, we can use that instead of re-serializing. 
+ /// + /// This struct implements both `BatchValues` and `BatchValuesIterator` for that purpose + pub struct LegacyBatchValuesFirstSerialized<'f, T> { + first: Option<&'f LegacySerializedValues>, + rest: T, + } + + impl<'f, T: LegacyBatchValues> LegacyBatchValuesFirstSerialized<'f, T> { + pub fn new( + batch_values: T, + already_serialized_first: Option<&'f LegacySerializedValues>, + ) -> Self { + Self { + first: already_serialized_first, + rest: batch_values, + } } } -} -impl<'f, BV: LegacyBatchValues> LegacyBatchValues for LegacyBatchValuesFirstSerialized<'f, BV> { - type LegacyBatchValuesIter<'r> = - LegacyBatchValuesFirstSerialized<'f, ::LegacyBatchValuesIter<'r>> where Self: 'r; - fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { - LegacyBatchValuesFirstSerialized { - first: self.first, - rest: self.rest.batch_values_iter(), + impl<'f, BV: LegacyBatchValues> LegacyBatchValues for LegacyBatchValuesFirstSerialized<'f, BV> { + type LegacyBatchValuesIter<'r> + = LegacyBatchValuesFirstSerialized< + 'f, + ::LegacyBatchValuesIter<'r>, + > + where + Self: 'r; + fn batch_values_iter(&self) -> Self::LegacyBatchValuesIter<'_> { + LegacyBatchValuesFirstSerialized { + first: self.first, + rest: self.rest.batch_values_iter(), + } } } -} -impl<'a, 'f: 'a, IT: LegacyBatchValuesIterator<'a>> LegacyBatchValuesIterator<'a> - for LegacyBatchValuesFirstSerialized<'f, IT> -{ - fn next_serialized(&mut self) -> Option> { - match self.first.take() { - Some(first) => { - self.rest.skip_next(); - Some(Ok(Cow::Borrowed(first))) + impl<'a, 'f: 'a, IT: LegacyBatchValuesIterator<'a>> LegacyBatchValuesIterator<'a> + for LegacyBatchValuesFirstSerialized<'f, IT> + { + fn next_serialized(&mut self) -> Option> { + match self.first.take() { + Some(first) => { + self.rest.skip_next(); + Some(Ok(Cow::Borrowed(first))) + } + None => self.rest.next_serialized(), } - None => self.rest.next_serialized(), - } - } - fn write_next_to_request( - &mut self, - buf: &mut impl BufMut, - ) -> Option> { - match self.first.take() { - Some(first) => { - self.rest.skip_next(); - first.write_to_request(buf); - Some(Ok(())) + } + fn write_next_to_request( + &mut self, + buf: &mut impl BufMut, + ) -> Option> { + match self.first.take() { + Some(first) => { + self.rest.skip_next(); + first.write_to_request(buf); + Some(Ok(())) + } + None => self.rest.write_next_to_request(buf), } - None => self.rest.write_next_to_request(buf), } - } - fn skip_next(&mut self) -> Option<()> { - self.rest.skip_next(); - self.first.take().map(|_| ()) + fn skip_next(&mut self) -> Option<()> { + self.rest.skip_next(); + self.first.take().map(|_| ()) + } } } +#[allow(deprecated)] +pub use legacy::{ + LegacyBatchValues, LegacyBatchValuesFirstSerialized, LegacyBatchValuesFromIter, + LegacyBatchValuesIterator, LegacyBatchValuesIteratorFromIterator, LegacySerializedValues, + LegacySerializedValuesIterator, SerializeValuesError, SerializedResult, TupleValuesIter, Value, + ValueList, ValueTooBig, +}; diff --git a/scylla-cql/src/frame/value_tests.rs b/scylla-cql/src/frame/value_tests.rs index 62d998cbf3..a2df53a4d1 100644 --- a/scylla-cql/src/frame/value_tests.rs +++ b/scylla-cql/src/frame/value_tests.rs @@ -1,3 +1,6 @@ +// TODO: remove this once deprecated items are deleted. 
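The iterator impl above hands out the cached first row exactly once and skips the corresponding element of the wrapped iterator, a plain `Option::take` pattern. The same idea in miniature, detached from the driver types:

```rust
// Yield a pre-computed first item once, then defer to the underlying
// iterator, skipping the element the cached item replaces.
struct FirstPrecomputed<I: Iterator> {
    first: Option<I::Item>,
    rest: I,
}

impl<I: Iterator> Iterator for FirstPrecomputed<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<I::Item> {
        match self.first.take() {
            Some(first) => {
                self.rest.next(); // analogous to `skip_next` above
                Some(first)
            }
            None => self.rest.next(),
        }
    }
}

fn main() {
    let it = FirstPrecomputed {
        first: Some(10),
        rest: [1, 2, 3].into_iter(),
    };
    assert_eq!(it.collect::<Vec<_>>(), vec![10, 2, 3]);
}
```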
+#![allow(deprecated)] + use crate::frame::value::{CqlTimeuuid, CqlVarint}; use crate::frame::{response::result::CqlValue, types::RawValue, value::LegacyBatchValuesIterator}; use crate::types::serialize::batch::{BatchValues, BatchValuesIterator, LegacyBatchValuesAdapter}; diff --git a/scylla-cql/src/lib.rs b/scylla-cql/src/lib.rs index 228fc43f89..09a4e56d70 100644 --- a/scylla-cql/src/lib.rs +++ b/scylla-cql/src/lib.rs @@ -16,7 +16,9 @@ pub mod macros { #[allow(deprecated)] pub use crate::impl_from_cql_value_from_method; + #[allow(deprecated)] pub use crate::impl_serialize_row_via_value_list; + #[allow(deprecated)] pub use crate::impl_serialize_value_via_value; } @@ -35,6 +37,7 @@ pub mod _macro_internal { FromCqlVal, FromCqlValError, FromRow, FromRowError, }; pub use crate::frame::response::result::{ColumnSpec, ColumnType, CqlValue, Row}; + #[allow(deprecated)] pub use crate::frame::value::{ LegacySerializedValues, SerializedResult, Value, ValueList, ValueTooBig, }; diff --git a/scylla-cql/src/types/deserialize/result.rs b/scylla-cql/src/types/deserialize/result.rs index b6ad7a5590..d8c5450338 100644 --- a/scylla-cql/src/types/deserialize/result.rs +++ b/scylla-cql/src/types/deserialize/result.rs @@ -255,8 +255,12 @@ mod tests { fn lend_next(&mut self) -> Option, DeserializationError>>; } + // Disable the lint, if there is more than one lifetime included. + // Can be removed once https://github.com/rust-lang/rust-clippy/issues/12495 is fixed. + #[allow(clippy::needless_lifetimes)] impl<'frame, 'metadata> LendingIterator for RawRowIterator<'frame, 'metadata> { - type Item<'borrow> = ColumnIterator<'borrow, 'borrow> + type Item<'borrow> + = ColumnIterator<'borrow, 'borrow> where Self: 'borrow; diff --git a/scylla-cql/src/types/serialize/batch.rs b/scylla-cql/src/types/serialize/batch.rs index 2425b90737..aff43b990e 100644 --- a/scylla-cql/src/types/serialize/batch.rs +++ b/scylla-cql/src/types/serialize/batch.rs @@ -4,6 +4,7 @@ // Note: When editing above doc-comment edit the corresponding comment on // re-export module in scylla crate too. 
+#[allow(deprecated)] use crate::frame::value::{LegacyBatchValues, LegacyBatchValuesIterator}; use super::row::{RowSerializationContext, SerializeRow}; @@ -166,7 +167,10 @@ where IT: Iterator + Clone, SR: SerializeRow + 'sr, { - type BatchValuesIter<'r> = BatchValuesIteratorFromIterator where Self: 'r; + type BatchValuesIter<'r> + = BatchValuesIteratorFromIterator + where + Self: 'r; #[inline] fn batch_values_iter(&self) -> Self::BatchValuesIter<'_> { @@ -176,7 +180,10 @@ where // Implement BatchValues for slices of SerializeRow types impl BatchValues for [T] { - type BatchValuesIter<'r> = BatchValuesIteratorFromIterator> where Self: 'r; + type BatchValuesIter<'r> + = BatchValuesIteratorFromIterator> + where + Self: 'r; #[inline] fn batch_values_iter(&self) -> Self::BatchValuesIter<'_> { @@ -186,7 +193,10 @@ impl BatchValues for [T] { // Implement BatchValues for Vec impl BatchValues for Vec { - type BatchValuesIter<'r> = BatchValuesIteratorFromIterator> where Self: 'r; + type BatchValuesIter<'r> + = BatchValuesIteratorFromIterator> + where + Self: 'r; #[inline] fn batch_values_iter(&self) -> Self::BatchValuesIter<'_> { @@ -197,7 +207,10 @@ impl BatchValues for Vec { // Here is an example implementation for (T0, ) // Further variants are done using a macro impl BatchValues for (T0,) { - type BatchValuesIter<'r> = BatchValuesIteratorFromIterator> where Self: 'r; + type BatchValuesIter<'r> + = BatchValuesIteratorFromIterator> + where + Self: 'r; #[inline] fn batch_values_iter(&self) -> Self::BatchValuesIter<'_> { @@ -302,8 +315,11 @@ impl_batch_values_for_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15; 16); // Every &impl BatchValues should also implement BatchValues -impl<'a, T: BatchValues + ?Sized> BatchValues for &'a T { - type BatchValuesIter<'r> = ::BatchValuesIter<'r> where Self: 'r; +impl BatchValues for &T { + type BatchValuesIter<'r> + = ::BatchValuesIter<'r> + where + Self: 'r; #[inline] fn batch_values_iter(&self) -> Self::BatchValuesIter<'_> { @@ -317,13 +333,19 @@ impl<'a, T: BatchValues + ?Sized> BatchValues for &'a T { /// Note that the [`LegacyBatchValues`] trait is deprecated and will be /// removed in the future, and you should prefer using [`BatchValues`] as it is /// more type-safe. +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] pub struct LegacyBatchValuesAdapter(pub T); +#[allow(deprecated)] impl BatchValues for LegacyBatchValuesAdapter where T: LegacyBatchValues, { - type BatchValuesIter<'r> = LegacyBatchValuesIteratorAdapter> + type BatchValuesIter<'r> + = LegacyBatchValuesIteratorAdapter> where Self: 'r; @@ -335,8 +357,13 @@ where /// A newtype wrapper which adjusts an existing types that implement /// [`LegacyBatchValuesIterator`] to the current [`BatchValuesIterator`] API. 
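A pairing that repeats through the following hunks: `#[deprecated(since = "0.15.1", ...)]` on a legacy item plus `#[allow(deprecated)]` on the crate's own uses of it, which keeps downstream users warned while the crate itself still compiles cleanly. In miniature:

```rust
// Deprecate an item for users, but silence the warning where the crate
// itself must still reference it (impls, re-exports, tests).
#[deprecated(since = "0.15.1", note = "legacy API, going away soon")]
pub struct OldAdapter(pub u32);

#[allow(deprecated)]
impl OldAdapter {
    pub fn get(&self) -> u32 {
        self.0
    }
}

#[allow(deprecated)]
fn main() {
    assert_eq!(OldAdapter(7).get(), 7);
}
```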
+#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] pub struct LegacyBatchValuesIteratorAdapter(pub T); +#[allow(deprecated)] impl<'r, T> BatchValuesIterator<'r> for LegacyBatchValuesIteratorAdapter where T: LegacyBatchValuesIterator<'r>, diff --git a/scylla-cql/src/types/serialize/mod.rs b/scylla-cql/src/types/serialize/mod.rs index 3bc3424dc0..8d63e32041 100644 --- a/scylla-cql/src/types/serialize/mod.rs +++ b/scylla-cql/src/types/serialize/mod.rs @@ -32,6 +32,8 @@ pub use writers::{CellValueBuilder, CellWriter, RowWriter}; /// a list of named values encoded with the legacy `ValueList` trait is passed /// as an argument to the statement, and rewriting it using the new /// `SerializeRow` interface fails. +// TODO: remove mentions about legacy errors from the above description when +// they are removed. #[derive(Debug, Clone, Error)] #[error("SerializationError: {0}")] pub struct SerializationError(Arc); diff --git a/scylla-cql/src/types/serialize/raw_batch.rs b/scylla-cql/src/types/serialize/raw_batch.rs index e378f42dcb..6c755c5e88 100644 --- a/scylla-cql/src/types/serialize/raw_batch.rs +++ b/scylla-cql/src/types/serialize/raw_batch.rs @@ -52,17 +52,14 @@ pub trait RawBatchValuesIterator<'a> { where Self: Sized, { - let mut count = 0; - while self.skip_next().is_some() { - count += 1; - } - count + std::iter::from_fn(|| self.skip_next()).count() } } // An implementation used by `scylla-proxy` impl RawBatchValues for Vec { - type RawBatchValuesIter<'r> = std::slice::Iter<'r, SerializedValues> + type RawBatchValuesIter<'r> + = std::slice::Iter<'r, SerializedValues> where Self: 'r; @@ -117,7 +114,8 @@ where BV: BatchValues, CTX: Iterator> + Clone, { - type RawBatchValuesIter<'r> = RawBatchValuesIteratorAdapter, CTX> + type RawBatchValuesIter<'r> + = RawBatchValuesIteratorAdapter, CTX> where Self: 'r; @@ -143,19 +141,26 @@ where { #[inline] fn serialize_next(&mut self, writer: &mut RowWriter) -> Option> { - let ctx = self.contexts.next()?; + // We do `unwrap_or` because we want the iterator length to be the same + // as the amount of values. Limiting to length of the amount of + // statements (contexts) causes the caller to not be able to correctly + // detect that amount of statements and values is different. 
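The `count` cleanup in the `raw_batch.rs` hunk above replaces a manual `while ... { count += 1 }` loop with `std::iter::from_fn(...).count()`. The idiom in isolation:

```rust
// `std::iter::from_fn` adapts a "call until it returns None" method into an
// iterator, so counting the remaining items becomes a plain `.count()`.
struct Skipper {
    remaining: usize,
}

impl Skipper {
    fn skip_next(&mut self) -> Option<()> {
        if self.remaining > 0 {
            self.remaining -= 1;
            Some(())
        } else {
            None
        }
    }
}

fn main() {
    let mut s = Skipper { remaining: 3 };
    assert_eq!(std::iter::from_fn(|| s.skip_next()).count(), 3);
}
```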
+ let ctx = self + .contexts + .next() + .unwrap_or(RowSerializationContext::empty()); self.batch_values_iterator.serialize_next(&ctx, writer) } fn is_empty_next(&mut self) -> Option { - self.contexts.next()?; + let _ = self.contexts.next(); let ret = self.batch_values_iterator.is_empty_next()?; Some(ret) } #[inline] fn skip_next(&mut self) -> Option<()> { - self.contexts.next()?; + let _ = self.contexts.next(); self.batch_values_iterator.skip_next()?; Some(()) } diff --git a/scylla-cql/src/types/serialize/row.rs b/scylla-cql/src/types/serialize/row.rs index 09d3ba43ce..cf05398fda 100644 --- a/scylla-cql/src/types/serialize/row.rs +++ b/scylla-cql/src/types/serialize/row.rs @@ -16,7 +16,7 @@ use crate::frame::request::RequestDeserializationError; use crate::frame::response::result::ColumnType; use crate::frame::response::result::PreparedMetadata; use crate::frame::types; -use crate::frame::value::SerializeValuesError; +#[allow(deprecated)] use crate::frame::value::{LegacySerializedValues, ValueList}; use crate::frame::{response::result::ColumnSpec, types::RawValue}; @@ -282,11 +282,13 @@ impl SerializeRow for &T { } } +#[allow(deprecated)] impl SerializeRow for LegacySerializedValues { fallback_impl_contents!(); } -impl<'b> SerializeRow for Cow<'b, LegacySerializedValues> { +#[allow(deprecated)] +impl SerializeRow for Cow<'_, LegacySerializedValues> { fallback_impl_contents!(); } @@ -409,6 +411,10 @@ impl_tuples!( /// } /// impl_serialize_row_via_value_list!(WithGenerics); /// ``` +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] #[macro_export] macro_rules! impl_serialize_row_via_value_list { ($t:ident$(<$($targ:tt $(: $tbound:tt)?),*>)?) => { @@ -440,8 +446,13 @@ macro_rules! impl_serialize_row_via_value_list { /// /// See the [`impl_serialize_row_via_value_list`] macro on information about /// the properties of the [`SerializeRow`] implementation. +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] pub struct ValueListAdapter(pub T); +#[allow(deprecated)] impl SerializeRow for ValueListAdapter where T: ValueList, @@ -482,6 +493,11 @@ where /// /// See [`impl_serialize_row_via_value_list`] which generates a boilerplate /// [`SerializeRow`] implementation that uses this function. +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] +#[allow(deprecated)] pub fn serialize_legacy_row( r: &T, ctx: &RowSerializationContext<'_>, @@ -671,6 +687,8 @@ pub enum BuiltinSerializationErrorKind { /// The error that caused the column serialization to fail. err: SerializationError, }, + /// Too many values to add, max 65,535 values can be sent in a request. + TooManyValues, } impl Display for BuiltinSerializationErrorKind { @@ -679,6 +697,12 @@ impl Display for BuiltinSerializationErrorKind { BuiltinSerializationErrorKind::ColumnSerializationFailed { name, err } => { write!(f, "failed to serialize column {name}: {err}") } + BuiltinSerializationErrorKind::TooManyValues => { + write!( + f, + "Too many values to add, max 65,535 values can be sent in a request" + ) + } } } } @@ -686,6 +710,11 @@ impl Display for BuiltinSerializationErrorKind { /// Describes a failure to translate the output of the [`ValueList`] legacy trait /// into an output of the [`SerializeRow`] trait. 
#[derive(Error, Debug)] +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] +#[allow(deprecated)] pub enum ValueListToSerializeRowAdapterError { /// The values generated by the [`ValueList`] trait were provided in /// name-value pairs, and there is a column in the statement for which @@ -750,9 +779,9 @@ impl SerializedValues { let element_count = match writer.value_count().try_into() { Ok(n) => n, Err(_) => { - return Err(SerializationError(Arc::new( - SerializeValuesError::TooManyValues, - ))) + return Err(SerializationError(Arc::new(mk_ser_err::( + BuiltinSerializationErrorKind::TooManyValues, + )))); } }; @@ -808,9 +837,9 @@ impl SerializedValues { typ: &ColumnType, ) -> Result<(), SerializationError> { if self.element_count() == u16::MAX { - return Err(SerializationError(Arc::new( - SerializeValuesError::TooManyValues, - ))); + return Err(SerializationError(Arc::new(mk_ser_err::( + BuiltinSerializationErrorKind::TooManyValues, + )))); } let len_before_serialize: usize = self.serialized_values.len(); @@ -921,7 +950,9 @@ pub(crate) mod tests { use crate::frame::response::result::{ColumnSpec, ColumnType, TableSpec}; use crate::frame::types::RawValue; + #[allow(deprecated)] use crate::frame::value::{LegacySerializedValues, MaybeUnset, SerializedResult, ValueList}; + #[allow(deprecated)] use crate::types::serialize::row::ValueListAdapter; use crate::types::serialize::{RowWriter, SerializationError}; @@ -938,6 +969,7 @@ pub(crate) mod tests { ColumnSpec::borrowed(name, typ, TableSpec::borrowed("ks", "tbl")) } + #[allow(deprecated)] #[test] fn test_legacy_fallback() { let row = ( @@ -967,6 +999,7 @@ pub(crate) mod tests { assert_eq!(&legacy_data[2..], new_data); } + #[allow(deprecated)] #[test] fn test_legacy_fallback_with_names() { let sorted_row = ( @@ -1056,6 +1089,7 @@ pub(crate) mod tests { ColumnSpec::borrowed(name, typ, TableSpec::borrowed("ks", "tbl")) } + #[allow(deprecated)] #[test] fn test_legacy_wrapper() { struct Foo; @@ -1132,7 +1166,10 @@ pub(crate) mod tests { let err = do_serialize_err(v, &spec); let err = get_ser_err(&err); assert_eq!(err.rust_name, std::any::type_name::<(&str, i32)>()); - let BuiltinSerializationErrorKind::ColumnSerializationFailed { name, err: _ } = &err.kind; + let BuiltinSerializationErrorKind::ColumnSerializationFailed { name, err: _ } = &err.kind + else { + panic!("Expected BuiltinSerializationErrorKind::ColumnSerializationFailed") + }; assert_eq!(name, "b"); } @@ -1159,7 +1196,10 @@ pub(crate) mod tests { let err = do_serialize_err(v, &spec); let err = get_ser_err(&err); assert_eq!(err.rust_name, std::any::type_name::>()); - let BuiltinSerializationErrorKind::ColumnSerializationFailed { name, err: _ } = &err.kind; + let BuiltinSerializationErrorKind::ColumnSerializationFailed { name, err: _ } = &err.kind + else { + panic!("Expected BuiltinSerializationErrorKind::ColumnSerializationFailed") + }; assert_eq!(name, "b"); } @@ -1193,7 +1233,10 @@ pub(crate) mod tests { let err = do_serialize_err(v, &spec); let err = get_ser_err(&err); assert_eq!(err.rust_name, std::any::type_name::>()); - let BuiltinSerializationErrorKind::ColumnSerializationFailed { name, err: _ } = &err.kind; + let BuiltinSerializationErrorKind::ColumnSerializationFailed { name, err: _ } = &err.kind + else { + panic!("Expected BuiltinSerializationErrorKind::ColumnSerializationFailed") + }; assert_eq!(name, "b"); } @@ -1568,6 +1611,23 @@ pub(crate) mod tests { assert_eq!(reference, row); } + #[test] + fn 
test_row_serialization_with_not_rust_idents() { + #[derive(SerializeRow, Debug)] + #[scylla(crate = crate)] + struct RowWithTTL { + #[scylla(rename = "[ttl]")] + ttl: i32, + } + + let spec = [col("[ttl]", ColumnType::Int)]; + + let reference = do_serialize((42i32,), &spec); + let row = do_serialize(RowWithTTL { ttl: 42 }, &spec); + + assert_eq!(reference, row); + } + #[derive(SerializeRow, Debug)] #[scylla(crate = crate)] struct TestRowWithSkippedFields { diff --git a/scylla-cql/src/types/serialize/value.rs b/scylla-cql/src/types/serialize/value.rs index 0e7fba6691..ce6cc19a0e 100644 --- a/scylla-cql/src/types/serialize/value.rs +++ b/scylla-cql/src/types/serialize/value.rs @@ -15,6 +15,7 @@ use uuid::Uuid; use crate::frame::response::result::{ColumnType, CqlValue}; use crate::frame::types::vint_encode; +#[allow(deprecated)] use crate::frame::value::{ Counter, CqlDate, CqlDecimal, CqlDuration, CqlTime, CqlTimestamp, CqlTimeuuid, CqlVarint, MaybeUnset, Unset, Value, @@ -926,6 +927,10 @@ fn serialize_mapping<'t, 'b, K: SerializeValue + 't, V: SerializeValue + 't>( /// } /// impl_serialize_value_via_value!(WithGenerics); /// ``` +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] #[macro_export] macro_rules! impl_serialize_value_via_value { ($t:ident$(<$($targ:tt $(: $tbound:tt)?),*>)?) => { @@ -952,8 +957,13 @@ macro_rules! impl_serialize_value_via_value { /// /// See the [`impl_serialize_value_via_value`] macro on information about /// the properties of the [`SerializeValue`] implementation. +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] pub struct ValueAdapter(pub T); +#[allow(deprecated)] impl SerializeValue for ValueAdapter where T: Value, @@ -981,6 +991,11 @@ where /// /// See [`impl_serialize_value_via_value`] which generates a boilerplate /// [`SerializeValue`] implementation that uses this function. +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] +#[allow(deprecated)] pub fn serialize_legacy_value<'b, T: Value>( v: &T, writer: CellWriter<'b>, @@ -1465,6 +1480,11 @@ impl Display for UdtSerializationErrorKind { /// Describes a failure to translate the output of the [`Value`] legacy trait /// into an output of the [`SerializeValue`] trait. +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] +#[allow(deprecated)] #[derive(Error, Debug)] pub enum ValueToSerializeValueAdapterError { /// The value is too bit to be serialized as it exceeds the maximum 2GB size limit. 
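The `TooManyValues` error introduced in the `row.rs` hunks above exists because the protocol encodes the number of bind values as an unsigned 16-bit `[short]`, hence the hard cap of 65,535 values per request. The guard amounts to a `u16` conversion; a sketch with illustrative names:

```rust
// The value-count guard: anything past u16::MAX cannot be represented in
// the protocol's 16-bit value count, so it must be rejected up front.
fn check_value_count(n: usize) -> Result<u16, &'static str> {
    u16::try_from(n)
        .map_err(|_| "Too many values to add, max 65,535 values can be sent in a request")
}

fn main() {
    assert!(check_value_count(65_535).is_ok());
    assert!(check_value_count(65_536).is_err());
}
```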
@@ -1598,7 +1618,9 @@ pub(crate) mod tests { use std::collections::BTreeMap; use crate::frame::response::result::{ColumnType, CqlValue}; + #[allow(deprecated)] use crate::frame::value::{Counter, MaybeUnset, Unset, Value, ValueTooBig}; + #[allow(deprecated)] use crate::types::serialize::value::{ BuiltinSerializationError, BuiltinSerializationErrorKind, BuiltinTypeCheckError, BuiltinTypeCheckErrorKind, MapSerializationErrorKind, MapTypeCheckErrorKind, @@ -1612,6 +1634,7 @@ pub(crate) mod tests { use super::{SerializeValue, UdtSerializationErrorKind, UdtTypeCheckErrorKind}; + #[allow(deprecated)] fn check_compat(v: V) { let mut legacy_data = Vec::new(); ::serialize(&v, &mut legacy_data).unwrap(); @@ -1662,6 +1685,7 @@ pub(crate) mod tests { do_serialize_result(t, typ).unwrap_err() } + #[allow(deprecated)] #[test] fn test_legacy_wrapper() { struct Foo; @@ -2824,4 +2848,32 @@ pub(crate) mod tests { assert_eq!(reference, row); } + + #[test] + fn test_udt_with_non_rust_ident() { + #[derive(SerializeValue, Debug)] + #[scylla(crate = crate)] + struct UdtWithNonRustIdent { + #[scylla(rename = "a$a")] + a: i32, + } + + let typ = ColumnType::UserDefinedType { + type_name: "typ".into(), + keyspace: "ks".into(), + field_types: vec![("a$a".into(), ColumnType::Int)], + }; + let value = UdtWithNonRustIdent { a: 42 }; + + let mut reference = Vec::new(); + // Total length of the struct + reference.extend_from_slice(&8i32.to_be_bytes()); + // Field 'a' + reference.extend_from_slice(&(std::mem::size_of_val(&value.a) as i32).to_be_bytes()); + reference.extend_from_slice(&value.a.to_be_bytes()); + + let udt = do_serialize(value, &typ); + + assert_eq!(reference, udt); + } } diff --git a/scylla-cql/src/types/serialize/writers.rs b/scylla-cql/src/types/serialize/writers.rs index f366d297a0..ad85f41300 100644 --- a/scylla-cql/src/types/serialize/writers.rs +++ b/scylla-cql/src/types/serialize/writers.rs @@ -204,7 +204,7 @@ pub struct WrittenCellProof<'buf> { _phantom: std::marker::PhantomData<*mut &'buf ()>, } -impl<'buf> WrittenCellProof<'buf> { +impl WrittenCellProof<'_> { /// A shorthand for creating the proof. /// /// Do not make it public! It's important that only the row writer defined diff --git a/scylla-macros/Cargo.toml b/scylla-macros/Cargo.toml index 0565c3fd16..b8e71f95b4 100644 --- a/scylla-macros/Cargo.toml +++ b/scylla-macros/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "scylla-macros" -version = "0.7.0" +version = "0.7.1" edition = "2021" rust-version = "1.70" description = "proc macros for scylla async CQL driver" diff --git a/scylla-macros/src/deserialize/mod.rs b/scylla-macros/src/deserialize/mod.rs index 2eb1819e46..074f37d8b1 100644 --- a/scylla-macros/src/deserialize/mod.rs +++ b/scylla-macros/src/deserialize/mod.rs @@ -115,6 +115,7 @@ where let items = items.into_iter(); parse_quote! 
{ + #[automatically_derived] impl<#frame_lifetime, #metadata_lifetime, #impl_generics> #trait_<#frame_lifetime, #metadata_lifetime> for #struct_name #ty_generics where #(#predicates),* diff --git a/scylla-macros/src/deserialize/row.rs b/scylla-macros/src/deserialize/row.rs index 01ba2a0e4e..ef79fd2acd 100644 --- a/scylla-macros/src/deserialize/row.rs +++ b/scylla-macros/src/deserialize/row.rs @@ -167,7 +167,7 @@ impl StructDesc { struct TypeCheckAssumeOrderGenerator<'sd>(&'sd StructDesc); -impl<'sd> TypeCheckAssumeOrderGenerator<'sd> { +impl TypeCheckAssumeOrderGenerator<'_> { fn generate_name_verification( &self, field_index: usize, // These two indices can be different because of `skip` attribute @@ -267,7 +267,7 @@ impl<'sd> TypeCheckAssumeOrderGenerator<'sd> { struct DeserializeAssumeOrderGenerator<'sd>(&'sd StructDesc); -impl<'sd> DeserializeAssumeOrderGenerator<'sd> { +impl DeserializeAssumeOrderGenerator<'_> { fn generate_finalize_field(&self, field_index: usize, field: &Field) -> syn::Expr { if field.skip { // Skipped fields are initialized with Default::default() @@ -335,7 +335,7 @@ impl<'sd> DeserializeAssumeOrderGenerator<'sd> { struct TypeCheckUnorderedGenerator<'sd>(&'sd StructDesc); -impl<'sd> TypeCheckUnorderedGenerator<'sd> { +impl TypeCheckUnorderedGenerator<'_> { // An identifier for a bool variable that represents whether given // field was already visited during type check fn visited_flag_variable(field: &Field) -> syn::Ident { @@ -480,7 +480,7 @@ impl<'sd> TypeCheckUnorderedGenerator<'sd> { struct DeserializeUnorderedGenerator<'sd>(&'sd StructDesc); -impl<'sd> DeserializeUnorderedGenerator<'sd> { +impl DeserializeUnorderedGenerator<'_> { // An identifier for a variable that is meant to store the parsed variable // before being ultimately moved to the struct on deserialize fn deserialize_field_variable(field: &Field) -> syn::Ident { diff --git a/scylla-macros/src/deserialize/value.rs b/scylla-macros/src/deserialize/value.rs index de7a752e69..c3a7a3cdff 100644 --- a/scylla-macros/src/deserialize/value.rs +++ b/scylla-macros/src/deserialize/value.rs @@ -222,7 +222,7 @@ impl StructDesc { struct TypeCheckAssumeOrderGenerator<'sd>(&'sd StructDesc); -impl<'sd> TypeCheckAssumeOrderGenerator<'sd> { +impl TypeCheckAssumeOrderGenerator<'_> { // Generates name and type validation for given Rust struct's field. 
fn generate_field_validation(&self, rust_field_idx: usize, field: &Field) -> syn::Expr { let macro_internal = self.0.struct_attrs().macro_internal_path(); @@ -398,7 +398,7 @@ impl<'sd> TypeCheckAssumeOrderGenerator<'sd> { struct DeserializeAssumeOrderGenerator<'sd>(&'sd StructDesc); -impl<'sd> DeserializeAssumeOrderGenerator<'sd> { +impl DeserializeAssumeOrderGenerator<'_> { fn generate_finalize_field(&self, field: &Field) -> syn::Expr { if field.skip { // Skipped fields are initialized with Default::default() @@ -566,7 +566,7 @@ impl<'sd> DeserializeAssumeOrderGenerator<'sd> { struct TypeCheckUnorderedGenerator<'sd>(&'sd StructDesc); -impl<'sd> TypeCheckUnorderedGenerator<'sd> { +impl TypeCheckUnorderedGenerator<'_> { // An identifier for a bool variable that represents whether given // field was already visited during type check fn visited_flag_variable(field: &Field) -> syn::Ident { @@ -730,7 +730,7 @@ impl<'sd> TypeCheckUnorderedGenerator<'sd> { struct DeserializeUnorderedGenerator<'sd>(&'sd StructDesc); -impl<'sd> DeserializeUnorderedGenerator<'sd> { +impl DeserializeUnorderedGenerator<'_> { /// An identifier for a variable that is meant to store the parsed variable /// before being ultimately moved to the struct on deserialize. fn deserialize_field_variable(field: &Field) -> syn::Ident { diff --git a/scylla-macros/src/serialize/row.rs b/scylla-macros/src/serialize/row.rs index a5449c9be6..a1695fa57c 100644 --- a/scylla-macros/src/serialize/row.rs +++ b/scylla-macros/src/serialize/row.rs @@ -102,6 +102,7 @@ pub(crate) fn derive_serialize_row(tokens_input: TokenStream) -> Result { ctx: &'a Context, } -impl<'a> Generator for ColumnSortingGenerator<'a> { +impl Generator for ColumnSortingGenerator<'_> { fn generate_serialize(&self) -> syn::TraitItemFn { // Need to: // - Check that all required columns are there and no more @@ -223,7 +224,7 @@ impl<'a> Generator for ColumnSortingGenerator<'a> { statements.push(self.ctx.generate_mk_ser_err()); // Generate a "visited" flag for each field - let visited_flag_names = rust_field_names + let visited_flag_names = rust_field_idents .iter() .map(|s| syn::Ident::new(&format!("visited_flag_{}", s), Span::call_site())) .collect::>(); @@ -317,7 +318,7 @@ struct ColumnOrderedGenerator<'a> { ctx: &'a Context, } -impl<'a> Generator for ColumnOrderedGenerator<'a> { +impl Generator for ColumnOrderedGenerator<'_> { fn generate_serialize(&self) -> syn::TraitItemFn { let mut statements: Vec = Vec::new(); diff --git a/scylla-macros/src/serialize/value.rs b/scylla-macros/src/serialize/value.rs index 14ca2e7b93..1072e33c8f 100644 --- a/scylla-macros/src/serialize/value.rs +++ b/scylla-macros/src/serialize/value.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use darling::FromAttributes; use proc_macro::TokenStream; -use proc_macro2::Span; use syn::parse_quote; use crate::Flavor; @@ -129,6 +128,7 @@ pub(crate) fn derive_serialize_value( let serialize_item = gen.generate_serialize(); let res = parse_quote! 
{ + #[automatically_derived] impl #impl_generics #implemented_trait for #struct_name #ty_generics #where_clause { #serialize_item } @@ -256,7 +256,7 @@ struct FieldSortingGenerator<'a> { ctx: &'a Context, } -impl<'a> Generator for FieldSortingGenerator<'a> { +impl Generator for FieldSortingGenerator<'_> { fn generate_serialize(&self) -> syn::TraitItemFn { // Need to: // - Check that all required fields are there and no more @@ -327,14 +327,14 @@ impl<'a> Generator for FieldSortingGenerator<'a> { .generate_udt_type_match(parse_quote!(#crate_path::UdtTypeCheckErrorKind::NotUdt)), ); - fn make_visited_flag_ident(field_name: &str) -> syn::Ident { - syn::Ident::new(&format!("visited_flag_{}", field_name), Span::call_site()) + fn make_visited_flag_ident(field_name: &syn::Ident) -> syn::Ident { + syn::Ident::new(&format!("visited_flag_{}", field_name), field_name.span()) } // Generate a "visited" flag for each field - let visited_flag_names = rust_field_names + let visited_flag_names = rust_field_idents .iter() - .map(|s| make_visited_flag_ident(s)) + .map(make_visited_flag_ident) .collect::>(); statements.extend::>(parse_quote! { #(let mut #visited_flag_names = false;)* @@ -347,11 +347,11 @@ impl<'a> Generator for FieldSortingGenerator<'a> { .fields .iter() .filter(|f| !f.attrs.ignore_missing) - .map(|f| f.field_name()); + .map(|f| &f.ident); // An iterator over visited flags of Rust fields that can't be ignored // (i.e., if UDT misses a corresponding field, an error should be raised). let nonignorable_visited_flag_names = - nonignorable_rust_field_names.map(|s| make_visited_flag_ident(&s)); + nonignorable_rust_field_names.map(make_visited_flag_ident); // Generate a variable that counts down visited fields. let field_count = self.ctx.fields.len(); @@ -449,7 +449,7 @@ struct FieldOrderedGenerator<'a> { ctx: &'a Context, } -impl<'a> Generator for FieldOrderedGenerator<'a> { +impl Generator for FieldOrderedGenerator<'_> { fn generate_serialize(&self) -> syn::TraitItemFn { let mut statements: Vec = Vec::new(); diff --git a/scylla-proxy/Cargo.toml b/scylla-proxy/Cargo.toml index 4f4ee627f8..4adae4533a 100644 --- a/scylla-proxy/Cargo.toml +++ b/scylla-proxy/Cargo.toml @@ -14,7 +14,7 @@ license = "MIT OR Apache-2.0" defaults = [] [dependencies] -scylla-cql = { version = "0.4.0", path = "../scylla-cql" } +scylla-cql = { version = "0.4.1", path = "../scylla-cql" } byteorder = "1.3.4" bytes = "1.2.0" futures = "0.3.6" @@ -28,7 +28,7 @@ tokio = { version = "1.34", features = [ "rt-multi-thread", ] } uuid = "1.0" -thiserror = "1.0.32" +thiserror = "2.0.6" bigdecimal = "0.4" num-bigint = "0.3" tracing = "0.1.25" diff --git a/scylla/Cargo.toml b/scylla/Cargo.toml index d5d9551e2d..fb9fadcba2 100644 --- a/scylla/Cargo.toml +++ b/scylla/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "scylla" -version = "0.15.0" +version = "0.15.1" edition = "2021" rust-version = "1.70" description = "Async CQL driver for Rust, optimized for Scylla, fully compatible with Apache Cassandraâ„¢" @@ -41,8 +41,8 @@ full-serialization = [ ] [dependencies] -scylla-macros = { version = "0.7.0", path = "../scylla-macros" } -scylla-cql = { version = "0.4.0", path = "../scylla-cql" } +scylla-macros = { version = "0.7.1", path = "../scylla-macros" } +scylla-cql = { version = "0.4.1", path = "../scylla-cql" } byteorder = "1.3.4" bytes = "1.0.1" futures = "0.3.6" @@ -59,7 +59,7 @@ tokio = { version = "1.34", features = [ snap = "1.0" uuid = { version = "1.0", features = ["v4"] } rand = "0.8.3" -thiserror = "1.0" +thiserror = "2.0.6" itertools = 
"0.13.0" tracing = "0.1.36" chrono = { version = "0.4.32", default-features = false, features = ["clock"] } @@ -98,4 +98,7 @@ harness = false [lints.rust] unnameable_types = "warn" unreachable_pub = "warn" -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(scylla_cloud_tests)'] } +unexpected_cfgs = { level = "warn", check-cfg = [ + 'cfg(scylla_cloud_tests)', + 'cfg(cpp_rust_unstable)', +] } diff --git a/scylla/src/history.rs b/scylla/src/history.rs index a055f91a39..b80dcf15ec 100644 --- a/scylla/src/history.rs +++ b/scylla/src/history.rs @@ -449,28 +449,21 @@ fn write_fiber_attempts(fiber: &FiberHistory, f: &mut std::fmt::Formatter<'_>) - #[cfg(test)] mod tests { - use std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, - sync::Arc, - }; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use crate::{ - query::Query, retry_policy::RetryDecision, test_utils::setup_tracing, transport::errors::{DbError, QueryError}, - utils::test_utils::unique_keyspace_name, }; use super::{ AttemptId, AttemptResult, HistoryCollector, HistoryListener, QueryHistoryResult, QueryId, SpeculativeId, StructuredHistory, TimePoint, }; - use crate::test_utils::create_new_session_builder; use assert_matches::assert_matches; use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc}; - use futures::StreamExt as _; - use scylla_cql::{frame::response::result::Row, Consistency}; + use scylla_cql::Consistency; // Set a single time for all timestamps within StructuredHistory. // HistoryCollector sets the timestamp to current time which changes with each test. @@ -510,53 +503,6 @@ mod tests { history } - // Set a single node for all attempts within StructuredHistory. - // When running against real life nodes this address may change, - // setting it to one value makes it possible to run tests consistently. - fn set_one_node(mut history: StructuredHistory) -> StructuredHistory { - let the_node: SocketAddr = node1_addr(); - - for query in &mut history.queries { - for fiber in std::iter::once(&mut query.non_speculative_fiber) - .chain(query.speculative_fibers.iter_mut()) - { - for attempt in &mut fiber.attempts { - attempt.node_addr = the_node; - } - } - } - - history - } - - // Set a single error message for all DbErrors within StructuredHistory. - // The error message changes between Scylla/Cassandra/their versions. - // Setting it to one value makes it possible to run tests consistently. 
- fn set_one_db_error_message(mut history: StructuredHistory) -> StructuredHistory { - let set_msg = |err: &mut QueryError| { - if let QueryError::DbError(_, msg) = err { - *msg = "Error message from database".to_string(); - } - }; - - for query in &mut history.queries { - if let Some(QueryHistoryResult::Error(_, err)) = &mut query.result { - set_msg(err); - } - for fiber in std::iter::once(&mut query.non_speculative_fiber) - .chain(query.speculative_fibers.iter_mut()) - { - for attempt in &mut fiber.attempts { - if let Some(AttemptResult::Error(_, err, _)) = &mut attempt.result { - set_msg(err); - } - } - } - } - - history - } - fn node1_addr() -> SocketAddr { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 19042) } @@ -913,195 +859,4 @@ mod tests { "; assert_eq!(displayed, format!("{}", set_one_time(history))); } - - #[tokio::test] - async fn successful_query_history() { - setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); - - let mut query = Query::new("SELECT * FROM system.local"); - let history_collector = Arc::new(HistoryCollector::new()); - query.set_history_listener(history_collector.clone()); - - session.query_unpaged(query.clone(), ()).await.unwrap(); - - let history: StructuredHistory = history_collector.clone_structured_history(); - - let displayed = "Queries History: -=== Query #0 === -| start_time: 2022-02-22 20:22:22 UTC -| Non-speculative attempts: -| - Attempt #0 sent to 127.0.0.1:19042 -| request send time: 2022-02-22 20:22:22 UTC -| Success at 2022-02-22 20:22:22 UTC -| -| Query successful at 2022-02-22 20:22:22 UTC -================= -"; - assert_eq!( - displayed, - format!( - "{}", - set_one_db_error_message(set_one_node(set_one_time(history))) - ) - ); - - // Prepared queries retain the history listener set in Query. 
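As the comment above notes, a statement prepared from a `Query` retains the `Query`'s history listener, so executions of the prepared form keep recording history; the removed lines below exercised exactly that. A standalone sketch of the flow (it needs a live cluster; the calls are those used by the removed test, with paths assumed to match the scylla 0.15 public API and a placeholder contact point):

    use std::sync::Arc;
    use scylla::history::HistoryCollector;
    use scylla::query::Query;
    use scylla::SessionBuilder;

    async fn history_of_prepared() -> Result<(), Box<dyn std::error::Error>> {
        // Placeholder contact point.
        let session = SessionBuilder::new()
            .known_node("127.0.0.1:9042")
            .build()
            .await?;

        let mut query = Query::new("SELECT * FROM system.local");
        let collector = Arc::new(HistoryCollector::new());
        query.set_history_listener(collector.clone());

        session.query_unpaged(query.clone(), ()).await?;

        // The listener travels along into the prepared statement.
        let prepared = session.prepare(query).await?;
        session.execute_unpaged(&prepared, ()).await?;

        // Both executions are now visible in the structured history.
        println!("{}", collector.clone_structured_history());
        Ok(())
    }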
- let prepared = session.prepare(query).await.unwrap(); - session.execute_unpaged(&prepared, ()).await.unwrap(); - - let history2: StructuredHistory = history_collector.clone_structured_history(); - - let displayed2 = "Queries History: -=== Query #0 === -| start_time: 2022-02-22 20:22:22 UTC -| Non-speculative attempts: -| - Attempt #0 sent to 127.0.0.1:19042 -| request send time: 2022-02-22 20:22:22 UTC -| Success at 2022-02-22 20:22:22 UTC -| -| Query successful at 2022-02-22 20:22:22 UTC -================= -=== Query #1 === -| start_time: 2022-02-22 20:22:22 UTC -| Non-speculative attempts: -| - Attempt #0 sent to 127.0.0.1:19042 -| request send time: 2022-02-22 20:22:22 UTC -| Success at 2022-02-22 20:22:22 UTC -| -| Query successful at 2022-02-22 20:22:22 UTC -================= -"; - assert_eq!( - displayed2, - format!( - "{}", - set_one_db_error_message(set_one_node(set_one_time(history2))) - ) - ); - } - - #[tokio::test] - async fn failed_query_history() { - setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); - - let mut query = Query::new("This isnt even CQL"); - let history_collector = Arc::new(HistoryCollector::new()); - query.set_history_listener(history_collector.clone()); - - assert!(session.query_unpaged(query.clone(), ()).await.is_err()); - - let history: StructuredHistory = history_collector.clone_structured_history(); - - let displayed = -"Queries History: -=== Query #0 === -| start_time: 2022-02-22 20:22:22 UTC -| Non-speculative attempts: -| - Attempt #0 sent to 127.0.0.1:19042 -| request send time: 2022-02-22 20:22:22 UTC -| Error at 2022-02-22 20:22:22 UTC -| Error: Database returned an error: The submitted query has a syntax error, Error message: Error message from database -| Retry decision: DontRetry -| -| Query failed at 2022-02-22 20:22:22 UTC -| Error: Database returned an error: The submitted query has a syntax error, Error message: Error message from database -================= -"; - assert_eq!( - displayed, - format!( - "{}", - set_one_db_error_message(set_one_node(set_one_time(history))) - ) - ); - } - - #[tokio::test] - async fn iterator_query_history() { - setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); - let ks = unique_keyspace_name(); - session - .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) - .await - .unwrap(); - session.use_keyspace(ks, true).await.unwrap(); - - session - .query_unpaged("CREATE TABLE t (p int primary key)", ()) - .await - .unwrap(); - for i in 0..32 { - session - .query_unpaged("INSERT INTO t (p) VALUES (?)", (i,)) - .await - .unwrap(); - } - - let mut iter_query: Query = Query::new("SELECT * FROM t"); - iter_query.set_page_size(8); - let history_collector = Arc::new(HistoryCollector::new()); - iter_query.set_history_listener(history_collector.clone()); - - let mut rows_iterator = session - .query_iter(iter_query, ()) - .await - .unwrap() - .rows_stream::() - .unwrap(); - while let Some(_row) = rows_iterator.next().await { - // Receive rows... 
- } - - let history = history_collector.clone_structured_history(); - - assert!(history.queries.len() >= 4); - - let displayed_prefix = "Queries History: -=== Query #0 === -| start_time: 2022-02-22 20:22:22 UTC -| Non-speculative attempts: -| - Attempt #0 sent to 127.0.0.1:19042 -| request send time: 2022-02-22 20:22:22 UTC -| Success at 2022-02-22 20:22:22 UTC -| -| Query successful at 2022-02-22 20:22:22 UTC -================= -=== Query #1 === -| start_time: 2022-02-22 20:22:22 UTC -| Non-speculative attempts: -| - Attempt #0 sent to 127.0.0.1:19042 -| request send time: 2022-02-22 20:22:22 UTC -| Success at 2022-02-22 20:22:22 UTC -| -| Query successful at 2022-02-22 20:22:22 UTC -================= -=== Query #2 === -| start_time: 2022-02-22 20:22:22 UTC -| Non-speculative attempts: -| - Attempt #0 sent to 127.0.0.1:19042 -| request send time: 2022-02-22 20:22:22 UTC -| Success at 2022-02-22 20:22:22 UTC -| -| Query successful at 2022-02-22 20:22:22 UTC -================= -=== Query #3 === -| start_time: 2022-02-22 20:22:22 UTC -| Non-speculative attempts: -| - Attempt #0 sent to 127.0.0.1:19042 -| request send time: 2022-02-22 20:22:22 UTC -| Success at 2022-02-22 20:22:22 UTC -| -| Query successful at 2022-02-22 20:22:22 UTC -================= -"; - let displayed_str = format!( - "{}", - set_one_db_error_message(set_one_node(set_one_time(history))) - ); - - assert!(displayed_str.starts_with(displayed_prefix),); - } } diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index 0bd45d7a14..8dc56420a9 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -118,6 +118,9 @@ pub mod frame { pub(crate) use scylla_cql::frame::response::*; pub mod result { + #[cfg(cpp_rust_unstable)] + pub use scylla_cql::frame::response::result::DeserializedMetadataAndRawRows; + pub(crate) use scylla_cql::frame::response::result::*; pub use scylla_cql::frame::response::result::{ ColumnSpec, ColumnType, CqlValue, PartitionKeyIndex, Row, TableSpec, @@ -141,6 +144,7 @@ pub mod serialize { }; // Legacy migration types - to be removed when removing legacy framework + #[allow(deprecated)] pub use scylla_cql::types::serialize::batch::{ LegacyBatchValuesAdapter, LegacyBatchValuesIteratorAdapter, }; @@ -158,6 +162,7 @@ pub mod serialize { }; // Legacy migration types - to be removed when removing legacy framework + #[allow(deprecated)] pub use scylla_cql::types::serialize::row::{ // Legacy migration types - to be removed when removing legacy framework serialize_legacy_row, @@ -185,6 +190,7 @@ pub mod serialize { }; // Legacy migration types - to be removed when removing legacy framework + #[allow(deprecated)] pub use scylla_cql::types::serialize::value::{ serialize_legacy_value, ValueAdapter, ValueToSerializeValueAdapterError, }; @@ -257,10 +263,8 @@ pub mod transport; pub(crate) mod utils; -/// This module is NOT part of the public API (it is `pub` only for internal use of integration tests). -/// Future minor releases are free to introduce breaking API changes inside it. 
-#[doc(hidden)] -pub use utils::test_utils; +#[cfg(test)] +pub(crate) use utils::test_utils; pub use statement::batch; pub use statement::prepared_statement; diff --git a/scylla/src/macros.rs b/scylla/src/macros.rs index 6549507bef..3e75fa1ab1 100644 --- a/scylla/src/macros.rs +++ b/scylla/src/macros.rs @@ -30,6 +30,11 @@ pub use scylla_cql::macros::FromUserType; /// /// --- /// +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] +#[allow(deprecated)] pub use scylla_cql::macros::IntoUserType; /// Derive macro for the [`SerializeValue`](crate::serialize::value::SerializeValue) trait @@ -360,7 +365,7 @@ pub use scylla_cql::macros::SerializeRow; /// If the value of the field received from DB is null, the field will be /// initialized with `Default::default()`. /// -/// `#[scylla(rename = "field_name")` +/// `#[scylla(rename = "field_name")]` /// /// By default, the generated implementation will try to match the Rust field /// to a UDT field with the same name. This attribute instead allows to match @@ -475,7 +480,7 @@ pub use scylla_macros::DeserializeValue; /// The field will be completely ignored during deserialization and will /// be initialized with `Default::default()`. /// -/// `#[scylla(rename = "field_name")` +/// `#[scylla(rename = "field_name")]` /// /// By default, the generated implementation will try to match the Rust field /// to a column with the same name. This attribute allows to match to a column @@ -486,6 +491,10 @@ pub use scylla_macros::DeserializeRow; /// /// --- /// +#[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" +)] pub use scylla_cql::macros::ValueList; #[deprecated( @@ -495,7 +504,9 @@ pub use scylla_cql::macros::ValueList; #[allow(deprecated)] pub use scylla_cql::macros::impl_from_cql_value_from_method; +#[allow(deprecated)] pub use scylla_cql::macros::impl_serialize_row_via_value_list; +#[allow(deprecated)] pub use scylla_cql::macros::impl_serialize_value_via_value; // Reexports for derive(IntoUserType) diff --git a/scylla/src/statement/batch.rs b/scylla/src/statement/batch.rs index cda256a033..62bbaab140 100644 --- a/scylla/src/statement/batch.rs +++ b/scylla/src/statement/batch.rs @@ -283,9 +283,10 @@ pub(crate) mod batch_values { where BV: BatchValues, { - type BatchValuesIter<'r> = BatchValuesFirstSerializedIterator<'r, BV::BatchValuesIter<'r>> - where - Self: 'r; + type BatchValuesIter<'r> + = BatchValuesFirstSerializedIterator<'r, BV::BatchValuesIter<'r>> + where + Self: 'r; fn batch_values_iter(&self) -> Self::BatchValuesIter<'_> { BatchValuesFirstSerializedIterator { diff --git a/scylla/src/statement/prepared_statement.rs b/scylla/src/statement/prepared_statement.rs index 8ecb86a4f9..385fcdf074 100644 --- a/scylla/src/statement/prepared_statement.rs +++ b/scylla/src/statement/prepared_statement.rs @@ -224,7 +224,7 @@ impl PreparedStatement { pub(crate) fn extract_partition_key<'ps>( &'ps self, bound_values: &'ps SerializedValues, - ) -> Result { + ) -> Result, PartitionKeyExtractionError> { PartitionKey::new(self.get_prepared_metadata(), bound_values) } @@ -519,7 +519,7 @@ pub(crate) struct PartitionKey<'ps> { pk_values: SmallVec<[Option>; PartitionKey::SMALLVEC_ON_STACK_SIZE]>, } -impl<'ps, 'spec: 'ps> PartitionKey<'ps> { +impl<'ps> PartitionKey<'ps> { const SMALLVEC_ON_STACK_SIZE: usize = 8; fn new( diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index 
79d2c25388..77668b28cc 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -329,7 +329,9 @@ where mod tests { use crate::query::Query; use crate::statement::PagingState; - use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing}; + use crate::test_utils::{ + create_new_session_builder, scylla_supports_tablets, setup_tracing, PerformDDL, + }; use crate::transport::partitioner::PartitionerName; use crate::transport::session::Session; use crate::utils::test_utils::unique_keyspace_name; @@ -358,18 +360,15 @@ mod tests { } session - .query_unpaged(create_ks, &[]) + .ddl(create_ks) .await .expect("Could not create keyspace"); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {}.test_table (a int primary key, b int)", - ks - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.test_table (a int primary key, b int)", + ks + )) .await .expect("Could not create table"); @@ -566,10 +565,7 @@ mod tests { let session: CachingSession = create_caching_session().await; session - .execute_unpaged( - "CREATE TABLE IF NOT EXISTS test_batch_table (a int, b int, primary key (a, b))", - (), - ) + .ddl("CREATE TABLE IF NOT EXISTS test_batch_table (a int, b int, primary key (a, b))") .await .unwrap(); @@ -689,7 +685,7 @@ mod tests { let session: CachingSession = CachingSession::from(new_for_test(true).await, 100); session - .execute_unpaged("CREATE TABLE tbl (a int PRIMARY KEY, b int)", ()) + .ddl("CREATE TABLE tbl (a int PRIMARY KEY, b int)") .await .unwrap(); @@ -745,10 +741,7 @@ mod tests { let session: CachingSession = CachingSession::from(new_for_test(false).await, 100); session - .execute_unpaged( - "CREATE TABLE tbl (a int PRIMARY KEY) with cdc = {'enabled': true}", - &(), - ) + .ddl("CREATE TABLE tbl (a int PRIMARY KEY) with cdc = {'enabled': true}") .await .unwrap(); diff --git a/scylla/src/transport/cluster.rs b/scylla/src/transport/cluster.rs index 9e7aae4a36..309fe58157 100644 --- a/scylla/src/transport/cluster.rs +++ b/scylla/src/transport/cluster.rs @@ -62,7 +62,7 @@ pub(crate) struct Cluster { /// Enables printing [Cluster] struct in a neat way, by skipping the rather useless /// print of channels state and printing [ClusterData] neatly. pub(crate) struct ClusterNeatDebug<'a>(pub(crate) &'a Cluster); -impl<'a> std::fmt::Debug for ClusterNeatDebug<'a> { +impl std::fmt::Debug for ClusterNeatDebug<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let cluster = self.0; f.debug_struct("Cluster") @@ -81,7 +81,7 @@ pub struct ClusterData { /// Enables printing [ClusterData] struct in a neat way, skipping the clutter involved by /// [ClusterData::ring] being large and [Self::keyspaces] debug print being very verbose by default. 
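The `impl<'a> Trait for Type<'a>` to `impl Trait for Type<'_>` rewrites here, repeated throughout this patch, are mechanical: when a lifetime parameter appears only in the implemented type, the anonymous lifetime spells the same impl with less noise (newer clippy flags the explicit form under `needless_lifetimes`). A standalone illustration:

    use std::fmt;

    struct NeatDebug<'a>(&'a str);

    // Before: impl<'a> fmt::Debug for NeatDebug<'a> { ... }
    // After: the anonymous lifetime carries the same meaning, since 'a is
    // mentioned nowhere except the implemented type itself.
    impl fmt::Debug for NeatDebug<'_> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.debug_tuple("NeatDebug").field(&self.0).finish()
        }
    }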
pub(crate) struct ClusterDataNeatDebug<'a>(pub(crate) &'a Arc); -impl<'a> std::fmt::Debug for ClusterDataNeatDebug<'a> { +impl std::fmt::Debug for ClusterDataNeatDebug<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let cluster_data = &self.0; diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 26e41ae723..3a550ae6ae 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -193,7 +193,7 @@ impl<'a> OrphanhoodNotifier<'a> { } } -impl<'a> Drop for OrphanhoodNotifier<'a> { +impl Drop for OrphanhoodNotifier<'_> { fn drop(&mut self) { if self.enabled { let _ = self.notification_sender.send(self.request_id); @@ -2397,7 +2397,7 @@ mod tests { use crate::transport::connection::open_connection; use crate::transport::node::ResolvedContactPoint; use crate::transport::topology::UntranslatedEndpoint; - use crate::utils::test_utils::unique_keyspace_name; + use crate::utils::test_utils::{unique_keyspace_name, PerformDDL}; use crate::SessionBuilder; use futures::{StreamExt, TryStreamExt}; use std::collections::HashMap; @@ -2452,17 +2452,14 @@ mod tests { .build() .await .unwrap(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone())).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query_unpaged("DROP TABLE IF EXISTS connection_query_iter_tab", &[]) + .ddl("DROP TABLE IF EXISTS connection_query_iter_tab") .await .unwrap(); session - .query_unpaged( - "CREATE TABLE IF NOT EXISTS connection_query_iter_tab (p int primary key)", - &[], - ) + .ddl("CREATE TABLE IF NOT EXISTS connection_query_iter_tab (p int primary key)") .await .unwrap(); } @@ -2548,13 +2545,10 @@ mod tests { .build() .await .unwrap(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone())).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query_unpaged( - "CREATE TABLE IF NOT EXISTS t (p int primary key, v blob)", - &[], - ) + .ddl("CREATE TABLE IF NOT EXISTS t (p int primary key, v blob)") .await .unwrap(); } @@ -2580,7 +2574,7 @@ mod tests { .await .unwrap(); - connection.query_unpaged("TRUNCATE t").await.unwrap(); + connection.ddl("TRUNCATE t").await.unwrap(); let mut futs = Vec::new(); diff --git a/scylla/src/transport/connection_pool.rs b/scylla/src/transport/connection_pool.rs index 54d70c0eb8..4b3de60c53 100644 --- a/scylla/src/transport/connection_pool.rs +++ b/scylla/src/transport/connection_pool.rs @@ -576,7 +576,7 @@ impl PoolRefiller { req = use_keyspace_request_receiver.recv() => { if let Some(req) = req { debug!("[{}] Requested keyspace change: {}", self.endpoint_description(), req.keyspace_name.as_str()); - self.use_keyspace(&req.keyspace_name, req.response_sender); + self.use_keyspace(req.keyspace_name, req.response_sender); } else { // The keyspace request channel is dropped. // This means that the corresponding pool is dropped. 
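The call above now hands `req.keyspace_name` over by value, and the hunk below adjusts the `use_keyspace` signature to match, so one of the two clones disappears. A sketch of the shape of that change, with illustrative types in place of the driver's:

    #[derive(Clone)]
    struct VerifiedKeyspaceName(String);

    struct PoolRefiller {
        current_keyspace: Option<VerifiedKeyspaceName>,
    }

    impl PoolRefiller {
        // With `keyspace_name: &VerifiedKeyspaceName` the body had to clone
        // twice: once into `current_keyspace`, once for the background task.
        // Taking ownership keeps a single clone.
        fn use_keyspace(&mut self, keyspace_name: VerifiedKeyspaceName) {
            self.current_keyspace = Some(keyspace_name.clone());
            let moved_into_task = keyspace_name; // moved, not cloned again
            drop(moved_into_task);
        }
    }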
@@ -1077,13 +1077,12 @@ impl PoolRefiller { // have their keyspace set. fn use_keyspace( &mut self, - keyspace_name: &VerifiedKeyspaceName, + keyspace_name: VerifiedKeyspaceName, response_sender: tokio::sync::oneshot::Sender>, ) { self.current_keyspace = Some(keyspace_name.clone()); let mut conns = self.conns.clone(); - let keyspace_name = keyspace_name.clone(); let address = self.endpoint.read().unwrap().address(); let connect_timeout = self.pool_config.connection_config.connect_timeout; diff --git a/scylla/src/transport/errors.rs b/scylla/src/transport/errors.rs index 778f33f295..fdd5b9aa05 100644 --- a/scylla/src/transport/errors.rs +++ b/scylla/src/transport/errors.rs @@ -12,6 +12,7 @@ use std::{ sync::Arc, }; +#[allow(deprecated)] use scylla_cql::{ frame::{ frame_errors::{ @@ -34,15 +35,15 @@ use thiserror::Error; use crate::{authentication::AuthError, frame::response}; -use super::{ - iterator::NextRowError, - legacy_query_result::IntoLegacyQueryResultError, - query_result::{IntoRowsResultError, SingleRowError}, -}; +use super::iterator::NextRowError; +#[allow(deprecated)] +use super::legacy_query_result::IntoLegacyQueryResultError; +use super::query_result::{IntoRowsResultError, SingleRowError}; /// Error that occurred during query execution #[derive(Error, Debug, Clone)] #[non_exhaustive] +#[allow(deprecated)] pub enum QueryError { /// Database sent a response containing some error with a message #[error("Database returned an error: {0}, Error message: {1}")] @@ -115,10 +116,16 @@ pub enum QueryError { /// Failed to convert [`QueryResult`][crate::transport::query_result::QueryResult] /// into [`LegacyQueryResult`][crate::transport::legacy_query_result::LegacyQueryResult]. + #[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" + )] + #[allow(deprecated)] #[error("Failed to convert `QueryResult` into `LegacyQueryResult`: {0}")] IntoLegacyQueryResultError(#[from] IntoLegacyQueryResultError), } +#[allow(deprecated)] impl From for QueryError { fn from(serialized_err: SerializeValuesError) -> QueryError { QueryError::BadQuery(BadQuery::SerializeValuesError(serialized_err)) @@ -181,6 +188,7 @@ impl From for NewSessionError { QueryError::BrokenConnection(e) => NewSessionError::BrokenConnection(e), QueryError::UnableToAllocStreamId => NewSessionError::UnableToAllocStreamId, QueryError::RequestTimeout(msg) => NewSessionError::RequestTimeout(msg), + #[allow(deprecated)] QueryError::IntoLegacyQueryResultError(e) => { NewSessionError::IntoLegacyQueryResultError(e) } @@ -204,6 +212,7 @@ impl From for QueryError { /// Error that occurred during session creation #[derive(Error, Debug, Clone)] #[non_exhaustive] +#[allow(deprecated)] pub enum NewSessionError { /// Failed to resolve hostname passed in Session creation #[error("Couldn't resolve any hostname: {0:?}")] @@ -286,6 +295,11 @@ pub enum NewSessionError { /// Failed to convert [`QueryResult`][crate::transport::query_result::QueryResult] /// into [`LegacyQueryResult`][crate::transport::legacy_query_result::LegacyQueryResult]. 
+ #[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" + )] + #[allow(deprecated)] #[error("Failed to convert `QueryResult` into `LegacyQueryResult`: {0}")] IntoLegacyQueryResultError(#[from] IntoLegacyQueryResultError), } @@ -561,7 +575,12 @@ pub enum ViewsMetadataError { #[non_exhaustive] pub enum BadQuery { /// Failed to serialize values passed to a query - values too big + #[deprecated( + since = "0.15.1", + note = "Legacy serialization API is not type-safe and is going to be removed soon" + )] #[error("Serializing values failed: {0} ")] + #[allow(deprecated)] SerializeValuesError(#[from] SerializeValuesError), #[error("Serializing values failed: {0} ")] @@ -695,9 +714,10 @@ pub enum TranslationError { /// It indicates that request needed to initiate a connection failed. #[derive(Error, Debug, Clone)] #[error("Failed to perform a connection setup request. Request: {request_kind}, reason: {error}")] +#[non_exhaustive] pub struct ConnectionSetupRequestError { - request_kind: CqlRequestKind, - error: ConnectionSetupRequestErrorKind, + pub request_kind: CqlRequestKind, + pub error: ConnectionSetupRequestErrorKind, } #[derive(Error, Debug, Clone)] diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 8d7c07be35..f11d9dd73e 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -1154,6 +1154,10 @@ mod legacy { } /// Couldn't get next typed row from the iterator + #[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" + )] #[derive(Error, Debug, Clone)] pub enum LegacyNextRowError { /// Query to fetch next page has failed diff --git a/scylla/src/transport/legacy_query_result.rs b/scylla/src/transport/legacy_query_result.rs index 6b257ffef7..91fe47a58b 100644 --- a/scylla/src/transport/legacy_query_result.rs +++ b/scylla/src/transport/legacy_query_result.rs @@ -11,6 +11,11 @@ use uuid::Uuid; /// Trait used to implement `Vec::into_typed` // This is the only way to add custom method to Vec +#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] +#[allow(deprecated)] pub trait IntoTypedRows { fn into_typed(self) -> TypedRowIter; } @@ -28,6 +33,10 @@ impl IntoTypedRows for Vec { /// Iterator over rows parsed as the given type\ /// Returned by `rows.into_typed::<(...)>()` +#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] pub struct TypedRowIter { row_iter: std::vec::IntoIter, phantom_data: std::marker::PhantomData, @@ -175,7 +184,7 @@ impl LegacyQueryResult { /// Returns a column specification for a column with given name, or None if not found #[inline] - pub fn get_column_spec<'a>(&'a self, name: &str) -> Option<(usize, &'a ColumnSpec<'_>)> { + pub fn get_column_spec<'a>(&'a self, name: &str) -> Option<(usize, &'a ColumnSpec<'a>)> { self.col_specs() .iter() .enumerate() @@ -185,6 +194,10 @@ impl LegacyQueryResult { /// An error that occurred during [`QueryResult`](crate::transport::query_result::QueryResult) /// to [`LegacyQueryResult`] conversion. 
+#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] #[non_exhaustive] #[derive(Error, Clone, Debug)] pub enum IntoLegacyQueryResultError { @@ -205,6 +218,11 @@ pub enum IntoLegacyQueryResultError { /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\ /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ /// It is `None` for queries that can't return rows (e.g `INSERT`). +#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] +#[allow(deprecated)] #[derive(Debug, Clone, Error, PartialEq, Eq)] #[error( "LegacyQueryResult::rows() or similar function called on a bad LegacyQueryResult. @@ -259,6 +277,11 @@ pub enum FirstRowTypedError { FromRowError(#[from] FromRowError), } +#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] +#[allow(deprecated)] #[derive(Debug, Clone, Error, PartialEq, Eq)] pub enum MaybeFirstRowTypedError { /// [`LegacyQueryResult::maybe_first_row_typed()`](LegacyQueryResult::maybe_first_row_typed) called on a bad LegacyQueryResult.\ @@ -273,6 +296,11 @@ pub enum MaybeFirstRowTypedError { FromRowError(#[from] FromRowError), } +#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] +#[allow(deprecated)] #[derive(Debug, Clone, Error, PartialEq, Eq)] pub enum SingleRowError { /// [`LegacyQueryResult::single_row()`](LegacyQueryResult::single_row) called on a bad LegacyQueryResult.\ @@ -287,6 +315,11 @@ pub enum SingleRowError { BadNumberOfRows(usize), } +#[deprecated( + since = "0.15.1", + note = "Legacy deserialization API is inefficient and is going to be removed soon" +)] +#[allow(deprecated)] #[derive(Debug, Clone, Error, PartialEq, Eq)] pub enum SingleRowTypedError { /// [`LegacyQueryResult::single_row_typed()`](LegacyQueryResult::single_row_typed) called on a bad LegacyQueryResult.\ diff --git a/scylla/src/transport/load_balancing/default.rs b/scylla/src/transport/load_balancing/default.rs index b445dea5fb..16fd6ac9aa 100644 --- a/scylla/src/transport/load_balancing/default.rs +++ b/scylla/src/transport/load_balancing/default.rs @@ -707,7 +707,7 @@ impl DefaultPolicy { cluster: &'a ClusterData, statement_type: StatementType, table_spec: &TableSpec, - ) -> Option { + ) -> Option> { match statement_type { StatementType::Lwt => { self.pick_first_replica(ts, replica_location, predicate, cluster, table_spec) @@ -744,7 +744,7 @@ impl DefaultPolicy { predicate: impl Fn(NodeRef<'a>, Shard) -> bool + 'a, cluster: &'a ClusterData, table_spec: &TableSpec, - ) -> Option { + ) -> Option> { match replica_location { NodeLocationCriteria::Any => { // ReplicaSet returned by ReplicaLocator for this case: @@ -819,11 +819,11 @@ impl DefaultPolicy { &'a self, ts: &TokenWithStrategy<'a>, replica_location: NodeLocationCriteria<'a>, - predicate: impl Fn(NodeRef<'_>, Shard) -> bool + 'a, + predicate: impl Fn(NodeRef<'a>, Shard) -> bool + 'a, cluster: &'a ClusterData, statement_type: StatementType, table_spec: &TableSpec, - ) -> impl Iterator, Shard)> { + ) -> impl Iterator, Shard)> { let order = match statement_type { StatementType::Lwt => ReplicaOrder::Deterministic, StatementType::NonLwt => ReplicaOrder::Arbitrary, @@ -862,7 +862,7 @@ impl DefaultPolicy { &'a self, nodes: &'a [Arc], predicate: impl Fn(NodeRef<'a>) -> bool, - ) -> Option> { + ) -> Option> { // Select the 
first node that matches the predicate Self::randomly_rotated_nodes(nodes).find(|&node| predicate(node)) } @@ -873,7 +873,7 @@ impl DefaultPolicy { &'a self, nodes: &'a [Arc], predicate: impl Fn(NodeRef<'a>) -> bool, - ) -> impl Iterator> { + ) -> impl Iterator> { Self::randomly_rotated_nodes(nodes).filter(move |node| predicate(node)) } @@ -2853,6 +2853,7 @@ mod latency_awareness { | QueryError::DbError(DbError::RateLimitReached { .. }, _) => false, // "slow" errors, i.e. ones that are returned after considerable time of query being run + #[allow(deprecated)] QueryError::DbError(_, _) | QueryError::CqlResultParseError(_) | QueryError::CqlErrorParseError(_) @@ -3128,7 +3129,7 @@ mod latency_awareness { use crate::{ load_balancing::default::NodeLocationPreference, routing::Shard, - test_utils::{create_new_session_builder, setup_tracing}, + test_utils::setup_tracing, transport::locator::test::{TABLE_INVALID, TABLE_NTS_RF_2, TABLE_NTS_RF_3}, }; use crate::{ @@ -3141,7 +3142,6 @@ mod latency_awareness { locator::test::{id_to_invalid_addr, A, B, C, D, E, F, G}, ClusterData, NodeAddr, }, - ExecutionProfile, }; use tokio::time::Instant; @@ -3847,28 +3847,6 @@ mod latency_awareness { } } - // This is a regression test for #696. - #[tokio::test] - #[ntest::timeout(1000)] - async fn latency_aware_query_completes() { - setup_tracing(); - let policy = DefaultPolicy::builder() - .latency_awareness(LatencyAwarenessBuilder::default()) - .build(); - let handle = ExecutionProfile::builder() - .load_balancing_policy(policy) - .build() - .into_handle(); - - let session = create_new_session_builder() - .default_execution_profile_handle(handle) - .build() - .await - .unwrap(); - - session.query_unpaged("whatever", ()).await.unwrap_err(); - } - #[tokio::test(start_paused = true)] async fn timestamped_average_works_when_clock_stops() { setup_tracing(); diff --git a/scylla/src/transport/load_balancing/plan.rs b/scylla/src/transport/load_balancing/plan.rs index d8d2862c7d..7ae247ab13 100644 --- a/scylla/src/transport/load_balancing/plan.rs +++ b/scylla/src/transport/load_balancing/plan.rs @@ -65,7 +65,6 @@ enum PlanState<'a> { /// } /// } /// ``` - pub struct Plan<'a> { policy: &'a dyn LoadBalancingPolicy, routing_info: &'a RoutingInfo<'a>, diff --git a/scylla/src/transport/locator/mod.rs b/scylla/src/transport/locator/mod.rs index 2ae46856d1..6770b7b5b8 100644 --- a/scylla/src/transport/locator/mod.rs +++ b/scylla/src/transport/locator/mod.rs @@ -99,14 +99,14 @@ impl ReplicaLocator { } else { tablets.replicas_for_token(token) }; - return ReplicaSet { + ReplicaSet { inner: ReplicaSetInner::PlainSharded(replicas.unwrap_or( // The table is a tablet table, but we don't have information for given token. // Let's just return empty set in this case. 
&[], )), token, - }; + } } else { match strategy { Strategy::SimpleStrategy { replication_factor } => { diff --git a/scylla/src/transport/locator/replicas.rs b/scylla/src/transport/locator/replicas.rs index 30df602695..131402d113 100644 --- a/scylla/src/transport/locator/replicas.rs +++ b/scylla/src/transport/locator/replicas.rs @@ -48,7 +48,7 @@ impl<'a> From<&'a [Arc]> for ReplicasArray<'a> { } } -impl<'a> Index for ReplicasArray<'a> { +impl Index for ReplicasArray<'_> { type Output = Arc; fn index(&self, index: usize) -> &Self::Output { diff --git a/scylla/src/transport/locator/tablets.rs b/scylla/src/transport/locator/tablets.rs index 750818d658..472351ac9c 100644 --- a/scylla/src/transport/locator/tablets.rs +++ b/scylla/src/transport/locator/tablets.rs @@ -490,6 +490,9 @@ impl TabletsInfo { table_spec: &'a TableSpec<'a>, } + // Disable the lint, if there is more than one lifetime included. + // Can be removed once https://github.com/rust-lang/rust-clippy/issues/12495 is fixed. + #[allow(clippy::needless_lifetimes)] impl<'key, 'query> hashbrown::Equivalent> for TableSpecQueryKey<'query> { fn equivalent(&self, key: &TableSpec<'key>) -> bool { self.table_spec == key @@ -574,7 +577,6 @@ impl TabletsInfo { /// * Removing the keyspace and recreating it immediately without tablets. This seems so absurd /// that we most likely don't need to worry about it, but I'm putting it here as a potential problem /// for completeness. - pub(crate) fn perform_maintenance( &mut self, table_predicate: &impl Fn(&TableSpec) -> bool, diff --git a/scylla/src/transport/mod.rs b/scylla/src/transport/mod.rs index be4cfa37ba..55184aadc6 100644 --- a/scylla/src/transport/mod.rs +++ b/scylla/src/transport/mod.rs @@ -25,21 +25,8 @@ pub use connection::SelfIdentity; pub use execution_profile::ExecutionProfile; pub use scylla_cql::frame::request::query::{PagingState, PagingStateResponse}; -#[cfg(test)] -mod authenticate_test; -#[cfg(test)] -mod cql_collections_test; #[cfg(test)] mod session_test; -#[cfg(test)] -mod silent_prepare_batch_test; - -#[cfg(test)] -mod cql_types_test; -#[cfg(test)] -mod cql_value_test; -#[cfg(test)] -mod large_batch_statements_test; pub use cluster::ClusterData; pub use node::{KnownNode, Node, NodeAddr, NodeRef}; diff --git a/scylla/src/transport/query_result.rs b/scylla/src/transport/query_result.rs index 114a433f24..b76ec28e5a 100644 --- a/scylla/src/transport/query_result.rs +++ b/scylla/src/transport/query_result.rs @@ -433,6 +433,17 @@ impl QueryRowsResult { Err(RowsError::TypeCheckFailed(err)) => Err(SingleRowError::TypeCheckFailed(err)), } } + + #[cfg(cpp_rust_unstable)] + pub fn into_inner(self) -> (DeserializedMetadataAndRawRows, Option, Vec) { + let Self { + raw_rows_with_metadata, + tracing_id, + warnings, + } = self; + + (raw_rows_with_metadata, tracing_id, warnings) + } } /// An error returned by [`QueryResult::into_rows_result`] diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index b3efa7e076..90bdfead42 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -83,6 +83,7 @@ pub use crate::transport::connection_pool::PoolSize; // This re-export is to preserve backward compatibility. // Those items are no longer here not to clutter session.rs with legacy things. 
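A pattern that recurs from here on: whenever an item gains `#[deprecated]`, every re-export, match arm, or helper inside the crate that still touches it needs a matching `#[allow(deprecated)]`, because the deprecation lint fires within the defining crate too. Minimal illustration:

    #[deprecated(since = "0.15.1", note = "legacy API, going away soon")]
    pub struct LegacyThing;

    // Without the allow, this re-export would itself trip the `deprecated`
    // lint introduced by the attribute above.
    #[allow(deprecated)]
    pub use self::LegacyThing as LegacyAlias;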
+#[allow(deprecated)] pub use crate::transport::legacy_query_result::{IntoTypedRows, TypedRowIter}; use crate::authentication::AuthenticatorProvider; @@ -2272,7 +2273,7 @@ struct HistoryData<'a> { speculative_id: Option, } -impl<'a> ExecuteQueryContext<'a> { +impl ExecuteQueryContext<'_> { fn log_attempt_start(&self, node_addr: SocketAddr) -> Option { self.history_data.as_ref().map(|hd| { hd.listener @@ -2423,7 +2424,7 @@ impl RequestSpan { pub(crate) fn record_replicas<'a>(&'a self, replicas: &'a [(impl Borrow>, Shard)]) { struct ReplicaIps<'a, N>(&'a [(N, Shard)]); - impl<'a, N> Display for ReplicaIps<'a, N> + impl Display for ReplicaIps<'_, N> where N: Borrow>, { diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index 3e3552c230..cfe2356b99 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -5,7 +5,6 @@ use crate::query::Query; use crate::retry_policy::{QueryInfo, RetryDecision, RetryPolicy, RetrySession}; use crate::routing::Token; use crate::statement::Consistency; -use crate::test_utils::{scylla_supports_tablets, setup_tracing}; use crate::tracing::TracingInfo; use crate::transport::errors::{BadKeyspaceName, BadQuery, DbError, QueryError}; use crate::transport::partitioner::{ @@ -17,7 +16,8 @@ use crate::transport::topology::{ CollectionType, ColumnKind, CqlType, NativeType, UserDefinedType, }; use crate::utils::test_utils::{ - create_new_session_builder, supports_feature, unique_keyspace_name, + create_new_session_builder, scylla_supports_tablets, setup_tracing, supports_feature, + unique_keyspace_name, PerformDDL, }; use crate::ExecutionProfile; use crate::{self as scylla, QueryResult}; @@ -70,15 +70,12 @@ async fn test_unprepared_statement() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))", - ks - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))", + ks + )) .await .unwrap(); @@ -177,19 +174,16 @@ async fn test_prepared_statement() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {}.t2 (a int, b int, c text, primary key (a, b))", - ks - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.t2 (a int, b int, c text, primary key (a, b))", + ks + )) .await .unwrap(); session - .query_unpaged(format!("CREATE TABLE IF NOT EXISTS {}.complex_pk (a int, b int, c text, d int, e int, primary key ((a,b,c),d))", ks), &[]) + .ddl(format!("CREATE TABLE IF NOT EXISTS {}.complex_pk (a int, b int, c text, d int, e int, primary key ((a,b,c),d))", ks)) .await .unwrap(); @@ -401,15 +395,12 @@ 
async fn test_counter_batch() { create_ks += " AND TABLETS = {'enabled': false}" } - session.query_unpaged(create_ks, &[]).await.unwrap(); + session.ddl(create_ks).await.unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {}.t_batch (key int PRIMARY KEY, value counter)", - ks - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.t_batch (key int PRIMARY KEY, value counter)", + ks + )) .await .unwrap(); @@ -449,15 +440,12 @@ async fn test_batch() { let session = Arc::new(create_new_session_builder().build().await.unwrap()); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {}.t_batch (a int, b int, c text, primary key (a, b))", - ks - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.t_batch (a int, b int, c text, primary key (a, b))", + ks + )) .await .unwrap(); @@ -469,7 +457,7 @@ async fn test_batch() { .await .unwrap(); - // TODO: Add API, that supports binding values to statements in batch creation process, + // TODO: Add API that supports binding values to statements in batch creation process, // to avoid problem of statements/values count mismatch use crate::batch::Batch; let mut batch: Batch = Default::default(); @@ -524,10 +512,10 @@ async fn test_batch() { // This statement flushes the prepared statement cache session - .query_unpaged( - format!("ALTER TABLE {}.t_batch WITH gc_grace_seconds = 42", ks), - &[], - ) + .ddl(format!( + "ALTER TABLE {}.t_batch WITH gc_grace_seconds = 42", + ks + )) .await .unwrap(); session.batch(&batch, values).await.unwrap(); @@ -549,21 +537,57 @@ async fn test_batch() { assert_eq!(results, vec![(4, 20, String::from("foobar"))]); } +// This is a regression test for #1134. 
#[tokio::test] -async fn test_token_calculation() { +async fn test_batch_to_multiple_tables() { setup_tracing(); let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); + session.use_keyspace(&ks, true).await.unwrap(); session - .query_unpaged( - format!("CREATE TABLE IF NOT EXISTS {}.t3 (a text primary key)", ks), - &[], + .ddl("CREATE TABLE IF NOT EXISTS t_batch1 (a int, b int, c text, primary key (a, b))") + .await + .unwrap(); + session + .ddl("CREATE TABLE IF NOT EXISTS t_batch2 (a int, b int, c text, primary key (a, b))") + .await + .unwrap(); + + let prepared_statement = session + .prepare( + " + BEGIN BATCH + INSERT INTO t_batch1 (a, b, c) VALUES (?, ?, ?); + INSERT INTO t_batch2 (a, b, c) VALUES (?, ?, ?); + APPLY BATCH; + ", ) .await .unwrap(); + session + .execute_unpaged(&prepared_statement, (1, 2, "ala", 4, 5, "ma")) + .await + .unwrap(); +} + +#[tokio::test] +async fn test_token_calculation() { + setup_tracing(); + let session = create_new_session_builder().build().await.unwrap(); + let ks = unique_keyspace_name(); + + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); + session + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.t3 (a text primary key)", + ks + )) + .await + .unwrap(); + // Refresh metadata as `ClusterData::compute_token` use them session.await_schema_agreement().await.unwrap(); session.refresh_metadata().await.unwrap(); @@ -626,12 +650,12 @@ async fn test_token_awareness() { create_ks += " AND TABLETS = {'enabled': false}" } - session.query_unpaged(create_ks, &[]).await.unwrap(); + session.ddl(create_ks).await.unwrap(); session - .query_unpaged( - format!("CREATE TABLE IF NOT EXISTS {}.t (a text primary key)", ks), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.t (a text primary key)", + ks + )) .await .unwrap(); @@ -678,13 +702,13 @@ async fn test_use_keyspace() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session - .query_unpaged( - format!("CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", ks), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", + ks + )) .await .unwrap(); @@ -774,22 +798,22 @@ async fn test_use_keyspace_case_sensitivity() { let ks_lower = unique_keyspace_name().to_lowercase(); let ks_upper = ks_lower.to_uppercase(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS \"{}\" WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks_lower), &[]).await.unwrap(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS \"{}\" WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks_upper), &[]).await.unwrap(); + 
session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS \"{}\" WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks_lower)).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS \"{}\" WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks_upper)).await.unwrap(); session - .query_unpaged( - format!("CREATE TABLE {}.tab (a text primary key)", ks_lower), - &[], - ) + .ddl(format!( + "CREATE TABLE {}.tab (a text primary key)", + ks_lower + )) .await .unwrap(); session - .query_unpaged( - format!("CREATE TABLE \"{}\".tab (a text primary key)", ks_upper), - &[], - ) + .ddl(format!( + "CREATE TABLE \"{}\".tab (a text primary key)", + ks_upper + )) .await .unwrap(); @@ -850,13 +874,13 @@ async fn test_raw_use_keyspace() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session - .query_unpaged( - format!("CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", ks), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", + ks + )) .await .unwrap(); @@ -928,9 +952,9 @@ async fn test_db_errors() { )); // AlreadyExists when creating a keyspace for the second time - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); - let create_keyspace_res = session.query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await; + let create_keyspace_res = session.ddl(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await; let keyspace_exists_error: DbError = match create_keyspace_res { Err(QueryError::DbError(e, _)) => e, _ => panic!("Second CREATE KEYSPACE didn't return an error!"), @@ -946,15 +970,15 @@ async fn test_db_errors() { // AlreadyExists when creating a table for the second time session - .query_unpaged( - format!("CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", ks), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", + ks + )) .await .unwrap(); let create_table_res = session - .query_unpaged(format!("CREATE TABLE {}.tab (a text primary key)", ks), &[]) + .ddl(format!("CREATE TABLE {}.tab (a text primary key)", ks)) .await; let create_tab_error: DbError = match create_table_res { Err(QueryError::DbError(e, _)) => e, @@ -976,13 +1000,13 @@ async fn test_tracing() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session - .query_unpaged( - 
format!("CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", ks), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.tab (a text primary key)", + ks + )) .await .unwrap(); @@ -1200,15 +1224,12 @@ async fn test_timestamp() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {}.t_timestamp (a text, b text, primary key (a))", - ks - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.t_timestamp (a text, b text, primary key (a))", + ks + )) .await .unwrap(); @@ -1469,7 +1490,7 @@ async fn test_schema_types_in_metadata() { let ks = unique_keyspace_name(); session - .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .ddl(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)) .await .unwrap(); @@ -1479,31 +1500,27 @@ async fn test_schema_types_in_metadata() { .unwrap(); session - .query_unpaged( + .ddl( "CREATE TYPE IF NOT EXISTS type_a ( a map>, text>, b frozen>, frozen>>> )", - &[], ) .await .unwrap(); session - .query_unpaged("CREATE TYPE IF NOT EXISTS type_b (a int, b text)", &[]) + .ddl("CREATE TYPE IF NOT EXISTS type_b (a int, b text)") .await .unwrap(); session - .query_unpaged( - "CREATE TYPE IF NOT EXISTS type_c (a map>, frozen>)", - &[], - ) + .ddl("CREATE TYPE IF NOT EXISTS type_c (a map>, frozen>)") .await .unwrap(); session - .query_unpaged( + .ddl( "CREATE TABLE IF NOT EXISTS table_a ( a frozen PRIMARY KEY, b type_b, @@ -1511,18 +1528,16 @@ async fn test_schema_types_in_metadata() { d map>>, e tuple )", - &[], ) .await .unwrap(); session - .query_unpaged( + .ddl( "CREATE TABLE IF NOT EXISTS table_b ( a text PRIMARY KEY, b frozen> )", - &[], ) .await .unwrap(); @@ -1628,7 +1643,7 @@ async fn test_user_defined_types_in_metadata() { let ks = unique_keyspace_name(); session - .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .ddl(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)) .await .unwrap(); @@ -1638,26 +1653,22 @@ async fn test_user_defined_types_in_metadata() { .unwrap(); session - .query_unpaged( + .ddl( "CREATE TYPE IF NOT EXISTS type_a ( a map>, text>, b frozen>, frozen>>> )", - &[], ) .await .unwrap(); session - .query_unpaged("CREATE TYPE IF NOT EXISTS type_b (a int, b text)", &[]) + .ddl("CREATE TYPE IF NOT EXISTS type_b (a int, b text)") .await .unwrap(); session - .query_unpaged( - "CREATE TYPE IF NOT EXISTS type_c (a map>, frozen>)", - &[], - ) + .ddl("CREATE TYPE IF NOT EXISTS type_c (a map>, frozen>)") .await .unwrap(); @@ -1692,7 +1703,7 @@ async fn test_column_kinds_in_metadata() { let ks = unique_keyspace_name(); session - .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .ddl(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 
1}}", ks)) .await .unwrap(); @@ -1702,7 +1713,7 @@ async fn test_column_kinds_in_metadata() { .unwrap(); session - .query_unpaged( + .ddl( "CREATE TABLE IF NOT EXISTS t ( a int, b int, @@ -1712,7 +1723,6 @@ async fn test_column_kinds_in_metadata() { f int, PRIMARY KEY ((c, e), b, a) )", - &[], ) .await .unwrap(); @@ -1738,7 +1748,7 @@ async fn test_primary_key_ordering_in_metadata() { let ks = unique_keyspace_name(); session - .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .ddl(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)) .await .unwrap(); @@ -1748,7 +1758,7 @@ async fn test_primary_key_ordering_in_metadata() { .unwrap(); session - .query_unpaged( + .ddl( "CREATE TABLE IF NOT EXISTS t ( a int, b int, @@ -1761,7 +1771,6 @@ async fn test_primary_key_ordering_in_metadata() { i int STATIC, PRIMARY KEY ((c, e), b, a) )", - &[], ) .await .unwrap(); @@ -1794,7 +1803,7 @@ async fn test_table_partitioner_in_metadata() { create_ks += " AND TABLETS = {'enabled': false}"; } - session.query_unpaged(create_ks, &[]).await.unwrap(); + session.ddl(create_ks).await.unwrap(); session .query_unpaged(format!("USE {}", ks), &[]) @@ -1802,9 +1811,8 @@ async fn test_table_partitioner_in_metadata() { .unwrap(); session - .query_unpaged( + .ddl( "CREATE TABLE t (pk int, ck int, v int, PRIMARY KEY (pk, ck, v))WITH cdc = {'enabled':true}", - &[], ) .await .unwrap(); @@ -1835,7 +1843,7 @@ async fn test_turning_off_schema_fetching() { let ks = unique_keyspace_name(); session - .query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)) .await .unwrap(); @@ -1845,31 +1853,27 @@ async fn test_turning_off_schema_fetching() { .unwrap(); session - .query_unpaged( + .ddl( "CREATE TYPE IF NOT EXISTS type_a ( a map>, text>, b frozen>, frozen>>> )", - &[], ) .await .unwrap(); session - .query_unpaged("CREATE TYPE IF NOT EXISTS type_b (a int, b text)", &[]) + .ddl("CREATE TYPE IF NOT EXISTS type_b (a int, b text)") .await .unwrap(); session - .query_unpaged( - "CREATE TYPE IF NOT EXISTS type_c (a map>, frozen>)", - &[], - ) + .ddl("CREATE TYPE IF NOT EXISTS type_c (a map>, frozen>)") .await .unwrap(); session - .query_unpaged( + .ddl( "CREATE TABLE IF NOT EXISTS table_a ( a frozen PRIMARY KEY, b type_b, @@ -1877,7 +1881,6 @@ async fn test_turning_off_schema_fetching() { d map>>, e tuple )", - &[], ) .await .unwrap(); @@ -1909,7 +1912,7 @@ async fn test_named_bind_markers() { let ks = unique_keyspace_name(); session - .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .ddl(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)) .await .unwrap(); @@ -1919,10 +1922,7 @@ async fn test_named_bind_markers() { .unwrap(); session - .query_unpaged( - "CREATE TABLE t (pk int, ck int, v int, PRIMARY KEY (pk, ck, v))", - &[], - ) + .ddl("CREATE TABLE t (pk int, ck int, v int, PRIMARY KEY (pk, ck, v))") .await .unwrap(); @@ -1974,11 +1974,11 @@ async fn test_prepared_partitioner() { create_ks += " AND TABLETS = {'enabled': false}" } - session.query_unpaged(create_ks, &[]).await.unwrap(); + 
session.ddl(create_ks).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query_unpaged("CREATE TABLE IF NOT EXISTS t1 (a int primary key)", &[]) + .ddl("CREATE TABLE IF NOT EXISTS t1 (a int primary key)") .await .unwrap(); @@ -2000,10 +2000,7 @@ async fn test_prepared_partitioner() { } session - .query_unpaged( - "CREATE TABLE IF NOT EXISTS t2 (a int primary key) WITH cdc = {'enabled':true}", - &[], - ) + .ddl("CREATE TABLE IF NOT EXISTS t2 (a int primary key) WITH cdc = {'enabled':true}") .await .unwrap(); @@ -2023,14 +2020,14 @@ async fn test_prepared_partitioner() { async fn rename(session: &Session, rename_str: &str) { session - .query_unpaged(format!("ALTER TABLE tab RENAME {}", rename_str), ()) + .ddl(format!("ALTER TABLE tab RENAME {}", rename_str)) .await .unwrap(); } async fn rename_caching(session: &CachingSession, rename_str: &str) { session - .execute_unpaged(format!("ALTER TABLE tab RENAME {}", rename_str), &()) + .ddl(format!("ALTER TABLE tab RENAME {}", rename_str)) .await .unwrap(); } @@ -2049,14 +2046,11 @@ async fn test_unprepared_reprepare_in_execute() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query_unpaged( - "CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))", - &[], - ) + .ddl("CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))") .await .unwrap(); @@ -2112,14 +2106,11 @@ async fn test_unusual_valuelists() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query_unpaged( - "CREATE TABLE IF NOT EXISTS tab (a int, b int, c varchar, primary key (a, b, c))", - &[], - ) + .ddl("CREATE TABLE IF NOT EXISTS tab (a int, b int, c varchar, primary key (a, b, c))") .await .unwrap(); @@ -2182,14 +2173,11 @@ async fn test_unprepared_reprepare_in_batch() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query_unpaged( - "CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))", - &[], - ) + .ddl("CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))") .await .unwrap(); @@ -2249,16 +2237,13 @@ async fn test_unprepared_reprepare_in_caching_session_execute() { let session = create_new_session_builder().build().await.unwrap(); 
let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); let caching_session: CachingSession = CachingSession::from(session, 64); caching_session - .execute_unpaged( - "CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))", - &[], - ) + .ddl("CREATE TABLE IF NOT EXISTS tab (a int, b int, c int, primary key (a, b, c))") .await .unwrap(); @@ -2311,16 +2296,16 @@ async fn test_views_in_schema_info() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query_unpaged("CREATE TABLE t(id int PRIMARY KEY, v int)", &[]) + .ddl("CREATE TABLE t(id int PRIMARY KEY, v int)") .await .unwrap(); - session.query_unpaged("CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM t WHERE v IS NOT NULL PRIMARY KEY (v, id)", &[]).await.unwrap(); - session.query_unpaged("CREATE MATERIALIZED VIEW mv2 AS SELECT id, v FROM t WHERE v IS NOT NULL PRIMARY KEY (v, id)", &[]).await.unwrap(); + session.ddl("CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM t WHERE v IS NOT NULL PRIMARY KEY (v, id)").await.unwrap(); + session.ddl("CREATE MATERIALIZED VIEW mv2 AS SELECT id, v FROM t WHERE v IS NOT NULL PRIMARY KEY (v, id)").await.unwrap(); session.await_schema_agreement().await.unwrap(); session.refresh_metadata().await.unwrap(); @@ -2384,14 +2369,11 @@ async fn test_prepare_batch() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query_unpaged( - "CREATE TABLE test_batch_table (a int, b int, primary key (a, b))", - (), - ) + .ddl("CREATE TABLE test_batch_table (a int, b int, primary key (a, b))") .await .unwrap(); @@ -2481,14 +2463,11 @@ async fn test_refresh_metadata_after_schema_agreement() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query_unpaged( - "CREATE TYPE udt (field1 int, field2 uuid, field3 text)", - &[], - ) + .ddl("CREATE TYPE udt (field1 int, field2 uuid, field3 text)") .await 
.unwrap(); @@ -2527,9 +2506,9 @@ async fn test_rate_limit_exceeded_exception() { } let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); - session.query_unpaged("CREATE TABLE tbl (pk int PRIMARY KEY, v int) WITH per_partition_rate_limit = {'max_writes_per_second': 1}", ()).await.unwrap(); + session.ddl("CREATE TABLE tbl (pk int PRIMARY KEY, v int) WITH per_partition_rate_limit = {'max_writes_per_second': 1}").await.unwrap(); let stmt = session .prepare("INSERT INTO tbl (pk, v) VALUES (?, ?)") @@ -2571,14 +2550,11 @@ async fn test_batch_lwts() { if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } - session.query_unpaged(create_ks, &[]).await.unwrap(); + session.ddl(create_ks).await.unwrap(); session.use_keyspace(ks.clone(), false).await.unwrap(); session - .query_unpaged( - "CREATE TABLE tab (p1 int, c1 int, r1 int, r2 int, primary key (p1, c1))", - (), - ) + .ddl("CREATE TABLE tab (p1 int, c1 int, r1 int, r2 int, primary key (p1, c1))") .await .unwrap(); @@ -2699,7 +2675,7 @@ async fn test_keyspaces_to_fetch() { let session_default = create_new_session_builder().build().await.unwrap(); for ks in [&ks1, &ks2] { session_default - .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) + .ddl(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)) .await .unwrap(); } @@ -2776,10 +2752,10 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } - session.query_unpaged(create_ks, ()).await.unwrap(); + session.ddl(create_ks).await.unwrap(); session.use_keyspace(ks, true).await.unwrap(); session - .query_unpaged("CREATE TABLE t (pk int PRIMARY KEY, v int)", ()) + .ddl("CREATE TABLE t (pk int PRIMARY KEY, v int)") .await .unwrap(); @@ -2819,15 +2795,12 @@ async fn test_iter_methods_with_modification_statements() { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))", - ks - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))", + ks + )) .await .unwrap(); @@ -2870,7 +2843,7 @@ async fn test_get_keyspace_name() { // No keyspace is set in config, so get_keyspace() should return None. 
let session = create_new_session_builder().build().await.unwrap(); assert_eq!(session.get_keyspace(), None); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); assert_eq!(session.get_keyspace(), None); // Call use_keyspace(), get_keyspace now should return the new keyspace name @@ -2896,25 +2869,19 @@ async fn simple_strategy_test() { let session = create_new_session_builder().build().await.unwrap(); session - .query_unpaged( - format!( - "CREATE KEYSPACE {} WITH REPLICATION = \ + .ddl(format!( + "CREATE KEYSPACE {} WITH REPLICATION = \ {{'class': 'SimpleStrategy', 'replication_factor': 1}}", - ks - ), - (), - ) + ks + )) .await .unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE {}.tab (p int, c int, r int, PRIMARY KEY (p, c, r))", - ks - ), - (), - ) + .ddl(format!( + "CREATE TABLE {}.tab (p int, c int, r int, PRIMARY KEY (p, c, r))", + ks + )) .await .unwrap(); @@ -2961,7 +2928,7 @@ async fn test_manual_primary_key_computation() { // Setup session let ks = unique_keyspace_name(); let session = create_new_session_builder().build().await.unwrap(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(&ks, true).await.unwrap(); async fn assert_tokens_equal( @@ -2996,10 +2963,7 @@ async fn test_manual_primary_key_computation() { // Single-column partition key { session - .query_unpaged( - "CREATE TABLE IF NOT EXISTS t2 (a int, b int, c text, primary key (a, b))", - &[], - ) + .ddl("CREATE TABLE IF NOT EXISTS t2 (a int, b int, c text, primary key (a, b))") .await .unwrap(); @@ -3027,7 +2991,7 @@ async fn test_manual_primary_key_computation() { // Composite partition key { session - .query_unpaged("CREATE TABLE IF NOT EXISTS complex_pk (a int, b int, c text, d int, e int, primary key ((a,b,c),d))", &[]) + .ddl("CREATE TABLE IF NOT EXISTS complex_pk (a int, b int, c text, d int, e int, primary key ((a,b,c),d))") .await .unwrap(); @@ -3070,7 +3034,7 @@ async fn test_deserialize_empty_collections() { // Setup session. let ks = unique_keyspace_name(); let session = create_new_session_builder().build().await.unwrap(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(&ks, true).await.unwrap(); async fn deserialize_empty_collection< @@ -3086,7 +3050,7 @@ async fn test_deserialize_empty_collections() { "CREATE TABLE {} (n int primary key, c {}<{}>)", table_name, collection_name, collection_type_params ); - session.query_unpaged(query, ()).await.unwrap(); + session.ddl(query).await.unwrap(); // Populate the table with an empty collection, effectively inserting null as the collection. 
session diff --git a/scylla/src/transport/speculative_execution.rs b/scylla/src/transport/speculative_execution.rs index 60344d0a02..756ed3d895 100644 --- a/scylla/src/transport/speculative_execution.rs +++ b/scylla/src/transport/speculative_execution.rs @@ -110,6 +110,7 @@ fn can_be_ignored(result: &Result) -> bool { QueryError::EmptyPlan => false, // Errors that should not appear here, thus should not be ignored + #[allow(deprecated)] QueryError::NextRowError(_) | QueryError::IntoLegacyQueryResultError(_) | QueryError::TimeoutError diff --git a/scylla/src/utils/mod.rs b/scylla/src/utils/mod.rs index c83adbf7e7..bf0f94e752 100644 --- a/scylla/src/utils/mod.rs +++ b/scylla/src/utils/mod.rs @@ -1,4 +1,6 @@ pub(crate) mod parse; pub(crate) mod pretty; -pub mod test_utils; + +#[cfg(test)] +pub(crate) mod test_utils; diff --git a/scylla/src/utils/pretty.rs b/scylla/src/utils/pretty.rs index bd3f06487a..63ed28b42b 100644 --- a/scylla/src/utils/pretty.rs +++ b/scylla/src/utils/pretty.rs @@ -9,7 +9,7 @@ use std::fmt::{Display, LowerHex, UpperHex}; pub(crate) struct HexBytes<'a>(pub(crate) &'a [u8]); -impl<'a> LowerHex for HexBytes<'a> { +impl LowerHex for HexBytes<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for b in self.0 { write!(f, "{:02x}", b)?; @@ -18,7 +18,7 @@ impl<'a> LowerHex for HexBytes<'a> { } } -impl<'a> UpperHex for HexBytes<'a> { +impl UpperHex for HexBytes<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for b in self.0 { write!(f, "{:02X}", b)?; @@ -154,7 +154,7 @@ where pub(crate) struct CqlStringLiteralDisplayer<'a>(&'a str); -impl<'a> Display for CqlStringLiteralDisplayer<'a> { +impl Display for CqlStringLiteralDisplayer<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // CQL string literals use single quotes. 
The only character that // needs escaping is singular quote, and escaping is done by repeating diff --git a/scylla/src/utils/test_utils.rs b/scylla/src/utils/test_utils.rs index d15284d61e..7e258c352c 100644 --- a/scylla/src/utils/test_utils.rs +++ b/scylla/src/utils/test_utils.rs @@ -1,9 +1,12 @@ -use scylla_cql::frame::response::result::Row; - -#[cfg(test)] +use crate::load_balancing::{FallbackPlan, LoadBalancingPolicy, RoutingInfo}; +use crate::query::Query; +use crate::routing::Shard; +use crate::transport::connection::Connection; +use crate::transport::errors::QueryError; use crate::transport::session_builder::{GenericSessionBuilder, SessionBuilderKind}; -use crate::Session; -#[cfg(test)] +use crate::transport::{ClusterData, NodeRef}; +use crate::{CachingSession, ExecutionProfile, Session}; +use std::sync::Arc; use std::{num::NonZeroU32, time::Duration}; use std::{ sync::atomic::{AtomicUsize, Ordering}, @@ -12,7 +15,7 @@ use std::{ static UNIQUE_COUNTER: AtomicUsize = AtomicUsize::new(0); -pub fn unique_keyspace_name() -> String { +pub(crate) fn unique_keyspace_name() -> String { let cnt = UNIQUE_COUNTER.fetch_add(1, Ordering::SeqCst); let name = format!( "test_rust_{}_{}", @@ -26,7 +29,6 @@ pub fn unique_keyspace_name() -> String { name } -#[cfg(test)] pub(crate) async fn supports_feature(session: &Session, feature: &str) -> bool { // Cassandra doesn't have a concept of features, so first detect // if there is the `supported_features` column in system.local @@ -44,15 +46,15 @@ pub(crate) async fn supports_feature(session: &Session, feature: &str) -> bool { return false; } - let (features,): (Option<String>,) = session + let result = session .query_unpaged("SELECT supported_features FROM system.local", ()) .await .unwrap() .into_rows_result() - .unwrap() - .single_row() .unwrap(); + let (features,): (Option<&str>,) = result.single_row().unwrap(); + features .unwrap_or_default() .split(',') @@ -62,8 +64,7 @@ pub(crate) async fn supports_feature(session: &Session, feature: &str) -> bool { // Creates a generic session builder based on conditional compilation configuration // For SessionBuilder of DefaultMode type, adds localhost to known hosts, as all of the tests // connect to localhost. -#[cfg(test)] -pub fn create_new_session_builder() -> GenericSessionBuilder { +pub(crate) fn create_new_session_builder() -> GenericSessionBuilder { let session_builder = { #[cfg(not(scylla_cloud_tests))] { @@ -96,26 +97,90 @@ pub fn create_new_session_builder() -> GenericSessionBuilder bool { - let result = session - .query_unpaged( - "select column_name from system_schema.columns where - keyspace_name = 'system_schema' - and table_name = 'scylla_keyspaces' - and column_name = 'initial_tablets'", - &[], - ) - .await - .unwrap() - .into_rows_result(); - - result.is_ok_and(|rows_result| rows_result.single_row::<Row>().is_ok()) +pub(crate) async fn scylla_supports_tablets(session: &Session) -> bool { + supports_feature(session, "TABLETS").await } -#[cfg(test)] pub(crate) fn setup_tracing() { let _ = tracing_subscriber::fmt::fmt() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .with_writer(tracing_subscriber::fmt::TestWriter::new()) .try_init(); } + +// This LBP produces a predictable query plan - it orders the nodes +// by their position in the ring. +// This is to make sure that all DDL queries land on the same node, +// to prevent errors from concurrent DDL queries executed on different nodes.
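+// Because pick() below always returns the first node of that order (and
+// fallback() starts from it as well), every session builds an identical plan,
+// so DDL statements issued by concurrently running tests serialize behind one node.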
+#[derive(Debug)] +struct SchemaQueriesLBP; + +impl LoadBalancingPolicy for SchemaQueriesLBP { + fn pick<'a>( + &'a self, + _query: &'a RoutingInfo, + cluster: &'a ClusterData, + ) -> Option<(NodeRef<'a>, Option<Shard>)> { + // I'm not sure if Scylla can handle concurrent DDL queries to different shards, + // in other words if its local lock is per-node or per-shard. + // Just to be safe, let's use an explicit shard. + cluster.get_nodes_info().first().map(|node| (node, Some(0))) + } + + fn fallback<'a>( + &'a self, + _query: &'a RoutingInfo, + cluster: &'a ClusterData, + ) -> FallbackPlan<'a> { + Box::new(cluster.get_nodes_info().iter().map(|node| (node, Some(0)))) + } + + fn name(&self) -> String { + "SchemaQueriesLBP".to_owned() + } +} + +fn apply_ddl_lbp(query: &mut Query) { + let policy = query + .get_execution_profile_handle() + .map(|profile| profile.pointee_to_builder()) + .unwrap_or(ExecutionProfile::builder()) + .load_balancing_policy(Arc::new(SchemaQueriesLBP)) + .build(); + query.set_execution_profile_handle(Some(policy.into_handle())); +} + +// This is just to make it easier to call the above function: +// we'll be able to do session.ddl(...) instead of perform_ddl(&session, ...) +// or something like that. +#[async_trait::async_trait] +pub(crate) trait PerformDDL { + async fn ddl(&self, query: impl Into<Query> + Send) -> Result<(), QueryError>; +} + +#[async_trait::async_trait] +impl PerformDDL for Session { + async fn ddl(&self, query: impl Into<Query> + Send) -> Result<(), QueryError> { + let mut query = query.into(); + apply_ddl_lbp(&mut query); + self.query_unpaged(query, &[]).await.map(|_| ()) + } +} + +#[async_trait::async_trait] +impl PerformDDL for CachingSession { + async fn ddl(&self, query: impl Into<Query> + Send) -> Result<(), QueryError> { + let mut query = query.into(); + apply_ddl_lbp(&mut query); + self.execute_unpaged(query, &[]).await.map(|_| ()) + } +} + +#[async_trait::async_trait] +impl PerformDDL for Connection { + async fn ddl(&self, query: impl Into<Query> + Send) -> Result<(), QueryError> { + let mut query = query.into(); + apply_ddl_lbp(&mut query); + self.query_unpaged(query).await.map(|_| ()) + } +} diff --git a/scylla/src/transport/authenticate_test.rs b/scylla/tests/integration/authenticate.rs similarity index 70% rename from scylla/src/transport/authenticate_test.rs rename to scylla/tests/integration/authenticate.rs index 78e72dea40..c2a6569cca 100644 --- a/scylla/src/transport/authenticate_test.rs +++ b/scylla/tests/integration/authenticate.rs @@ -1,8 +1,7 @@ -use crate::authentication::{AuthError, AuthenticatorProvider, AuthenticatorSession}; -use crate::test_utils::setup_tracing; -use crate::utils::test_utils::unique_keyspace_name; +use crate::utils::{setup_tracing, unique_keyspace_name, PerformDDL}; use async_trait::async_trait; use bytes::{BufMut, BytesMut}; +use scylla::authentication::{AuthError, AuthenticatorProvider, AuthenticatorSession}; use std::sync::Arc; #[tokio::test] @@ -13,7 +12,7 @@ async fn authenticate_superuser() { println!("Connecting to {} with cassandra superuser ...", uri); - let session = crate::SessionBuilder::new() + let session = scylla::SessionBuilder::new() .known_node(uri) .user("cassandra", "cassandra") .build() @@ -21,12 +20,9 @@ async fn authenticate_superuser() { .unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH
REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); - session - .query_unpaged("DROP TABLE IF EXISTS t;", &[]) - .await - .unwrap(); + session.ddl("DROP TABLE IF EXISTS t;").await.unwrap(); println!("Ok."); } @@ -72,7 +68,7 @@ async fn custom_authentication() { println!("Connecting to {} with cassandra superuser ...", uri); - let session = crate::SessionBuilder::new() + let session = scylla::SessionBuilder::new() .known_node(uri) .authenticator_provider(Arc::new(CustomAuthenticatorProvider)) .build() @@ -80,12 +76,9 @@ async fn custom_authentication() { .unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); - session - .query_unpaged("DROP TABLE IF EXISTS t;", &[]) - .await - .unwrap(); + session.ddl("DROP TABLE IF EXISTS t;").await.unwrap(); println!("Ok."); } diff --git a/scylla/tests/integration/batch.rs b/scylla/tests/integration/batch.rs new file mode 100644 index 0000000000..d711cb5014 --- /dev/null +++ b/scylla/tests/integration/batch.rs @@ -0,0 +1,74 @@ +use scylla::batch::Batch; +use scylla::batch::BatchType; +use scylla::frame::frame_errors::BatchSerializationError; +use scylla::frame::frame_errors::CqlRequestSerializationError; +use scylla::query::Query; +use scylla::transport::errors::QueryError; + +use crate::utils::create_new_session_builder; +use crate::utils::setup_tracing; +use crate::utils::unique_keyspace_name; +use crate::utils::PerformDDL; + +use assert_matches::assert_matches; + +#[tokio::test] +#[ntest::timeout(60000)] +async fn batch_statements_and_values_mismatch_detected() { + setup_tracing(); + let session = create_new_session_builder().build().await.unwrap(); + let ks = unique_keyspace_name(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); + session.use_keyspace(ks, false).await.unwrap(); + session + .ddl("CREATE TABLE IF NOT EXISTS batch_serialization_test (p int PRIMARY KEY, val int)") + .await + .unwrap(); + + let mut batch = Batch::new(BatchType::Logged); + let stmt = session + .prepare("INSERT INTO batch_serialization_test (p, val) VALUES (?, ?)") + .await + .unwrap(); + batch.append_statement(stmt.clone()); + batch.append_statement(Query::new( + "INSERT INTO batch_serialization_test (p, val) VALUES (3, 4)", + )); + batch.append_statement(stmt); + + // Subtest 1: counts are correct + { + session.batch(&batch, &((1, 2), (), (5, 6))).await.unwrap(); + } + + // Subtest 2: not enough values + { + let err = session.batch(&batch, &((1, 2), ())).await.unwrap_err(); + assert_matches!( + err, + QueryError::CqlRequestSerialization(CqlRequestSerializationError::BatchSerialization( + BatchSerializationError::ValuesAndStatementsLengthMismatch { + n_value_lists: 2, + n_statements: 3 + } + )) + ) + } + + // Subtest 3: too many values + { + let err = session + .batch(&batch, &((1, 2), (), (5, 6), (7, 8))) + .await + .unwrap_err(); + assert_matches!( + err, + QueryError::CqlRequestSerialization(CqlRequestSerializationError::BatchSerialization( + 
BatchSerializationError::ValuesAndStatementsLengthMismatch { + n_value_lists: 4, + n_statements: 3 + } + )) + ) + } +} diff --git a/scylla/tests/integration/consistency.rs b/scylla/tests/integration/consistency.rs index 09780066ac..a503fb7d3b 100644 --- a/scylla/tests/integration/consistency.rs +++ b/scylla/tests/integration/consistency.rs @@ -1,11 +1,9 @@ -use crate::utils::{setup_tracing, test_with_3_node_cluster}; - +use crate::utils::{setup_tracing, test_with_3_node_cluster, unique_keyspace_name, PerformDDL}; use scylla::execution_profile::{ExecutionProfileBuilder, ExecutionProfileHandle}; use scylla::load_balancing::{DefaultPolicy, LoadBalancingPolicy, RoutingInfo}; use scylla::prepared_statement::PreparedStatement; use scylla::retry_policy::FallthroughRetryPolicy; use scylla::routing::{Shard, Token}; -use scylla::test_utils::unique_keyspace_name; use scylla::transport::NodeRef; use scylla::Session; use scylla_cql::frame::response::result::TableSpec; @@ -60,10 +58,10 @@ const CREATE_TABLE_STR: &str = "CREATE TABLE consistency_tests (a int, b int, PR const QUERY_STR: &str = "INSERT INTO consistency_tests (a, b) VALUES (?, 1)"; async fn create_schema(session: &Session, ks: &str) { - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks)).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); - session.query_unpaged(CREATE_TABLE_STR, &[]).await.unwrap(); + session.ddl(CREATE_TABLE_STR).await.unwrap(); } // The following functions perform a request with consistencies set directly on a statement. 
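For test authors, the DDL idiom that recurs throughout these hunks reduces to a short pattern. Below is a minimal sketch (the keyspace and table layout are illustrative; the imports assume the `utils` module from `scylla/tests/integration` introduced above):

```rust
use crate::utils::{create_new_session_builder, unique_keyspace_name, PerformDDL};

#[tokio::test]
async fn example_ddl_usage() {
    let session = create_new_session_builder().build().await.unwrap();
    let ks = unique_keyspace_name();
    // Both DDL statements are routed through SchemaQueriesLBP, so they cannot
    // race with schema changes sent concurrently to other nodes.
    session
        .ddl(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks))
        .await
        .unwrap();
    session.use_keyspace(ks, false).await.unwrap();
    session.ddl("CREATE TABLE t (a int primary key)").await.unwrap();
}
```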
diff --git a/scylla/src/transport/cql_collections_test.rs b/scylla/tests/integration/cql_collections.rs similarity index 91% rename from scylla/src/transport/cql_collections_test.rs rename to scylla/tests/integration/cql_collections.rs index 9cdb34ce5a..a36d1bc16d 100644 --- a/scylla/src/transport/cql_collections_test.rs +++ b/scylla/tests/integration/cql_collections.rs @@ -1,16 +1,16 @@ -use crate::deserialize::DeserializeOwnedValue; -use crate::transport::session::Session; - -use crate::frame::response::result::CqlValue; -use crate::test_utils::{create_new_session_builder, setup_tracing}; -use crate::utils::test_utils::unique_keyspace_name; +use crate::utils::{ + create_new_session_builder, setup_tracing, unique_keyspace_name, DeserializeOwnedValue, + PerformDDL, +}; +use scylla::frame::response::result::CqlValue; +use scylla::Session; use scylla_cql::types::serialize::value::SerializeValue; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; async fn connect() -> Session { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session @@ -18,13 +18,10 @@ async fn connect() -> Session { async fn create_table(session: &Session, table_name: &str, value_type: &str) { session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {} (p int PRIMARY KEY, val {})", - table_name, value_type - ), - (), - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {} (p int PRIMARY KEY, val {})", + table_name, value_type + )) .await .unwrap(); } diff --git a/scylla/src/transport/cql_types_test.rs b/scylla/tests/integration/cql_types.rs similarity index 94% rename from scylla/src/transport/cql_types_test.rs rename to scylla/tests/integration/cql_types.rs index 30f406a1db..1125914283 100644 --- a/scylla/src/transport/cql_types_test.rs +++ b/scylla/tests/integration/cql_types.rs @@ -1,19 +1,19 @@ -use crate as scylla; -use crate::deserialize::DeserializeOwnedValue; -use crate::frame::response::result::CqlValue; -use crate::frame::value::{Counter, CqlDate, CqlTime, CqlTimestamp}; -use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing}; -use crate::transport::session::Session; -use crate::utils::test_utils::unique_keyspace_name; use itertools::Itertools; -use scylla_cql::frame::value::{CqlTimeuuid, CqlVarint}; -use scylla_cql::types::serialize::value::SerializeValue; -use scylla_macros::{DeserializeValue, SerializeValue}; +use scylla::frame::response::result::CqlValue; +use scylla::frame::value::{Counter, CqlDate, CqlTime, CqlTimestamp, CqlTimeuuid, CqlVarint}; +use scylla::serialize::value::SerializeValue; +use scylla::Session; +use scylla::{DeserializeValue, SerializeValue}; use std::cmp::PartialEq; use std::fmt::Debug; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; +use crate::utils::{ + create_new_session_builder, scylla_supports_tablets, setup_tracing, unique_keyspace_name, + DeserializeOwnedValue, PerformDDL, +}; + // Used to prepare a table for test // Creates a new keyspace, without tablets if requested and the ScyllaDB instance supports them. 
// Drops and creates table {table_name} (id int PRIMARY KEY, val {type_name}) @@ -35,22 +35,19 @@ async fn init_test_maybe_without_tablets( create_ks += " AND TABLETS = {'enabled': false}" } - session.query_unpaged(create_ks, &[]).await.unwrap(); + session.ddl(create_ks).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query_unpaged(format!("DROP TABLE IF EXISTS {}", table_name), &[]) + .ddl(format!("DROP TABLE IF EXISTS {}", table_name)) .await .unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val {})", - table_name, type_name - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val {})", + table_name, type_name + )) .await .unwrap(); @@ -173,26 +170,20 @@ async fn test_cql_varint() { let ks = unique_keyspace_name(); session - .query_unpaged( - format!( - "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ + .ddl(format!( + "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", - ks - ), - &[], - ) + ks + )) .await .unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val varint)", - table_name - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val varint)", + table_name + )) .await .unwrap(); @@ -1285,23 +1276,17 @@ async fn test_timeuuid_ordering() { let ks = unique_keyspace_name(); session - .query_unpaged( - format!( - "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ + .ddl(format!( + "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", - ks - ), - &[], - ) + ks + )) .await .unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query_unpaged( - "CREATE TABLE tab (p int, t timeuuid, PRIMARY KEY (p, t))", - (), - ) + .ddl("CREATE TABLE tab (p int, t timeuuid, PRIMARY KEY (p, t))") .await .unwrap(); @@ -1527,52 +1512,42 @@ async fn test_udt_after_schema_update() { let ks = unique_keyspace_name(); session - .query_unpaged( - format!( - "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ + .ddl(format!( + "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", - ks - ), - &[], - ) + ks + )) .await .unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query_unpaged(format!("DROP TABLE IF EXISTS {}", table_name), &[]) + .ddl(format!("DROP TABLE IF EXISTS {}", table_name)) .await .unwrap(); session - .query_unpaged(format!("DROP TYPE IF EXISTS {}", type_name), &[]) + .ddl(format!("DROP TYPE IF EXISTS {}", type_name)) .await .unwrap(); session - .query_unpaged( - format!( - "CREATE TYPE IF NOT EXISTS {} (first int, second boolean)", - type_name - ), - &[], - ) + .ddl(format!( + "CREATE TYPE IF NOT EXISTS {} (first int, second boolean)", + type_name + )) .await .unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val {})", - table_name, type_name - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val {})", + table_name, type_name + )) .await .unwrap(); #[derive(SerializeValue, DeserializeValue, Debug, PartialEq)] - #[scylla(crate = crate)] struct UdtV1 { first: i32, second: bool, @@ -1625,7 +1600,7 @@ async fn test_udt_after_schema_update() { assert_eq!(read_udt, v1); session - .query_unpaged(format!("ALTER TYPE {} ADD third 
text;", type_name), &[]) + .ddl(format!("ALTER TYPE {} ADD third text;", type_name)) .await .unwrap(); @@ -1709,47 +1684,38 @@ async fn test_udt_with_missing_field() { let ks = unique_keyspace_name(); session - .query_unpaged( - format!( - "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ + .ddl(format!( + "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", - ks - ), - &[], - ) + ks + )) .await .unwrap(); session.use_keyspace(ks, false).await.unwrap(); session - .query_unpaged(format!("DROP TABLE IF EXISTS {}", table_name), &[]) + .ddl(format!("DROP TABLE IF EXISTS {}", table_name)) .await .unwrap(); session - .query_unpaged(format!("DROP TYPE IF EXISTS {}", type_name), &[]) + .ddl(format!("DROP TYPE IF EXISTS {}", type_name)) .await .unwrap(); session - .query_unpaged( - format!( - "CREATE TYPE IF NOT EXISTS {} (first int, second boolean, third float, fourth blob)", - type_name - ), - &[], - ) + .ddl(format!( + "CREATE TYPE IF NOT EXISTS {} (first int, second boolean, third float, fourth blob)", + type_name + )) .await .unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val {})", - table_name, type_name - ), - &[], - ) + .ddl(format!( + "CREATE TABLE IF NOT EXISTS {} (id int PRIMARY KEY, val {})", + table_name, type_name + )) .await .unwrap(); @@ -1796,7 +1762,6 @@ async fn test_udt_with_missing_field() { } #[derive(SerializeValue)] - #[scylla(crate = crate)] struct UdtV1 { first: i32, second: bool, @@ -1822,7 +1787,6 @@ async fn test_udt_with_missing_field() { id += 1; #[derive(SerializeValue)] - #[scylla(crate = crate)] struct UdtV2 { first: i32, second: bool, @@ -1850,7 +1814,6 @@ async fn test_udt_with_missing_field() { id += 1; #[derive(SerializeValue)] - #[scylla(crate = crate)] struct UdtV3 { first: i32, second: bool, @@ -1878,7 +1841,7 @@ async fn test_udt_with_missing_field() { id += 1; #[derive(SerializeValue)] - #[scylla(crate = crate, flavor="enforce_order")] + #[scylla(flavor = "enforce_order")] struct UdtV4 { first: i32, second: bool, diff --git a/scylla/src/transport/cql_value_test.rs b/scylla/tests/integration/cql_value.rs similarity index 68% rename from scylla/src/transport/cql_value_test.rs rename to scylla/tests/integration/cql_value.rs index 932b72934b..d0648b1472 100644 --- a/scylla/src/transport/cql_value_test.rs +++ b/scylla/tests/integration/cql_value.rs @@ -1,11 +1,10 @@ use assert_matches::assert_matches; -use crate::frame::response::result::CqlValue; -use crate::frame::value::CqlDuration; +use scylla::frame::response::result::CqlValue; +use scylla::frame::value::CqlDuration; +use scylla::Session; -use crate::test_utils::{create_new_session_builder, setup_tracing}; -use crate::utils::test_utils::unique_keyspace_name; -use crate::Session; +use crate::utils::{create_new_session_builder, setup_tracing, unique_keyspace_name, PerformDDL}; #[tokio::test] async fn test_cqlvalue_udt() { @@ -13,32 +12,20 @@ async fn test_cqlvalue_udt() { let session: Session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session - .query_unpaged( - format!( - "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ + .ddl(format!( + "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", - ks - ), - &[], - ) + ks + )) .await .unwrap(); session.use_keyspace(&ks, false).await.unwrap(); session - .query_unpaged( - "CREATE TYPE IF NOT EXISTS cqlvalue_udt_type 
(int_val int, text_val text)", - &[], - ) - .await - .unwrap(); - session - .query_unpaged( - "CREATE TABLE IF NOT EXISTS cqlvalue_udt_test (k int, my cqlvalue_udt_type, primary key (k))", - &[], - ) + .ddl("CREATE TYPE IF NOT EXISTS cqlvalue_udt_type (int_val int, text_val text)") .await .unwrap(); + session.ddl("CREATE TABLE IF NOT EXISTS cqlvalue_udt_test (k int, my cqlvalue_udt_type, primary key (k))").await.unwrap(); let udt_cql_value = CqlValue::UserDefinedType { keyspace: ks, @@ -76,14 +63,11 @@ async fn test_cqlvalue_duration() { let ks = unique_keyspace_name(); session - .query_unpaged( - format!( - "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ + .ddl(format!( + "CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = \ {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", - ks - ), - &[], - ) + ks + )) .await .unwrap(); session.use_keyspace(&ks, false).await.unwrap(); @@ -94,12 +78,24 @@ async fn test_cqlvalue_duration() { nanoseconds: 21372137, }); + session.ddl("CREATE TABLE IF NOT EXISTS cqlvalue_duration_test (pk int, ck int, v duration, primary key (pk, ck))").await.unwrap(); let fixture_queries = vec![ - ("CREATE TABLE IF NOT EXISTS cqlvalue_duration_test (pk int, ck int, v duration, primary key (pk, ck))", vec![],), - ("INSERT INTO cqlvalue_duration_test (pk, ck, v) VALUES (0, 0, ?)", vec![&duration_cql_value,],), - ("INSERT INTO cqlvalue_duration_test (pk, ck, v) VALUES (0, 1, 89h4m48s)", vec![],), - ("INSERT INTO cqlvalue_duration_test (pk, ck, v) VALUES (0, 2, PT89H8M53S)", vec![],), - ("INSERT INTO cqlvalue_duration_test (pk, ck, v) VALUES (0, 3, P0000-00-00T89:09:09)", vec![],), + ( + "INSERT INTO cqlvalue_duration_test (pk, ck, v) VALUES (0, 0, ?)", + vec![&duration_cql_value], + ), + ( + "INSERT INTO cqlvalue_duration_test (pk, ck, v) VALUES (0, 1, 89h4m48s)", + vec![], + ), + ( + "INSERT INTO cqlvalue_duration_test (pk, ck, v) VALUES (0, 2, PT89H8M53S)", + vec![], + ), + ( + "INSERT INTO cqlvalue_duration_test (pk, ck, v) VALUES (0, 3, P0000-00-00T89:09:09)", + vec![], + ), ]; for query in fixture_queries { diff --git a/scylla/tests/integration/default_policy.rs b/scylla/tests/integration/default_policy.rs new file mode 100644 index 0000000000..e097cf418a --- /dev/null +++ b/scylla/tests/integration/default_policy.rs @@ -0,0 +1,26 @@ +use scylla::load_balancing::{DefaultPolicy, LatencyAwarenessBuilder}; +use scylla::ExecutionProfile; + +use crate::utils::{create_new_session_builder, setup_tracing}; + +// This is a regression test for #696. 
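+// (The regression caused queries not to complete when latency awareness was
+// enabled, so the test merely checks that the deliberately invalid statement
+// below finishes at all - with an error - within the 1-second timeout.)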
+#[tokio::test] +#[ntest::timeout(1000)] +async fn latency_aware_query_completes() { + setup_tracing(); + let policy = DefaultPolicy::builder() + .latency_awareness(LatencyAwarenessBuilder::default()) + .build(); + let handle = ExecutionProfile::builder() + .load_balancing_policy(policy) + .build() + .into_handle(); + + let session = create_new_session_builder() + .default_execution_profile_handle(handle) + .build() + .await + .unwrap(); + + session.query_unpaged("whatever", ()).await.unwrap_err(); +} diff --git a/scylla/tests/integration/execution_profiles.rs b/scylla/tests/integration/execution_profiles.rs index 0a49bae785..dd58cdbae0 100644 --- a/scylla/tests/integration/execution_profiles.rs +++ b/scylla/tests/integration/execution_profiles.rs @@ -1,7 +1,7 @@ use std::ops::Deref; use std::sync::Arc; -use crate::utils::{setup_tracing, test_with_3_node_cluster}; +use crate::utils::{setup_tracing, test_with_3_node_cluster, unique_keyspace_name, PerformDDL}; use assert_matches::assert_matches; use scylla::batch::BatchStatement; use scylla::batch::{Batch, BatchType}; @@ -13,7 +13,6 @@ use scylla::{ load_balancing::{LoadBalancingPolicy, RoutingInfo}, retry_policy::{RetryPolicy, RetrySession}, speculative_execution::SpeculativeExecutionPolicy, - test_utils::unique_keyspace_name, transport::ClusterData, ExecutionProfile, SessionBuilder, }; @@ -165,14 +164,13 @@ async fn test_execution_profiles() { let ks = unique_keyspace_name(); /* Prepare schema */ - session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap(); + session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks)).await.unwrap(); session - .query_unpaged( + .ddl( format!( "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))", ks ), - &[], ) .await .unwrap(); diff --git a/scylla/tests/integration/history.rs b/scylla/tests/integration/history.rs new file mode 100644 index 0000000000..1bbd21e024 --- /dev/null +++ b/scylla/tests/integration/history.rs @@ -0,0 +1,293 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::sync::Arc; + +use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc}; +use futures::StreamExt; +use scylla::frame::response::result::Row; +use scylla::history::{ + AttemptResult, HistoryCollector, QueryHistoryResult, StructuredHistory, TimePoint, +}; +use scylla::query::Query; +use scylla::transport::errors::QueryError; + +use crate::utils::{create_new_session_builder, setup_tracing, unique_keyspace_name, PerformDDL}; + +// Set a single time for all timestamps within StructuredHistory. +// HistoryCollector sets each timestamp to the current time, which changes between test runs. +// Pinning them all to one fixed value makes the displayed history deterministic and testable.
+fn set_one_time(mut history: StructuredHistory) -> StructuredHistory { + let the_time: TimePoint = DateTime::::from_naive_utc_and_offset( + NaiveDateTime::new( + NaiveDate::from_ymd_opt(2022, 2, 22).unwrap(), + NaiveTime::from_hms_opt(20, 22, 22).unwrap(), + ), + Utc, + ); + + for query in &mut history.queries { + query.start_time = the_time; + match &mut query.result { + Some(QueryHistoryResult::Success(succ_time)) => *succ_time = the_time, + Some(QueryHistoryResult::Error(err_time, _)) => *err_time = the_time, + None => {} + }; + + for fiber in std::iter::once(&mut query.non_speculative_fiber) + .chain(query.speculative_fibers.iter_mut()) + { + fiber.start_time = the_time; + for attempt in &mut fiber.attempts { + attempt.send_time = the_time; + match &mut attempt.result { + Some(AttemptResult::Success(succ_time)) => *succ_time = the_time, + Some(AttemptResult::Error(err_time, _, _)) => *err_time = the_time, + None => {} + } + } + } + } + + history +} + +// Set a single node for all attempts within StructuredHistory. +// When running against real life nodes this address may change, +// setting it to one value makes it possible to run tests consistently. +fn set_one_node(mut history: StructuredHistory) -> StructuredHistory { + let the_node: SocketAddr = node1_addr(); + + for query in &mut history.queries { + for fiber in std::iter::once(&mut query.non_speculative_fiber) + .chain(query.speculative_fibers.iter_mut()) + { + for attempt in &mut fiber.attempts { + attempt.node_addr = the_node; + } + } + } + + history +} + +// Set a single error message for all DbErrors within StructuredHistory. +// The error message changes between Scylla/Cassandra/their versions. +// Setting it to one value makes it possible to run tests consistently. +fn set_one_db_error_message(mut history: StructuredHistory) -> StructuredHistory { + let set_msg = |err: &mut QueryError| { + if let QueryError::DbError(_, msg) = err { + *msg = "Error message from database".to_string(); + } + }; + + for query in &mut history.queries { + if let Some(QueryHistoryResult::Error(_, err)) = &mut query.result { + set_msg(err); + } + for fiber in std::iter::once(&mut query.non_speculative_fiber) + .chain(query.speculative_fibers.iter_mut()) + { + for attempt in &mut fiber.attempts { + if let Some(AttemptResult::Error(_, err, _)) = &mut attempt.result { + set_msg(err); + } + } + } + } + + history +} + +fn node1_addr() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 19042) +} + +#[tokio::test] +async fn successful_query_history() { + setup_tracing(); + let session = create_new_session_builder().build().await.unwrap(); + + let mut query = Query::new("SELECT * FROM system.local"); + let history_collector = Arc::new(HistoryCollector::new()); + query.set_history_listener(history_collector.clone()); + + session.query_unpaged(query.clone(), ()).await.unwrap(); + + let history: StructuredHistory = history_collector.clone_structured_history(); + + let displayed = "Queries History: +=== Query #0 === +| start_time: 2022-02-22 20:22:22 UTC +| Non-speculative attempts: +| - Attempt #0 sent to 127.0.0.1:19042 +| request send time: 2022-02-22 20:22:22 UTC +| Success at 2022-02-22 20:22:22 UTC +| +| Query successful at 2022-02-22 20:22:22 UTC +================= +"; + assert_eq!( + displayed, + format!( + "{}", + set_one_db_error_message(set_one_node(set_one_time(history))) + ) + ); + + // Prepared queries retain the history listener set in Query. 
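+    // (prepare() copies the listener from `query` into the resulting
+    // PreparedStatement, so the execution below is recorded as Query #1
+    // by the same collector.)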
+ let prepared = session.prepare(query).await.unwrap(); + session.execute_unpaged(&prepared, ()).await.unwrap(); + + let history2: StructuredHistory = history_collector.clone_structured_history(); + + let displayed2 = "Queries History: +=== Query #0 === +| start_time: 2022-02-22 20:22:22 UTC +| Non-speculative attempts: +| - Attempt #0 sent to 127.0.0.1:19042 +| request send time: 2022-02-22 20:22:22 UTC +| Success at 2022-02-22 20:22:22 UTC +| +| Query successful at 2022-02-22 20:22:22 UTC +================= +=== Query #1 === +| start_time: 2022-02-22 20:22:22 UTC +| Non-speculative attempts: +| - Attempt #0 sent to 127.0.0.1:19042 +| request send time: 2022-02-22 20:22:22 UTC +| Success at 2022-02-22 20:22:22 UTC +| +| Query successful at 2022-02-22 20:22:22 UTC +================= +"; + assert_eq!( + displayed2, + format!( + "{}", + set_one_db_error_message(set_one_node(set_one_time(history2))) + ) + ); +} + +#[tokio::test] +async fn failed_query_history() { + setup_tracing(); + let session = create_new_session_builder().build().await.unwrap(); + + let mut query = Query::new("This isnt even CQL"); + let history_collector = Arc::new(HistoryCollector::new()); + query.set_history_listener(history_collector.clone()); + + assert!(session.query_unpaged(query.clone(), ()).await.is_err()); + + let history: StructuredHistory = history_collector.clone_structured_history(); + + let displayed = +"Queries History: +=== Query #0 === +| start_time: 2022-02-22 20:22:22 UTC +| Non-speculative attempts: +| - Attempt #0 sent to 127.0.0.1:19042 +| request send time: 2022-02-22 20:22:22 UTC +| Error at 2022-02-22 20:22:22 UTC +| Error: Database returned an error: The submitted query has a syntax error, Error message: Error message from database +| Retry decision: DontRetry +| +| Query failed at 2022-02-22 20:22:22 UTC +| Error: Database returned an error: The submitted query has a syntax error, Error message: Error message from database +================= +"; + assert_eq!( + displayed, + format!( + "{}", + set_one_db_error_message(set_one_node(set_one_time(history))) + ) + ); +} + +#[tokio::test] +async fn iterator_query_history() { + setup_tracing(); + let session = create_new_session_builder().build().await.unwrap(); + let ks = unique_keyspace_name(); + session + .ddl(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)) + .await + .unwrap(); + session.use_keyspace(ks, true).await.unwrap(); + + session + .ddl("CREATE TABLE t (p int primary key)") + .await + .unwrap(); + for i in 0..32 { + session + .query_unpaged("INSERT INTO t (p) VALUES (?)", (i,)) + .await + .unwrap(); + } + + let mut iter_query: Query = Query::new("SELECT * FROM t"); + iter_query.set_page_size(8); + let history_collector = Arc::new(HistoryCollector::new()); + iter_query.set_history_listener(history_collector.clone()); + + let mut rows_iterator = session + .query_iter(iter_query, ()) + .await + .unwrap() + .rows_stream::() + .unwrap(); + while let Some(_row) = rows_iterator.next().await { + // Receive rows... 
+ } + + let history = history_collector.clone_structured_history(); + + assert!(history.queries.len() >= 4); + + let displayed_prefix = "Queries History: +=== Query #0 === +| start_time: 2022-02-22 20:22:22 UTC +| Non-speculative attempts: +| - Attempt #0 sent to 127.0.0.1:19042 +| request send time: 2022-02-22 20:22:22 UTC +| Success at 2022-02-22 20:22:22 UTC +| +| Query successful at 2022-02-22 20:22:22 UTC +================= +=== Query #1 === +| start_time: 2022-02-22 20:22:22 UTC +| Non-speculative attempts: +| - Attempt #0 sent to 127.0.0.1:19042 +| request send time: 2022-02-22 20:22:22 UTC +| Success at 2022-02-22 20:22:22 UTC +| +| Query successful at 2022-02-22 20:22:22 UTC +================= +=== Query #2 === +| start_time: 2022-02-22 20:22:22 UTC +| Non-speculative attempts: +| - Attempt #0 sent to 127.0.0.1:19042 +| request send time: 2022-02-22 20:22:22 UTC +| Success at 2022-02-22 20:22:22 UTC +| +| Query successful at 2022-02-22 20:22:22 UTC +================= +=== Query #3 === +| start_time: 2022-02-22 20:22:22 UTC +| Non-speculative attempts: +| - Attempt #0 sent to 127.0.0.1:19042 +| request send time: 2022-02-22 20:22:22 UTC +| Success at 2022-02-22 20:22:22 UTC +| +| Query successful at 2022-02-22 20:22:22 UTC +================= +"; + let displayed_str = format!( + "{}", + set_one_db_error_message(set_one_node(set_one_time(history))) + ); + + assert!(displayed_str.starts_with(displayed_prefix),); +} diff --git a/scylla/src/transport/large_batch_statements_test.rs b/scylla/tests/integration/large_batch_statements.rs similarity index 77% rename from scylla/src/transport/large_batch_statements_test.rs rename to scylla/tests/integration/large_batch_statements.rs index 7e8fc482c3..724d8c9496 100644 --- a/scylla/src/transport/large_batch_statements_test.rs +++ b/scylla/tests/integration/large_batch_statements.rs @@ -1,14 +1,11 @@ use assert_matches::assert_matches; -use crate::batch::BatchType; -use crate::query::Query; -use crate::test_utils::setup_tracing; -use crate::transport::errors::{BadQuery, QueryError}; -use crate::{ - batch::Batch, - test_utils::{create_new_session_builder, unique_keyspace_name}, - QueryResult, Session, -}; +use crate::utils::{create_new_session_builder, setup_tracing, unique_keyspace_name, PerformDDL}; +use scylla::batch::Batch; +use scylla::batch::BatchType; +use scylla::query::Query; +use scylla::transport::errors::{BadQuery, QueryError}; +use scylla::{QueryResult, Session}; #[tokio::test] async fn test_large_batch_statements() { @@ -33,19 +30,15 @@ async fn test_large_batch_statements() { async fn create_test_session(session: Session, ks: &String) -> Session { session - .query_unpaged( + .ddl( format!("CREATE KEYSPACE {} WITH REPLICATION = {{ 'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1 }}",ks), - &[], ) .await.unwrap(); session - .query_unpaged( - format!( - "CREATE TABLE {}.pairs (dummy int, k blob, v blob, primary key (dummy, k))", - ks - ), - &[], - ) + .ddl(format!( + "CREATE TABLE {}.pairs (dummy int, k blob, v blob, primary key (dummy, k))", + ks + )) .await .unwrap(); session diff --git a/scylla/tests/integration/lwt_optimisation.rs b/scylla/tests/integration/lwt_optimisation.rs index ca56cff930..466120bce2 100644 --- a/scylla/tests/integration/lwt_optimisation.rs +++ b/scylla/tests/integration/lwt_optimisation.rs @@ -1,7 +1,8 @@ -use crate::utils::{setup_tracing, test_with_3_node_cluster}; +use crate::utils::{ + scylla_supports_tablets, setup_tracing, test_with_3_node_cluster, unique_keyspace_name, + PerformDDL, +}; 
 use scylla::retry_policy::FallthroughRetryPolicy;
-use scylla::test_utils::scylla_supports_tablets;
-use scylla::test_utils::unique_keyspace_name;
 use scylla::transport::session::Session;
 use scylla::{ExecutionProfile, SessionBuilder};
 use scylla_cql::frame::protocol_features::ProtocolFeatures;
@@ -73,11 +74,11 @@ async fn if_lwt_optimisation_mark_offered_then_negotiatied_and_lwt_routed_optima
     if scylla_supports_tablets(&session).await {
         create_ks += " and TABLETS = { 'enabled': false}";
     }
-    session.query_unpaged(create_ks, &[]).await.unwrap();
+    session.ddl(create_ks).await.unwrap();
     session.use_keyspace(ks, false).await.unwrap();
 
     session
-        .query_unpaged("CREATE TABLE t (a int primary key, b int)", &[])
+        .ddl("CREATE TABLE t (a int primary key, b int)")
         .await
         .unwrap();
diff --git a/scylla/tests/integration/main.rs b/scylla/tests/integration/main.rs
index ef190f1237..86b529dc08 100644
--- a/scylla/tests/integration/main.rs
+++ b/scylla/tests/integration/main.rs
@@ -1,11 +1,20 @@
+mod authenticate;
+mod batch;
 mod consistency;
+mod cql_collections;
+mod cql_types;
+mod cql_value;
+mod default_policy;
 mod execution_profiles;
+mod history;
 mod hygiene;
+mod large_batch_statements;
 mod lwt_optimisation;
 mod new_session;
 mod retries;
 mod self_identity;
 mod shards;
+mod silent_prepare_batch;
 mod silent_prepare_query;
 mod skip_metadata_optimization;
 mod tablets;
diff --git a/scylla/tests/integration/retries.rs b/scylla/tests/integration/retries.rs
index 43cbf58074..7c7d35c4d4 100644
--- a/scylla/tests/integration/retries.rs
+++ b/scylla/tests/integration/retries.rs
@@ -1,10 +1,10 @@
-use crate::utils::{setup_tracing, test_with_3_node_cluster};
+use crate::utils::{setup_tracing, test_with_3_node_cluster, unique_keyspace_name, PerformDDL};
+use scylla::query::Query;
 use scylla::retry_policy::FallthroughRetryPolicy;
 use scylla::speculative_execution::SimpleSpeculativeExecutionPolicy;
 use scylla::transport::session::Session;
 use scylla::ExecutionProfile;
 use scylla::SessionBuilder;
-use scylla::{query::Query, test_utils::unique_keyspace_name};
 use std::sync::Arc;
 use std::time::Duration;
 use tracing::info;
@@ -36,10 +36,10 @@ async fn speculative_execution_is_fired() {
         .unwrap();
 
     let ks = unique_keyspace_name();
-    session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap();
+    session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks)).await.unwrap();
     session.use_keyspace(ks, false).await.unwrap();
     session
-        .query_unpaged("CREATE TABLE t (a int primary key)", &[])
+        .ddl("CREATE TABLE t (a int primary key)")
         .await
         .unwrap();
@@ -112,10 +112,10 @@ async fn retries_occur() {
         .unwrap();
 
     let ks = unique_keyspace_name();
-    session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap();
+    session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks)).await.unwrap();
     session.use_keyspace(ks, false).await.unwrap();
     session
-        .query_unpaged("CREATE TABLE t (a int primary key)", &[])
+        .ddl("CREATE TABLE t (a int primary key)")
         .await
         .unwrap();
@@ -192,10 +192,10 @@ async fn speculative_execution_panic_regression_test() {
         .unwrap();
 
     let ks = unique_keyspace_name();
-    session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap();
+    session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks)).await.unwrap();
     session.use_keyspace(ks, false).await.unwrap();
     session
-        .query_unpaged("CREATE TABLE t (a int primary key)", &[])
+        .ddl("CREATE TABLE t (a int primary key)")
         .await
         .unwrap();
diff --git a/scylla/tests/integration/shards.rs b/scylla/tests/integration/shards.rs
index b22cfc397b..6c1f8fdb8f 100644
--- a/scylla/tests/integration/shards.rs
+++ b/scylla/tests/integration/shards.rs
@@ -1,8 +1,11 @@
+use std::collections::HashSet;
 use std::sync::Arc;
 
-use crate::utils::{setup_tracing, test_with_3_node_cluster};
-use scylla::test_utils::scylla_supports_tablets;
-use scylla::{test_utils::unique_keyspace_name, SessionBuilder};
+use crate::utils::{
+    scylla_supports_tablets, setup_tracing, test_with_3_node_cluster, unique_keyspace_name,
+    PerformDDL,
+};
+use scylla::SessionBuilder;
 use tokio::sync::mpsc;
 
 use scylla_proxy::TargetShard;
@@ -16,7 +19,6 @@ use scylla_proxy::{ProxyError, RequestFrame, WorkerError};
 #[cfg(not(scylla_cloud_tests))]
 async fn test_consistent_shard_awareness() {
     setup_tracing();
-    use std::collections::HashSet;
 
     let res = test_with_3_node_cluster(ShardAwareness::QueryNode, |proxy_uris, translation_map, mut running_proxy| async move {
@@ -42,14 +44,13 @@ async fn test_consistent_shard_awareness() {
         if scylla_supports_tablets(&session).await {
             create_ks += " and TABLETS = { 'enabled': false}";
         }
-        session.query_unpaged(create_ks, &[]).await.unwrap();
+        session.ddl(create_ks).await.unwrap();
         session
-            .query_unpaged(
+            .ddl(
                 format!(
                     "CREATE TABLE IF NOT EXISTS {}.t (a int, b int, c text, primary key (a, b))",
                     ks
                 ),
-                &[],
             )
             .await
             .unwrap();
diff --git a/scylla/src/transport/silent_prepare_batch_test.rs b/scylla/tests/integration/silent_prepare_batch.rs
similarity index 89%
rename from scylla/src/transport/silent_prepare_batch_test.rs
rename to scylla/tests/integration/silent_prepare_batch.rs
index bca8ef183a..b510e04626 100644
--- a/scylla/src/transport/silent_prepare_batch_test.rs
+++ b/scylla/tests/integration/silent_prepare_batch.rs
@@ -1,9 +1,8 @@
-use crate::{
-    batch::Batch,
-    prepared_statement::PreparedStatement,
-    test_utils::{create_new_session_builder, setup_tracing, unique_keyspace_name},
-    Session,
-};
+use crate::utils::{create_new_session_builder, setup_tracing, unique_keyspace_name, PerformDDL};
+use scylla::batch::Batch;
+use scylla::prepared_statement::PreparedStatement;
+use scylla::Session;
+
 use std::collections::BTreeSet;
 
 #[tokio::test]
@@ -12,14 +11,11 @@ async fn test_quietly_prepare_batch() {
     let session = create_new_session_builder().build().await.unwrap();
 
     let ks = unique_keyspace_name();
-    session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap();
+    session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap();
     session.use_keyspace(ks.clone(), false).await.unwrap();
 
     session
-        .query_unpaged(
-            "CREATE TABLE test_batch_table (a int, b int, primary key (a, b))",
-            (),
-        )
+        .ddl("CREATE TABLE test_batch_table (a int, b int, primary key (a, b))")
         .await
         .unwrap();
diff --git a/scylla/tests/integration/silent_prepare_query.rs b/scylla/tests/integration/silent_prepare_query.rs
index 93950206a5..477b633862 100644
--- a/scylla/tests/integration/silent_prepare_query.rs
+++ b/scylla/tests/integration/silent_prepare_query.rs
@@ -1,7 +1,7 @@
-use crate::utils::{setup_tracing, test_with_3_node_cluster};
+use crate::utils::{setup_tracing, test_with_3_node_cluster, unique_keyspace_name, PerformDDL};
+use scylla::query::Query;
 use scylla::Session;
 use scylla::SessionBuilder;
-use scylla::{query::Query, test_utils::unique_keyspace_name};
 use scylla_proxy::{
     Condition, ProxyError, Reaction, RequestOpcode, RequestReaction, RequestRule, ShardAwareness,
     WorkerError,
@@ -27,10 +27,10 @@ async fn test_prepare_query_with_values() {
         .unwrap();
 
     let ks = unique_keyspace_name();
-    session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap();
+    session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks)).await.unwrap();
     session.use_keyspace(ks, false).await.unwrap();
 
     session
-        .query_unpaged("CREATE TABLE t (a int primary key)", &[])
+        .ddl("CREATE TABLE t (a int primary key)")
         .await
         .unwrap();
@@ -78,10 +78,10 @@ async fn test_query_with_no_values() {
         .unwrap();
 
     let ks = unique_keyspace_name();
-    session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap();
+    session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks)).await.unwrap();
     session.use_keyspace(ks, false).await.unwrap();
 
     session
-        .query_unpaged("CREATE TABLE t (a int primary key)", &[])
+        .ddl("CREATE TABLE t (a int primary key)")
         .await
         .unwrap();
diff --git a/scylla/tests/integration/skip_metadata_optimization.rs b/scylla/tests/integration/skip_metadata_optimization.rs
index dba646e895..eb8ff8520a 100644
--- a/scylla/tests/integration/skip_metadata_optimization.rs
+++ b/scylla/tests/integration/skip_metadata_optimization.rs
@@ -1,5 +1,5 @@
-use crate::utils::{setup_tracing, test_with_3_node_cluster};
-use scylla::{prepared_statement::PreparedStatement, test_utils::unique_keyspace_name};
+use crate::utils::{setup_tracing, test_with_3_node_cluster, unique_keyspace_name, PerformDDL};
+use scylla::prepared_statement::PreparedStatement;
 use scylla::{Session, SessionBuilder};
 use scylla_cql::frame::request::query::{PagingState, PagingStateResponse};
 use scylla_cql::frame::types;
@@ -27,10 +27,10 @@ async fn test_skip_result_metadata() {
         .unwrap();
 
     let ks = unique_keyspace_name();
-    session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks), &[]).await.unwrap();
+    session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks)).await.unwrap();
     session.use_keyspace(ks, false).await.unwrap();
     session
-        .query_unpaged("CREATE TABLE t (a int primary key, b int, c text)", &[])
+        .ddl("CREATE TABLE t (a int primary key, b int, c text)")
         .await
         .unwrap();
     session.query_unpaged("INSERT INTO t (a, b, c) VALUES (1, 2, 'foo_filter_data')", &[]).await.unwrap();
@@ -82,14 +82,13 @@ async fn test_skip_result_metadata() {
     {
         let ks = unique_keyspace_name();
-        session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap();
+        session.ddl(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks)).await.unwrap();
         session.use_keyspace(ks, true).await.unwrap();
 
         type RowT = (i32, i32, String);
         session
-            .query_unpaged(
+            .ddl(
                 "CREATE TABLE IF NOT EXISTS t2 (a int, b int, c text, primary key (a, b))",
-                &[],
             )
             .await
             .unwrap();
diff --git a/scylla/tests/integration/tablets.rs b/scylla/tests/integration/tablets.rs
index 9dbb5d31ab..fd56c7d939 100644
--- a/scylla/tests/integration/tablets.rs
+++ b/scylla/tests/integration/tablets.rs
@@ -1,7 +1,9 @@
 use std::sync::Arc;
 
-use crate::utils::setup_tracing;
-use crate::utils::test_with_3_node_cluster;
+use crate::utils::{
+    scylla_supports_tablets, setup_tracing, test_with_3_node_cluster, unique_keyspace_name,
+    PerformDDL,
+};
 
 use futures::future::try_join_all;
 use futures::TryStreamExt;
@@ -12,7 +14,6 @@ use scylla::load_balancing::RoutingInfo;
 use scylla::prepared_statement::PreparedStatement;
 use scylla::query::Query;
 use scylla::serialize::row::SerializeRow;
-use scylla::test_utils::unique_keyspace_name;
 use scylla::transport::ClusterData;
 use scylla::transport::Node;
 use scylla::transport::NodeRef;
@@ -251,25 +252,19 @@ fn count_tablet_feedbacks(
 async fn prepare_schema(session: &Session, ks: &str, table: &str, tablet_count: usize) {
     session
-        .query_unpaged(
-            format!(
-                "CREATE KEYSPACE IF NOT EXISTS {}
+        .ddl(format!(
+            "CREATE KEYSPACE IF NOT EXISTS {}
             WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 2}}
             AND tablets = {{ 'initial': {} }}",
-                ks, tablet_count
-            ),
-            &[],
-        )
+            ks, tablet_count
+        ))
         .await
         .unwrap();
 
     session
-        .query_unpaged(
-            format!(
-                "CREATE TABLE IF NOT EXISTS {}.{} (a int, b int, c text, primary key (a, b))",
-                ks, table
-            ),
-            &[],
-        )
+        .ddl(format!(
+            "CREATE TABLE IF NOT EXISTS {}.{} (a int, b int, c text, primary key (a, b))",
+            ks, table
+        ))
         .await
         .unwrap();
 }
@@ -300,7 +295,7 @@ async fn test_default_policy_is_tablet_aware() {
         .await
         .unwrap();
 
-    if !scylla::test_utils::scylla_supports_tablets(&session).await {
+    if !scylla_supports_tablets(&session).await {
         tracing::warn!("Skipping test because this Scylla version doesn't support tablets");
         return running_proxy;
     }
@@ -418,8 +413,6 @@ async fn test_default_policy_is_tablet_aware() {
 #[tokio::test]
 #[ntest::timeout(30000)]
 async fn test_tablet_feedback_not_sent_for_unprepared_queries() {
-    use scylla::test_utils::scylla_supports_tablets;
-
     setup_tracing();
 
     const TABLET_COUNT: usize = 16;
@@ -492,8 +485,6 @@ async fn test_tablet_feedback_not_sent_for_unprepared_queries() {
 #[ntest::timeout(30000)]
 #[ignore]
 async fn test_lwt_optimization_works_with_tablets() {
-    use scylla::test_utils::scylla_supports_tablets;
-
     setup_tracing();
 
     const TABLET_COUNT: usize = 16;
diff --git a/scylla/tests/integration/utils.rs b/scylla/tests/integration/utils.rs
index 4d5b10f7a4..07d2079745 100644
--- a/scylla/tests/integration/utils.rs
+++ b/scylla/tests/integration/utils.rs
@@ -1,12 +1,23 @@
 use futures::Future;
+use scylla::deserialize::DeserializeValue;
+use scylla::load_balancing::{FallbackPlan, LoadBalancingPolicy, RoutingInfo};
+use scylla::query::Query;
+use scylla::routing::Shard;
+use scylla::transport::errors::QueryError;
+use scylla::transport::session_builder::{GenericSessionBuilder, SessionBuilderKind};
+use scylla::transport::{ClusterData, NodeRef};
+use scylla::{ExecutionProfile, Session};
 use std::collections::HashMap;
 use std::env;
 use std::net::SocketAddr;
+use std::num::NonZeroU32;
 use std::str::FromStr;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::time::{Duration, SystemTime, UNIX_EPOCH};
 
 use scylla_proxy::{Node, Proxy, ProxyError, RunningProxy, ShardAwareness};
 
-#[cfg(test)]
 pub(crate) fn setup_tracing() {
     let _ = tracing_subscriber::fmt::fmt()
         .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
@@ -14,6 +25,22 @@ pub(crate) fn setup_tracing() {
         .try_init();
 }
 
+static UNIQUE_COUNTER: AtomicUsize = AtomicUsize::new(0);
+
+pub(crate) fn unique_keyspace_name() -> String {
+    let cnt = UNIQUE_COUNTER.fetch_add(1, Ordering::SeqCst);
+    let name = format!(
+        "test_rust_{}_{}",
+        SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_secs(),
+        cnt
+    );
+    println!("Unique name: {}", name);
+    name
+}
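+// A name produced by unique_keyspace_name() looks like "test_rust_1700000000_0"
+// (hypothetical timestamp, then counter), so concurrent tests never collide on
+// keyspace names.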
+
 pub(crate) async fn test_with_3_node_cluster<F, Fut>(
     shard_awareness: ShardAwareness,
     test: F,
 ) -> Result<(), ProxyError>
@@ -63,3 +90,145 @@ where
     running_proxy.finish().await
 }
+
+pub(crate) async fn supports_feature(session: &Session, feature: &str) -> bool {
+    // Cassandra doesn't have a concept of features, so first detect
+    // whether the `supported_features` column exists in system.local.
+
+    let meta = session.get_cluster_data();
+    let system_local = meta
+        .get_keyspace_info()
+        .get("system")
+        .unwrap()
+        .tables
+        .get("local")
+        .unwrap();
+
+    if !system_local.columns.contains_key("supported_features") {
+        return false;
+    }
+
+    let result = session
+        .query_unpaged("SELECT supported_features FROM system.local", ())
+        .await
+        .unwrap()
+        .into_rows_result()
+        .unwrap();
+
+    let (features,): (Option<&str>,) = result.single_row().unwrap();
+
+    features
+        .unwrap_or_default()
+        .split(',')
+        .any(|f| f == feature)
+}
+
+pub(crate) async fn scylla_supports_tablets(session: &Session) -> bool {
+    supports_feature(session, "TABLETS").await
+}
+
+// Creates a generic session builder based on conditional compilation configuration.
+// For SessionBuilder of DefaultMode type, adds localhost to known hosts, as all of the tests
+// connect to localhost.
+pub(crate) fn create_new_session_builder() -> GenericSessionBuilder<impl SessionBuilderKind> {
+    let session_builder = {
+        #[cfg(not(scylla_cloud_tests))]
+        {
+            use scylla::SessionBuilder;
+
+            let uri = std::env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string());
+
+            SessionBuilder::new().known_node(uri)
+        }
+
+        #[cfg(scylla_cloud_tests)]
+        {
+            use scylla::transport::session_builder::CloudMode;
+            use scylla::CloudSessionBuilder;
+            use std::path::Path;
+
+            std::env::var("CLOUD_CONFIG_PATH")
+                .map(|config_path| CloudSessionBuilder::new(Path::new(&config_path)))
+                .expect("CLOUD_CONFIG_PATH environment variable is missing")
+                .expect("Failed to initialize CloudSessionBuilder")
+        }
+    };
+
+    // The reason why we enable so long waiting for TracingInfo is... Cassandra. (Yes, again.)
+    // In Cassandra Java Driver, the wait time for tracing info is 10 seconds, so here we do the same.
+    // However, as Scylla usually gets TracingInfo ready really fast (our default interval is hence 3ms),
+    // we stick to a not-so-much-terribly-long interval here.
+    session_builder
+        .tracing_info_fetch_attempts(NonZeroU32::new(200).unwrap())
+        .tracing_info_fetch_interval(Duration::from_millis(50))
+}
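+// Usage sketch for create_new_session_builder(), mirroring the tests above
+// (assumes a node is reachable at 127.0.0.1:9042, or wherever SCYLLA_URI points):
+//     let session = create_new_session_builder().build().await.unwrap();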
+
+// Shorthands for better readability.
+// Copied from Scylla because we don't want to make it public there.
+pub(crate) trait DeserializeOwnedValue:
+    for<'frame, 'metadata> DeserializeValue<'frame, 'metadata>
+{
+}
+impl<T> DeserializeOwnedValue for T where
+    T: for<'frame, 'metadata> DeserializeValue<'frame, 'metadata>
+{
+}
+
+// This LBP produces a predictable query plan - it orders the nodes
+// by position in the ring.
+// This is to make sure that all DDL queries land on the same node,
+// to prevent errors from concurrent DDL queries executed on different nodes.
+#[derive(Debug)]
+struct SchemaQueriesLBP;
+
+impl LoadBalancingPolicy for SchemaQueriesLBP {
+    fn pick<'a>(
+        &'a self,
+        _query: &'a RoutingInfo,
+        cluster: &'a ClusterData,
+    ) -> Option<(NodeRef<'a>, Option<Shard>)> {
+        // I'm not sure if Scylla can handle concurrent DDL queries to different shards,
+        // in other words if its local lock is per-node or per-shard.
+        // Just to be safe, let's use an explicit shard.
+        cluster.get_nodes_info().first().map(|node| (node, Some(0)))
+    }
+
+    fn fallback<'a>(
+        &'a self,
+        _query: &'a RoutingInfo,
+        cluster: &'a ClusterData,
+    ) -> FallbackPlan<'a> {
+        Box::new(cluster.get_nodes_info().iter().map(|node| (node, Some(0))))
+    }
+
+    fn name(&self) -> String {
+        "SchemaQueriesLBP".to_owned()
+    }
+}
+
+fn apply_ddl_lbp(query: &mut Query) {
+    let policy = query
+        .get_execution_profile_handle()
+        .map(|profile| profile.pointee_to_builder())
+        .unwrap_or(ExecutionProfile::builder())
+        .load_balancing_policy(Arc::new(SchemaQueriesLBP))
+        .build();
+    query.set_execution_profile_handle(Some(policy.into_handle()));
+}
+
+// This is just to make it easier to call the above function:
+// we'll be able to do session.ddl(...) instead of perform_ddl(&session, ...)
+// or something like that.
+#[async_trait::async_trait]
+pub(crate) trait PerformDDL {
+    async fn ddl(&self, query: impl Into<Query> + Send) -> Result<(), QueryError>;
+}
+
+#[async_trait::async_trait]
+impl PerformDDL for Session {
+    async fn ddl(&self, query: impl Into<Query> + Send) -> Result<(), QueryError> {
+        let mut query = query.into();
+        apply_ddl_lbp(&mut query);
+        self.query_unpaged(query, &[]).await.map(|_| ())
+    }
+}
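+
+// Usage sketch: bring the trait into scope, then route DDL statements through
+// `ddl` so they all go to the predictable LBP above (the statement below is
+// only illustrative - any DDL string works):
+//     use crate::utils::PerformDDL;
+//     session.ddl("CREATE TABLE t (a int primary key)").await.unwrap();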