diff --git a/examples/allocations.rs b/examples/allocations.rs
index a3ec2a5cba..d10ad9771b 100644
--- a/examples/allocations.rs
+++ b/examples/allocations.rs
@@ -1,5 +1,6 @@
 use anyhow::Result;
-use scylla::{statement::prepared_statement::PreparedStatement, Session, SessionBuilder};
+use scylla::transport::session::Session;
+use scylla::{statement::prepared_statement::PreparedStatement, SessionBuilder};
 use std::io::Write;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
diff --git a/examples/basic.rs b/examples/basic.rs
index 72b6a5ce11..c4fe10b8b3 100644
--- a/examples/basic.rs
+++ b/examples/basic.rs
@@ -1,7 +1,9 @@
 use anyhow::Result;
-use futures::TryStreamExt;
-use scylla::macros::FromRow;
+use futures::StreamExt as _;
+use futures::TryStreamExt as _;
+use scylla::frame::response::result::Row;
 use scylla::transport::session::Session;
+use scylla::DeserializeRow;
 use scylla::SessionBuilder;
 use std::env;
@@ -53,23 +55,24 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT a, b, c FROM examples_ks.basic", &[])
         .await?
-        .into_typed::<(i32, i32, String)>();
+        .rows_stream::<(i32, i32, String)>()?;
     while let Some((a, b, c)) = iter.try_next().await? {
         println!("a, b, c: {}, {}, {}", a, b, c);
     }
 
-    // Or as custom structs that derive FromRow
-    #[derive(Debug, FromRow)]
+    // Or as custom structs that derive DeserializeRow
+    #[allow(unused)]
+    #[derive(Debug, DeserializeRow)]
     struct RowData {
-        _a: i32,
-        _b: Option<i32>,
-        _c: String,
+        a: i32,
+        b: Option<i32>,
+        c: String,
     }
     let mut iter = session
         .query_iter("SELECT a, b, c FROM examples_ks.basic", &[])
         .await?
-        .into_typed::<RowData>();
+        .rows_stream::<RowData>()?;
     while let Some(row_data) = iter.try_next().await? {
         println!("row_data: {:?}", row_data);
     }
@@ -77,15 +80,13 @@ async fn main() -> Result<()> {
     // Or simply as untyped rows
     let mut iter = session
         .query_iter("SELECT a, b, c FROM examples_ks.basic", &[])
-        .await?;
-    while let Some(row) = iter.try_next().await? {
+        .await?
+        .rows_stream::<Row>()?;
+    while let Some(row) = iter.next().await.transpose()? {
         let a = row.columns[0].as_ref().unwrap().as_int().unwrap();
         let b = row.columns[1].as_ref().unwrap().as_int().unwrap();
         let c = row.columns[2].as_ref().unwrap().as_text().unwrap();
         println!("a, b, c: {}, {}, {}", a, b, c);
-
-        // Alternatively each row can be parsed individually
-        // let (a2, b2, c2) = row.into_typed::<(i32, i32, String)>() ?;
     }
 
     let metrics = session.get_metrics();
diff --git a/examples/compare-tokens.rs b/examples/compare-tokens.rs
index 9e9431d869..5350006b99 100644
--- a/examples/compare-tokens.rs
+++ b/examples/compare-tokens.rs
@@ -51,7 +51,9 @@ async fn main() -> Result<()> {
                 (pk,),
             )
             .await?
-            .single_row_typed::<(i64,)>()?;
+            .into_rows_result()?
+            .expect("Got not Rows result")
+            .single_row()?;
         assert_eq!(t, qt);
         println!("token for {}: {}", pk, t);
     }
diff --git a/examples/cql-time-types.rs b/examples/cql-time-types.rs
index 548ac69878..29a66349e2 100644
--- a/examples/cql-time-types.rs
+++ b/examples/cql-time-types.rs
@@ -3,7 +3,7 @@
 use anyhow::Result;
 use chrono::{DateTime, NaiveDate, NaiveTime, Utc};
-use futures::{StreamExt, TryStreamExt};
+use futures::{StreamExt as _, TryStreamExt as _};
 use scylla::frame::response::result::CqlValue;
 use scylla::frame::value::{CqlDate, CqlTime, CqlTimestamp};
 use scylla::transport::session::Session;
@@ -44,7 +44,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT d from examples_ks.dates", &[])
         .await?
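+        // Note (added for clarity): unlike the old `into_typed()`, the new
+        // `rows_stream::<T>()` type-checks the requested Rust types against the
+        // returned column types up front, which is why it returns a Result
+        // (hence the extra `?` below).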
-        .into_typed::<(NaiveDate,)>();
+        .rows_stream::<(NaiveDate,)>()?;
     while let Some(row_result) = iter.next().await {
         let (read_date,): (NaiveDate,) = match row_result {
             Ok(read_date) => read_date,
@@ -66,7 +66,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT d from examples_ks.dates", &[])
         .await?
-        .into_typed::<(time::Date,)>();
+        .rows_stream::<(time::Date,)>()?;
     while let Some(row_result) = iter.next().await {
         let (read_date,): (time::Date,) = match row_result {
             Ok(read_date) => read_date,
@@ -88,7 +88,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT d from examples_ks.dates", &[])
         .await?
-        .into_typed::<(CqlValue,)>();
+        .rows_stream::<(CqlValue,)>()?;
     while let Some(row_result) = iter.next().await {
         let read_days: u32 = match row_result {
             Ok((CqlValue::Date(CqlDate(days)),)) => days,
@@ -124,7 +124,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT d from examples_ks.times", &[])
         .await?
-        .into_typed::<(NaiveTime,)>();
+        .rows_stream::<(NaiveTime,)>()?;
     while let Some((read_time,)) = iter.try_next().await? {
         println!("Parsed a time into chrono::NaiveTime: {:?}", read_time);
     }
@@ -139,7 +139,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT d from examples_ks.times", &[])
         .await?
-        .into_typed::<(time::Time,)>();
+        .rows_stream::<(time::Time,)>()?;
     while let Some((read_time,)) = iter.try_next().await? {
         println!("Parsed a time into time::Time: {:?}", read_time);
     }
@@ -154,7 +154,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT d from examples_ks.times", &[])
         .await?
-        .into_typed::<(CqlTime,)>();
+        .rows_stream::<(CqlTime,)>()?;
     while let Some((read_time,)) = iter.try_next().await? {
         println!("Read a time as raw nanos: {:?}", read_time);
     }
@@ -185,7 +185,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT d from examples_ks.timestamps", &[])
         .await?
-        .into_typed::<(DateTime<Utc>,)>();
+        .rows_stream::<(DateTime<Utc>,)>()?;
     while let Some((read_time,)) = iter.try_next().await? {
         println!(
             "Parsed a timestamp into chrono::DateTime<Utc>: {:?}",
             read_time
@@ -206,7 +206,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT d from examples_ks.timestamps", &[])
         .await?
-        .into_typed::<(time::OffsetDateTime,)>();
+        .rows_stream::<(time::OffsetDateTime,)>()?;
     while let Some((read_time,)) = iter.try_next().await? {
         println!(
             "Parsed a timestamp into time::OffsetDateTime: {:?}",
             read_time
@@ -227,7 +227,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT d from examples_ks.timestamps", &[])
         .await?
-        .into_typed::<(CqlTimestamp,)>();
+        .rows_stream::<(CqlTimestamp,)>()?;
     while let Some((read_time,)) = iter.try_next().await? {
         println!("Read a timestamp as raw millis: {:?}", read_time);
     }
diff --git a/examples/cqlsh-rs.rs b/examples/cqlsh-rs.rs
index c12b17a76c..ba46519636 100644
--- a/examples/cqlsh-rs.rs
+++ b/examples/cqlsh-rs.rs
@@ -3,8 +3,11 @@
 use rustyline::completion::{Completer, Pair};
 use rustyline::error::ReadlineError;
 use rustyline::{CompletionType, Config, Context, Editor};
 use rustyline_derive::{Helper, Highlighter, Hinter, Validator};
+use scylla::frame::response::result::Row;
+use scylla::transport::session::Session;
 use scylla::transport::Compression;
-use scylla::{LegacyQueryResult, Session, SessionBuilder};
+use scylla::QueryRowsResult;
+use scylla::SessionBuilder;
 use std::env;
@@ -173,23 +176,24 @@ impl Completer for CqlHelper {
     }
 }
 
-fn print_result(result: &LegacyQueryResult) {
-    if result.rows.is_none() {
-        println!("OK");
-        return;
-    }
-    for row in result.rows.as_ref().unwrap() {
-        for column in &row.columns {
-            print!("|");
-            print!(
-                " {:16}",
-                match column {
-                    None => "null".to_owned(),
-                    Some(value) => format!("{:?}", value),
-                }
-            );
+fn print_result(result: Option<&QueryRowsResult>) {
+    if let Some(rows_result) = result {
+        for row in rows_result.rows::<Row>().unwrap() {
+            let row = row.unwrap();
+            for column in &row.columns {
+                print!("|");
+                print!(
+                    " {:16}",
+                    match column {
+                        None => "null".to_owned(),
+                        Some(value) => format!("{:?}", value),
+                    }
+                );
+            }
+            println!("|")
         }
-        println!("|")
+    } else {
+        println!("OK");
     }
 }
@@ -222,7 +226,10 @@ async fn main() -> Result<()> {
                 let maybe_res = session.query_unpaged(line, &[]).await;
                 match maybe_res {
                     Err(err) => println!("Error: {}", err),
-                    Ok(res) => print_result(&res),
+                    Ok(res) => {
+                        let rows_res = res.into_rows_result()?;
+                        print_result(rows_res.as_ref())
+                    }
                 }
             }
             Err(ReadlineError::Interrupted) => continue,
diff --git a/examples/custom_deserialization.rs b/examples/custom_deserialization.rs
index 1d0173ca59..0306ebe879 100644
--- a/examples/custom_deserialization.rs
+++ b/examples/custom_deserialization.rs
@@ -1,8 +1,8 @@
-use anyhow::Result;
-use scylla::cql_to_rust::{FromCqlVal, FromCqlValError};
-use scylla::frame::response::result::CqlValue;
-use scylla::macros::impl_from_cql_value_from_method;
-use scylla::{Session, SessionBuilder};
+use anyhow::{Context, Result};
+use scylla::deserialize::DeserializeValue;
+use scylla::frame::response::result::ColumnType;
+use scylla::transport::session::Session;
+use scylla::SessionBuilder;
 use std::env;
 
 #[tokio::main]
@@ -28,53 +28,38 @@ async fn main() -> Result<()> {
     )
     .await?;
 
-    // You can implement FromCqlVal for your own types
+    // You can implement DeserializeValue for your own types
     #[derive(PartialEq, Eq, Debug)]
-    struct MyType(String);
+    struct MyType<'a>(&'a str);
 
-    impl FromCqlVal<CqlValue> for MyType {
-        fn from_cql(cql_val: CqlValue) -> Result<Self, FromCqlValError> {
-            Ok(Self(
-                cql_val.into_string().ok_or(FromCqlValError::BadCqlType)?,
-            ))
+    impl<'frame, 'metadata> DeserializeValue<'frame, 'metadata> for MyType<'frame> {
+        fn type_check(
+            typ: &scylla::frame::response::result::ColumnType,
+        ) -> std::result::Result<(), scylla::deserialize::TypeCheckError> {
+            <&str as DeserializeValue<'frame, 'metadata>>::type_check(typ)
         }
-    }
-
-    let (v,) = session
-        .query_unpaged(
-            "SELECT v FROM examples_ks.custom_deserialization WHERE pk = 1",
-            (),
-        )
-        .await?
-        .single_row_typed::<(MyType,)>()?;
-    assert_eq!(v, MyType("asdf".to_owned()));
-
-    // If you defined an extension trait for CqlValue then you can use
-    // the `impl_from_cql_value_from_method` macro to turn it into
-    // a FromCqlValue impl
-    #[derive(PartialEq, Eq, Debug)]
-    struct MyOtherType(String);
-
-    trait CqlValueExt {
-        fn into_my_other_type(self) -> Option<MyOtherType>;
-    }
+        fn deserialize(
+            typ: &'metadata ColumnType<'metadata>,
+            v: Option<scylla::deserialize::FrameSlice<'frame>>,
+        ) -> std::result::Result<Self, scylla::deserialize::DeserializationError> {
+            let s = <&str as DeserializeValue<'frame, 'metadata>>::deserialize(typ, v)?;
 
-    impl CqlValueExt for CqlValue {
-        fn into_my_other_type(self) -> Option<MyOtherType> {
-            Some(MyOtherType(self.into_string()?))
+            Ok(Self(s))
         }
     }
 
-    impl_from_cql_value_from_method!(MyOtherType, into_my_other_type);
-
-    let (v,) = session
+    let rows_result = session
         .query_unpaged(
             "SELECT v FROM examples_ks.custom_deserialization WHERE pk = 1",
             (),
         )
         .await?
-        .single_row_typed::<(MyOtherType,)>()?;
-    assert_eq!(v, MyOtherType("asdf".to_owned()));
+        .into_rows_result()?
+        .context("Expected Result:Rows response, got a different Result response.")?;
+
+    let (v,) = rows_result.single_row::<(MyType,)>()?;
+    assert_eq!(v, MyType("asdf"));
 
     println!("Ok.");
diff --git a/examples/get_by_name.rs b/examples/get_by_name.rs
index bb750de1b4..1caca3e3df 100644
--- a/examples/get_by_name.rs
+++ b/examples/get_by_name.rs
@@ -1,4 +1,5 @@
-use anyhow::{anyhow, Result};
+use anyhow::{anyhow, Context as _, Result};
+use scylla::frame::response::result::Row;
 use scylla::transport::session::Session;
 use scylla::SessionBuilder;
 use std::env;
@@ -35,18 +36,26 @@ async fn main() -> Result<()> {
     )
     .await?;
 
-    let query_result = session
+    let rows_result = session
         .query_unpaged("SELECT pk, ck, value FROM examples_ks.get_by_name", &[])
-        .await?;
-    let (ck_idx, _) = query_result
-        .get_column_spec("ck")
+        .await?
+        .into_rows_result()?
+        .context("Response is not of Rows type")?;
+    let col_specs = rows_result.column_specs();
+    let (ck_idx, _) = col_specs
+        .get_by_name("ck")
         .ok_or_else(|| anyhow!("No ck column found"))?;
-    let (value_idx, _) = query_result
-        .get_column_spec("value")
+    let (value_idx, _) = col_specs
+        .get_by_name("value")
        .ok_or_else(|| anyhow!("No value column found"))?;
+    let rows = rows_result
+        .rows::<Row>()
+        .unwrap()
+        .collect::<Result<Vec<_>, _>>()
+        .unwrap();
     println!("ck | value");
     println!("---------------------");
-    for row in query_result.rows.ok_or_else(|| anyhow!("no rows found"))? {
+    for row in rows {
         println!("{:?} | {:?}", row.columns[ck_idx], row.columns[value_idx]);
     }
diff --git a/examples/logging_log.rs b/examples/logging_log.rs
index da82f42241..19465018cc 100644
--- a/examples/logging_log.rs
+++ b/examples/logging_log.rs
@@ -1,6 +1,5 @@
 use anyhow::Result;
-use scylla::transport::session::Session;
-use scylla::SessionBuilder;
+use scylla::{Session, SessionBuilder};
 use std::env;
 use tracing::info;
diff --git a/examples/query_history.rs b/examples/query_history.rs
index d5e361f0ec..04d9586481 100644
--- a/examples/query_history.rs
+++ b/examples/query_history.rs
@@ -1,7 +1,8 @@
 //! This example shows how to collect history of query execution.
 use anyhow::Result;
-use futures::StreamExt;
+use futures::StreamExt as _;
+use scylla::frame::response::result::Row;
 use scylla::history::{HistoryCollector, StructuredHistory};
 use scylla::query::Query;
 use scylla::transport::session::Session;
@@ -59,7 +60,10 @@ async fn main() -> Result<()> {
     let iter_history_listener = Arc::new(HistoryCollector::new());
     iter_query.set_history_listener(iter_history_listener.clone());
 
-    let mut rows_iterator = session.query_iter(iter_query, ()).await?;
+    let mut rows_iterator = session
+        .query_iter(iter_query, ())
+        .await?
+        .rows_stream::<Row>()?;
     while let Some(_row) = rows_iterator.next().await {
         // Receive rows...
     }
diff --git a/examples/schema_agreement.rs b/examples/schema_agreement.rs
index 4709873fc4..d37cc32b72 100644
--- a/examples/schema_agreement.rs
+++ b/examples/schema_agreement.rs
@@ -1,5 +1,5 @@
 use anyhow::{bail, Result};
-use futures::TryStreamExt;
+use futures::TryStreamExt as _;
 use scylla::transport::errors::QueryError;
 use scylla::transport::session::Session;
 use scylla::SessionBuilder;
@@ -70,7 +70,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT a, b, c FROM examples_ks.schema_agreement", &[])
         .await?
-        .into_typed::<(i32, i32, String)>();
+        .rows_stream::<(i32, i32, String)>()?;
     while let Some((a, b, c)) = iter.try_next().await? {
         println!("a, b, c: {}, {}, {}", a, b, c);
     }
diff --git a/examples/select-paging.rs b/examples/select-paging.rs
index b3a19e3249..b3c7501feb 100644
--- a/examples/select-paging.rs
+++ b/examples/select-paging.rs
@@ -1,5 +1,5 @@
 use anyhow::Result;
-use futures::stream::StreamExt;
+use futures::StreamExt as _;
 use scylla::statement::PagingState;
 use scylla::{query::Query, Session, SessionBuilder};
 use std::env;
@@ -35,7 +35,7 @@ async fn main() -> Result<()> {
     let mut rows_stream = session
         .query_iter("SELECT a, b, c FROM examples_ks.select_paging", &[])
         .await?
-        .into_typed::<(i32, i32, String)>();
+        .rows_stream::<(i32, i32, String)>()?;
 
     while let Some(next_row_res) = rows_stream.next().await {
         let (a, b, c) = next_row_res?;
@@ -51,10 +51,14 @@ async fn main() -> Result<()> {
         .query_single_page(paged_query.clone(), &[], paging_state)
         .await?;
 
+    let res = res
+        .into_rows_result()?
+        .expect("Got result different than Rows");
+
     println!(
         "Paging state: {:#?} ({} rows)",
         paging_state_response,
-        res.rows_num()?,
+        res.rows_num(),
     );
 
     match paging_state_response.into_paging_control_flow() {
@@ -81,10 +85,14 @@ async fn main() -> Result<()> {
         .execute_single_page(&paged_prepared, &[], paging_state)
         .await?;
 
+    let res = res
+        .into_rows_result()?
+        .expect("Got result different than Rows");
+
     println!(
         "Paging state from the prepared statement execution: {:#?} ({} rows)",
         paging_state_response,
-        res.rows_num()?,
+        res.rows_num(),
     );
 
     match paging_state_response.into_paging_control_flow() {
diff --git a/examples/tls.rs b/examples/tls.rs
index 0671352147..d95f14bea2 100644
--- a/examples/tls.rs
+++ b/examples/tls.rs
@@ -1,5 +1,5 @@
 use anyhow::Result;
-use futures::TryStreamExt;
+use futures::TryStreamExt as _;
 use scylla::transport::session::Session;
 use scylla::SessionBuilder;
 use std::env;
@@ -90,7 +90,7 @@ async fn main() -> Result<()> {
     let mut iter = session
         .query_iter("SELECT a, b, c FROM examples_ks.tls", &[])
         .await?
-        .into_typed::<(i32, i32, String)>();
+        .rows_stream::<(i32, i32, String)>()?;
     while let Some((a, b, c)) = iter.try_next().await? {
         println!("a, b, c: {}, {}, {}", a, b, c);
     }
diff --git a/examples/tower.rs b/examples/tower.rs
index 0d28407da4..c34c3f3986 100644
--- a/examples/tower.rs
+++ b/examples/tower.rs
@@ -1,3 +1,5 @@
+use scylla::frame::response::result::Row;
+use scylla::transport::session::Session;
 use std::env;
 use std::future::Future;
 use std::pin::Pin;
@@ -7,12 +9,12 @@ use std::task::Poll;
 use tower::Service;
 
 struct SessionService {
-    session: Arc<scylla::Session>,
+    session: Arc<Session>,
 }
 
 // A trivial service implementation for sending parameterless simple string requests to Scylla.
 impl Service<String> for SessionService {
-    type Response = scylla::LegacyQueryResult;
+    type Response = scylla::QueryResult;
     type Error = scylla::transport::errors::QueryError;
     type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
 
@@ -40,9 +42,11 @@ async fn main() -> anyhow::Result<()> {
         ),
     };
 
-    let resp = session
+    let rows_result = session
         .call("SELECT keyspace_name, table_name FROM system_schema.tables;".into())
-        .await?;
+        .await?
+        .into_rows_result()?
+        .expect("Got result different than Rows");
 
     let print_text = |t: &Option<scylla::frame::response::result::CqlValue>| {
         t.as_ref()
@@ -56,14 +60,15 @@ async fn main() -> anyhow::Result<()> {
 
     println!(
         "Tables:\n{}",
-        resp.rows()?
-            .into_iter()
-            .map(|r| format!(
+        rows_result
+            .rows::<Row>()?
+            .map(|r| r.map(|r| format!(
                 "\t{}.{}",
                 print_text(&r.columns[0]),
                 print_text(&r.columns[1])
-            ))
-            .collect::<Vec<_>>()
+            )))
+            .collect::<Result<Vec<_>, _>>()
+            .unwrap()
             .join("\n")
     );
     Ok(())
diff --git a/examples/tracing.rs b/examples/tracing.rs
index 12767de5b0..dd035c095d 100644
--- a/examples/tracing.rs
+++ b/examples/tracing.rs
@@ -2,14 +2,13 @@
 // query() prepare() execute() batch() query_iter() and execute_iter() can be traced
 
 use anyhow::{anyhow, Result};
-use futures::StreamExt;
+use futures::StreamExt as _;
 use scylla::batch::Batch;
 use scylla::statement::{
     prepared_statement::PreparedStatement, query::Query, Consistency, SerialConsistency,
 };
 use scylla::tracing::TracingInfo;
-use scylla::transport::iterator::LegacyRowIterator;
-use scylla::LegacyQueryResult;
+use scylla::QueryResult;
 use scylla::{Session, SessionBuilder};
 use std::env;
 use std::num::NonZeroU32;
@@ -42,9 +41,9 @@ async fn main() -> Result<()> {
     query.set_serial_consistency(Some(SerialConsistency::LocalSerial));
 
     // QueryResult will contain a tracing_id which can be used to query tracing information
-    let query_result: LegacyQueryResult = session.query_unpaged(query.clone(), &[]).await?;
+    let query_result: QueryResult = session.query_unpaged(query.clone(), &[]).await?;
     let query_tracing_id: Uuid = query_result
-        .tracing_id
+        .tracing_id()
         .ok_or_else(|| anyhow!("Tracing id is None!"))?;
 
     // Get tracing information for this query and print it
@@ -79,23 +78,24 @@ async fn main() -> Result<()> {
     // To trace execution of a prepared statement tracing must be enabled for it
     prepared.set_tracing(true);
 
-    let execute_result: LegacyQueryResult = session.execute_unpaged(&prepared, &[]).await?;
-    println!("Execute tracing id: {:?}", execute_result.tracing_id);
+    let execute_result: QueryResult = session.execute_unpaged(&prepared, &[]).await?;
+    println!("Execute tracing id: {:?}", execute_result.tracing_id());
 
     // PAGED QUERY_ITER EXECUTE_ITER
     // It's also possible to trace paged queries like query_iter or execute_iter
-    // After iterating through all rows iterator.get_tracing_ids() will give tracing ids
-    // for all page queries
-    let mut row_iterator: LegacyRowIterator = session.query_iter(query, &[]).await?;
+    // After iterating through all rows query_pager.tracing_ids() will give tracing ids
+    // for all page queries.
+    let mut row_stream = session.query_iter(query, &[]).await?.rows_stream()?;
 
-    while let Some(_row) = row_iterator.next().await {
+    while let Some(row) = row_stream.next().await {
         // Receive rows
+        let _row: (String, i32, Uuid) = row?;
     }
 
     // Now print tracing ids for all page queries:
     println!(
-        "Paged row iterator tracing ids: {:?}\n",
-        row_iterator.get_tracing_ids()
+        "Paged row stream tracing ids: {:?}\n",
+        row_stream.tracing_ids()
     );
 
     // BATCH
@@ -105,8 +105,8 @@ async fn main() -> Result<()> {
     batch.set_tracing(true);
 
     // Run the batch and print its tracing_id
-    let batch_result: LegacyQueryResult = session.batch(&batch, ((),)).await?;
-    println!("Batch tracing id: {:?}\n", batch_result.tracing_id);
+    let batch_result: QueryResult = session.batch(&batch, ((),)).await?;
+    println!("Batch tracing id: {:?}\n", batch_result.tracing_id());
 
     // CUSTOM
     // Session configuration allows specifying custom settings for querying tracing info.
diff --git a/examples/user-defined-type.rs b/examples/user-defined-type.rs
index 6e2d65286c..39b3003737 100644
--- a/examples/user-defined-type.rs
+++ b/examples/user-defined-type.rs
@@ -1,6 +1,6 @@
 use anyhow::Result;
-use futures::TryStreamExt;
-use scylla::macros::FromUserType;
+use futures::TryStreamExt as _;
+use scylla::macros::DeserializeValue;
 use scylla::{SerializeValue, Session, SessionBuilder};
 use std::env;
@@ -30,7 +30,7 @@ async fn main() -> Result<()> {
     // Define custom struct that matches User Defined Type created earlier
     // wrapping field in Option will gracefully handle null field values
-    #[derive(Debug, FromUserType, SerializeValue)]
+    #[derive(Debug, DeserializeValue, SerializeValue)]
     struct MyType {
         int_val: i32,
         text_val: Option<String>,
     }
@@ -56,7 +56,7 @@ async fn main() -> Result<()> {
             &[],
         )
         .await?
-        .into_typed::<(MyType,)>();
+        .rows_stream::<(MyType,)>()?;
     while let Some((my_val,)) = iter.try_next().await? {
         println!("{:?}", my_val);
     }
diff --git a/examples/value_list.rs b/examples/value_list.rs
index 81568baeef..a8197edca1 100644
--- a/examples/value_list.rs
+++ b/examples/value_list.rs
@@ -1,4 +1,5 @@
 use anyhow::Result;
+use futures::StreamExt;
 use scylla::{Session, SessionBuilder};
 use std::env;
@@ -8,7 +9,7 @@ async fn main() -> Result<()> {
     println!("Connecting to {} ...", uri);
 
-    let session: Session = SessionBuilder::new().known_node(uri).build().await?;
+    let session: Session = SessionBuilder::new().known_node(uri).build().await.unwrap();
 
     session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?;
@@ -56,11 +57,13 @@ async fn main() -> Result<()> {
     )
     .await?;
 
-    let q = session
-        .query_unpaged("SELECT * FROM examples_ks.my_type", &[])
-        .await?;
+    let iter = session
+        .query_iter("SELECT * FROM examples_ks.my_type", &[])
+        .await?
+        .rows_stream::<(i32, String)>()?;
 
-    println!("Q: {:?}", q.rows);
+    let rows = iter.collect::<Vec<_>>().await;
+    println!("Q: {:?}", rows);
 
     Ok(())
 }
diff --git a/scylla/src/history.rs b/scylla/src/history.rs
index d4693163ef..a055f91a39 100644
--- a/scylla/src/history.rs
+++ b/scylla/src/history.rs
@@ -469,8 +469,8 @@ mod tests {
     use crate::test_utils::create_new_session_builder;
     use assert_matches::assert_matches;
     use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc};
-    use futures::StreamExt;
-    use scylla_cql::Consistency;
+    use futures::StreamExt as _;
+    use scylla_cql::{frame::response::result::Row, Consistency};
 
     // Set a single time for all timestamps within StructuredHistory.
     // HistoryCollector sets the timestamp to current time which changes with each test.
@@ -1045,7 +1045,12 @@ mod tests {
         let history_collector = Arc::new(HistoryCollector::new());
         iter_query.set_history_listener(history_collector.clone());
 
-        let mut rows_iterator = session.query_iter(iter_query, ()).await.unwrap();
+        let mut rows_iterator = session
+            .query_iter(iter_query, ())
+            .await
+            .unwrap()
+            .rows_stream::<Row>()
+            .unwrap();
         while let Some(_row) = rows_iterator.next().await {
             // Receive rows...
         }
diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs
index aaa1506bd5..8b62c0c2b4 100644
--- a/scylla/src/lib.rs
+++ b/scylla/src/lib.rs
@@ -72,21 +72,21 @@
 //! # use scylla::Session;
 //! # use std::error::Error;
 //! # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
-//! use scylla::IntoTypedRows;
 //!
 //! // Read rows containing an int and text
 //! // Keep in mind that all results come in one response (no paging is done!),
 //! // so the memory footprint and latency may be huge!
 //! // To prevent that, use `Session::query_iter` or `Session::query_single_page`.
-//! let rows_opt = session
+//! let query_rows = session
 //!     .query_unpaged("SELECT a, b FROM ks.tab", &[])
 //!     .await?
-//!     .rows;
+//!     .into_rows_result()?;
+//!
 //!
-//! if let Some(rows) = rows_opt {
-//!     for row in rows.into_typed::<(i32, String)>() {
+//! if let Some(rows) = query_rows {
+//!     for row in rows.rows()? {
 //!         // Parse row as int and text
-//!         let (int_val, text_val): (i32, String) = row?;
+//!         let (int_val, text_val): (i32, &str) = row?;
 //!     }
 //! }
 //! # Ok(())
@@ -230,6 +230,22 @@ pub mod deserialize {
         UdtIterator, UdtTypeCheckErrorKind,
     };
+
+    // Shorthands for better readability.
+    #[cfg_attr(not(test), allow(unused))]
+    pub(crate) trait DeserializeOwnedValue:
+        for<'frame, 'metadata> DeserializeValue<'frame, 'metadata>
+    {
+    }
+    impl<T> DeserializeOwnedValue for T where
+        T: for<'frame, 'metadata> DeserializeValue<'frame, 'metadata>
+    {
+    }
+    pub(crate) trait DeserializeOwnedRow:
+        for<'frame, 'metadata> DeserializeRow<'frame, 'metadata>
+    {
+    }
+    impl<T> DeserializeOwnedRow for T where T: for<'frame, 'metadata> DeserializeRow<'frame, 'metadata> {}
 }
 
 pub mod authentication;
@@ -256,11 +272,11 @@ pub use statement::query;
 
 pub use frame::response::cql_to_rust;
 pub use frame::response::cql_to_rust::FromRow;
 
-pub use transport::caching_session::CachingSession;
+pub use transport::caching_session::{CachingSession, GenericCachingSession, LegacyCachingSession};
 pub use transport::execution_profile::ExecutionProfile;
 pub use transport::legacy_query_result::LegacyQueryResult;
 pub use transport::query_result::{QueryResult, QueryRowsResult};
-pub use transport::session::{IntoTypedRows, Session, SessionConfig};
+pub use transport::session::{IntoTypedRows, LegacySession, Session, SessionConfig};
 pub use transport::session_builder::SessionBuilder;
 
 #[cfg(feature = "cloud")]
diff --git a/scylla/src/tracing.rs b/scylla/src/tracing.rs
index 53019e7865..459eb81e4c 100644
--- a/scylla/src/tracing.rs
+++ b/scylla/src/tracing.rs
@@ -1,15 +1,14 @@
+use crate::frame::value::CqlTimestamp;
 use itertools::Itertools;
 use scylla_cql::frame::value::CqlTimeuuid;
+use scylla_macros::DeserializeRow;
 use std::collections::HashMap;
 use std::net::IpAddr;
 
-use crate::cql_to_rust::{FromRow, FromRowError};
-use crate::frame::response::result::Row;
-use crate::frame::value::CqlTimestamp;
-
 /// Tracing info retrieved from `system_traces.sessions`
 /// with all events from `system_traces.events`
-#[derive(Debug, Clone, PartialEq, Eq)]
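+// Note (added for clarity): the DeserializeRow derive replaces the handwritten
+// FromRow impls removed at the bottom of this file; `#[scylla(crate = "crate")]`
+// is needed because this code lives inside the scylla crate itself.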
+#[derive(Debug, DeserializeRow, Clone, PartialEq, Eq)]
+#[scylla(crate = "crate")]
 pub struct TracingInfo {
     pub client: Option<IpAddr>,
     pub command: Option<String>,
@@ -20,11 +19,13 @@ pub struct TracingInfo {
     /// started_at is a timestamp - time since unix epoch
     pub started_at: Option<CqlTimestamp>,
 
+    #[scylla(skip)]
     pub events: Vec<TracingEvent>,
 }
 
 /// A single event happening during a traced query
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, DeserializeRow, Clone, PartialEq, Eq)]
+#[scylla(crate = "crate")]
 pub struct TracingEvent {
     pub event_id: CqlTimeuuid,
     pub activity: Option<String>,
@@ -53,51 +54,3 @@ pub(crate) const TRACES_SESSION_QUERY_STR: &str =
 pub(crate) const TRACES_EVENTS_QUERY_STR: &str =
     "SELECT event_id, activity, source, source_elapsed, thread \
      FROM system_traces.events WHERE session_id = ?";
-
-// Converts a row received by performing TRACES_SESSION_QUERY_STR to TracingInfo
-impl FromRow for TracingInfo {
-    fn from_row(row: Row) -> Result<TracingInfo, FromRowError> {
-        let (client, command, coordinator, duration, parameters, request, started_at) =
-            <(
-                Option<IpAddr>,
-                Option<String>,
-                Option<IpAddr>,
-                Option<i32>,
-                Option<HashMap<String, String>>,
-                Option<String>,
-                Option<CqlTimestamp>,
-            )>::from_row(row)?;
-
-        Ok(TracingInfo {
-            client,
-            command,
-            coordinator,
-            duration,
-            parameters,
-            request,
-            started_at,
-            events: Vec::new(),
-        })
-    }
-}
-
-// Converts a row received by performing TRACES_SESSION_QUERY_STR to TracingInfo
-impl FromRow for TracingEvent {
-    fn from_row(row: Row) -> Result<TracingEvent, FromRowError> {
-        let (event_id, activity, source, source_elapsed, thread) = <(
-            CqlTimeuuid,
-            Option<String>,
-            Option<IpAddr>,
-            Option<i32>,
-            Option<String>,
-        )>::from_row(row)?;
-
-        Ok(TracingEvent {
-            event_id,
-            activity,
-            source,
-            source_elapsed,
-            thread,
-        })
-    }
-}
diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs
index cbf9d3c6dc..192ad6dd46 100644
--- a/scylla/src/transport/caching_session.rs
+++ b/scylla/src/transport/caching_session.rs
@@ -5,7 +5,7 @@ use crate::statement::{PagingState, PagingStateResponse};
 use crate::transport::errors::QueryError;
 use crate::transport::iterator::LegacyRowIterator;
 use crate::transport::partitioner::PartitionerName;
-use crate::{LegacyQueryResult, Session};
+use crate::{LegacyQueryResult, QueryResult};
 use bytes::Bytes;
 use dashmap::DashMap;
 use futures::future::try_join_all;
@@ -16,6 +16,11 @@ use std::collections::hash_map::RandomState;
 use std::hash::BuildHasher;
 use std::sync::Arc;
 
+use super::iterator::QueryPager;
+use super::session::{
+    CurrentDeserializationApi, DeserializationApiKind, GenericSession, LegacyDeserializationApi,
+};
+
 /// Contains just the parts of a prepared statement that were returned
 /// from the database. All remaining parts (query string, page size,
 /// consistency, etc.) are taken from the Query passed
@@ -31,11 +36,12 @@ struct RawPreparedStatementData {
 
 /// Provides auto caching while executing queries
 #[derive(Debug)]
-pub struct CachingSession<S = RandomState>
+pub struct GenericCachingSession<DeserializationApi, S = RandomState>
 where
     S: Clone + BuildHasher,
+    DeserializationApi: DeserializationApiKind,
 {
-    session: Session,
+    session: GenericSession<DeserializationApi>,
     /// The prepared statement cache size
     /// If a prepared statement is added while the limit is reached, the oldest prepared statement
     /// is removed from the cache
@@ -43,11 +49,15 @@ where
     cache: DashMap<String, RawPreparedStatementData, S>,
 }
 
-impl<S> CachingSession<S>
+pub type CachingSession<S = RandomState> = GenericCachingSession<CurrentDeserializationApi, S>;
+pub type LegacyCachingSession<S = RandomState> = GenericCachingSession<LegacyDeserializationApi, S>;
+
+impl<DeserApi, S> GenericCachingSession<DeserApi, S>
 where
     S: Default + BuildHasher + Clone,
+    DeserApi: DeserializationApiKind,
 {
-    pub fn from(session: Session, cache_size: usize) -> Self {
+    pub fn from(session: GenericSession<DeserApi>, cache_size: usize) -> Self {
         Self {
             session,
             max_capacity: cache_size,
@@ -56,21 +66,95 @@ where
         }
     }
 }
 
-impl<S> CachingSession<S>
+impl<DeserApi, S> GenericCachingSession<DeserApi, S>
 where
     S: BuildHasher + Clone,
+    DeserApi: DeserializationApiKind,
 {
-    /// Builds a [`CachingSession`] from a [`Session`], a cache size, and a [`BuildHasher`].,
-    /// using a customer hasher.
-    pub fn with_hasher(session: Session, cache_size: usize, hasher: S) -> Self {
+    /// Builds a [`CachingSession`] from a [`Session`](GenericSession), a cache size,
+    /// and a [`BuildHasher`], using a custom hasher.
+    pub fn with_hasher(session: GenericSession<DeserApi>, cache_size: usize, hasher: S) -> Self {
         Self {
             session,
             max_capacity: cache_size,
             cache: DashMap::with_hasher(hasher),
         }
     }
+}
 
-    /// Does the same thing as [`Session::execute_unpaged`] but uses the prepared statement cache
+impl<S> GenericCachingSession<CurrentDeserializationApi, S>
+where
+    S: BuildHasher + Clone,
+{
+    /// Does the same thing as [`Session::execute_unpaged`](GenericSession::execute_unpaged)
+    /// but uses the prepared statement cache.
+    pub async fn execute_unpaged(
+        &self,
+        query: impl Into<Query>,
+        values: impl SerializeRow,
+    ) -> Result<QueryResult, QueryError> {
+        let query = query.into();
+        let prepared = self.add_prepared_statement_owned(query).await?;
+        self.session.execute_unpaged(&prepared, values).await
+    }
+
+    /// Does the same thing as [`Session::execute_iter`](GenericSession::execute_iter)
+    /// but uses the prepared statement cache.
+    pub async fn execute_iter(
+        &self,
+        query: impl Into<Query>,
+        values: impl SerializeRow,
+    ) -> Result<QueryPager, QueryError> {
+        let query = query.into();
+        let prepared = self.add_prepared_statement_owned(query).await?;
+        self.session.execute_iter(prepared, values).await
+    }
+
+    /// Does the same thing as [`Session::execute_single_page`](GenericSession::execute_single_page)
+    /// but uses the prepared statement cache.
+    pub async fn execute_single_page(
+        &self,
+        query: impl Into<Query>,
+        values: impl SerializeRow,
+        paging_state: PagingState,
+    ) -> Result<(QueryResult, PagingStateResponse), QueryError> {
+        let query = query.into();
+        let prepared = self.add_prepared_statement_owned(query).await?;
+        self.session
+            .execute_single_page(&prepared, values, paging_state)
+            .await
+    }
+
+    /// Does the same thing as [`Session::batch`](GenericSession::batch) but uses the
+    /// prepared statement cache.\
+    /// Prepares batch using [`CachingSession::prepare_batch`](GenericCachingSession::prepare_batch)
+    /// if needed and then executes it.
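+    ///
+    /// Note that this impl block is the new-deserialization-API variant, so the
+    /// result here is the new [`QueryResult`]; the counterpart in the legacy impl
+    /// block below still returns [`LegacyQueryResult`].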
+    pub async fn batch(
+        &self,
+        batch: &Batch,
+        values: impl BatchValues,
+    ) -> Result<QueryResult, QueryError> {
+        let all_prepared: bool = batch
+            .statements
+            .iter()
+            .all(|stmt| matches!(stmt, BatchStatement::PreparedStatement(_)));
+
+        if all_prepared {
+            self.session.batch(batch, &values).await
+        } else {
+            let prepared_batch: Batch = self.prepare_batch(batch).await?;
+
+            self.session.batch(&prepared_batch, &values).await
+        }
+    }
+}
+
+impl<S> GenericCachingSession<LegacyDeserializationApi, S>
+where
+    S: BuildHasher + Clone,
+{
+    /// Does the same thing as [`Session::execute_unpaged`](GenericSession::execute_unpaged)
+    /// but uses the prepared statement cache.
     pub async fn execute_unpaged(
         &self,
         query: impl Into<Query>,
@@ -81,7 +165,8 @@ where
         self.session.execute_unpaged(&prepared, values).await
     }
 
-    /// Does the same thing as [`Session::execute_iter`] but uses the prepared statement cache
+    /// Does the same thing as [`Session::execute_iter`](GenericSession::execute_iter)
+    /// but uses the prepared statement cache.
     pub async fn execute_iter(
         &self,
         query: impl Into<Query>,
@@ -92,7 +177,8 @@ where
         self.session.execute_iter(prepared, values).await
     }
 
-    /// Does the same thing as [`Session::execute_single_page`] but uses the prepared statement cache
+    /// Does the same thing as [`Session::execute_single_page`](GenericSession::execute_single_page)
+    /// but uses the prepared statement cache.
     pub async fn execute_single_page(
         &self,
         query: impl Into<Query>,
@@ -106,8 +192,9 @@ where
             .await
     }
 
-    /// Does the same thing as [`Session::batch`] but uses the prepared statement cache\
-    /// Prepares batch using CachingSession::prepare_batch if needed and then executes it
+    /// Does the same thing as [`Session::batch`](GenericSession::batch) but uses
+    /// the prepared statement cache.\
+    /// Prepares batch using CachingSession::prepare_batch if needed and then executes it.
     pub async fn batch(
         &self,
         batch: &Batch,
@@ -126,7 +213,13 @@ where
             self.session.batch(&prepared_batch, &values).await
         }
     }
+}
 
+impl<DeserApi, S> GenericCachingSession<DeserApi, S>
+where
+    S: BuildHasher + Clone,
+    DeserApi: DeserializationApiKind,
+{
     /// Prepares all statements within the batch and returns a new batch where every
     /// statement is prepared.
     /// Uses the prepared statements cache.
@@ -212,7 +305,7 @@ where
         self.max_capacity
     }
 
-    pub fn get_session(&self) -> &Session {
+    pub fn get_session(&self) -> &GenericSession<DeserApi> {
         &self.session
     }
 }
@@ -223,13 +316,15 @@ mod tests {
     use crate::statement::PagingState;
     use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing};
     use crate::transport::partitioner::PartitionerName;
+    use crate::transport::session::Session;
     use crate::utils::test_utils::unique_keyspace_name;
     use crate::{
         batch::{Batch, BatchStatement},
         prepared_statement::PreparedStatement,
-        CachingSession, Session,
+        CachingSession,
     };
     use futures::TryStreamExt;
+    use scylla_cql::frame::response::result::Row;
     use std::collections::BTreeSet;
 
     async fn new_for_test(with_tablet_support: bool) -> Session {
@@ -333,17 +428,20 @@ mod tests {
             .execute_unpaged("select * from test_table", &[])
             .await
             .unwrap();
+        let result_rows = result.into_rows_result().unwrap().unwrap();
 
         assert_eq!(1, session.cache.len());
-        assert_eq!(1, result.rows_num().unwrap());
+        assert_eq!(1, result_rows.rows_num());
 
         let result = session
             .execute_unpaged("select * from test_table", &[])
             .await
             .unwrap();
+        let result_rows = result.into_rows_result().unwrap().unwrap();
+
         assert_eq!(1, session.cache.len());
-        assert_eq!(1, result.rows_num().unwrap());
+        assert_eq!(1, result_rows.rows_num());
     }
 
     /// Checks that caching works with execute_iter
@@ -357,9 +455,17 @@ mod tests {
         let iter = session
             .execute_iter("select * from test_table", &[])
             .await
-            .unwrap();
+            .unwrap()
+            .rows_stream::<Row>()
+            .unwrap();
 
-        let rows = iter.try_collect::<Vec<_>>().await.unwrap().len();
+        let rows = iter
+            .into_stream()
+            .try_collect::<Vec<_>>()
+            .await
+            .unwrap()
+            .len();
 
         assert_eq!(1, rows);
         assert_eq!(1, session.cache.len());
@@ -379,7 +485,7 @@ mod tests {
             .unwrap();
 
         assert_eq!(1, session.cache.len());
-        assert_eq!(1, result.rows_num().unwrap());
+        assert_eq!(1, result.into_rows_result().unwrap().unwrap().rows_num());
     }
 
     async fn assert_test_batch_table_rows_contain(
@@ -390,7 +496,10 @@ mod tests {
             .execute_unpaged("SELECT a, b FROM test_batch_table", ())
             .await
             .unwrap()
-            .rows_typed::<(i32, i32)>()
+            .into_rows_result()
+            .unwrap()
+            .unwrap()
+            .rows::<(i32, i32)>()
             .unwrap()
             .map(|r| r.unwrap())
             .collect();
@@ -599,7 +708,11 @@ mod tests {
         .execute_unpaged("SELECT b, WRITETIME(b) FROM tbl", ())
         .await
         .unwrap()
-        .rows_typed_or_empty::<(i32, i64)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i64)>()
+        .unwrap()
        .collect::<Result<Vec<_>, _>>()
        .unwrap();
diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs
index 188984393c..7610650c61 100644
--- a/scylla/src/transport/connection.rs
+++ b/scylla/src/transport/connection.rs
@@ -46,8 +46,8 @@ use std::{
     net::{Ipv4Addr, Ipv6Addr},
 };
 
-use super::errors::{ProtocolError, UseKeyspaceProtocolError};
-use super::iterator::{LegacyRowIterator, QueryPager};
+use super::errors::{ProtocolError, SchemaVersionFetchError, UseKeyspaceProtocolError};
+use super::iterator::QueryPager;
 use super::locator::tablets::{RawTablet, TabletParsingError};
 use super::query_result::QueryResult;
 use super::session::AddressTranslator;
@@ -1182,7 +1182,7 @@ impl Connection {
     pub(crate) async fn query_iter(
         self: Arc<Self>,
         query: Query,
-    ) -> Result<LegacyRowIterator, QueryError> {
+    ) -> Result<QueryPager, QueryError> {
         let consistency = query
             .config
             .determine_consistency(self.config.default_consistency);
@@ -1190,7 +1190,6 @@ impl Connection {
 
         QueryPager::new_for_connection_query_iter(query, self, consistency, serial_consistency)
             .await
-            .map(QueryPager::into_legacy)
     }
 
     /// Executes a prepared statement and fetches its results over multiple pages, using
@@ -1199,7 +1198,7 @@ impl Connection {
         self: Arc<Self>,
         prepared_statement: PreparedStatement,
         values: SerializedValues,
-    ) -> Result<LegacyRowIterator, QueryError> {
+    ) -> Result<QueryPager, QueryError> {
         let consistency = prepared_statement
             .config
             .determine_consistency(self.config.default_consistency);
@@ -1213,7 +1212,6 @@ impl Connection {
             serial_consistency,
         )
         .await
-        .map(QueryPager::into_legacy)
     }
 
     #[allow(dead_code)]
@@ -1436,9 +1434,15 @@ impl Connection {
         let (version_id,) = self
             .query_unpaged(LOCAL_VERSION)
             .await?
-            .into_legacy_result()?
-            .single_row_typed()
-            .map_err(ProtocolError::SchemaVersionFetch)?;
+            .into_rows_result()?
+            .ok_or(QueryError::ProtocolError(
+                ProtocolError::SchemaVersionFetch(SchemaVersionFetchError::ResultNotRows),
+            ))?
+            .single_row::<(Uuid,)>()
+            .map_err(|err| {
+                ProtocolError::SchemaVersionFetch(SchemaVersionFetchError::SingleRowError(err))
+            })?;
+
         Ok(version_id)
     }
 
@@ -2473,6 +2477,8 @@ mod tests {
             .query_iter(select_query.clone())
             .await
             .unwrap()
+            .rows_stream::<(i32,)>()
+            .unwrap()
             .try_collect::<Vec<_>>()
             .await
             .unwrap();
@@ -2497,7 +2503,8 @@ mod tests {
             .query_iter(select_query.clone())
             .await
             .unwrap()
-            .into_typed::<(i32,)>()
+            .rows_stream::<(i32,)>()
+            .unwrap()
             .map(|ret| ret.unwrap().0)
             .collect::<Vec<_>>()
             .await;
@@ -2511,6 +2518,8 @@ mod tests {
         ))
         .await
         .unwrap()
+        .rows_stream::<()>()
+        .unwrap()
        .try_collect::<Vec<_>>()
        .await
        .unwrap();
@@ -2609,9 +2618,10 @@ mod tests {
             .query_unpaged("SELECT p, v FROM t")
             .await
             .unwrap()
-            .into_legacy_result()
+            .into_rows_result()
+            .unwrap()
             .unwrap()
-            .rows_typed::<(i32, Vec<u8>)>()
+            .rows::<(i32, Vec<u8>)>()
             .unwrap()
             .collect::<Result<Vec<_>, _>>()
             .unwrap();
diff --git a/scylla/src/transport/cql_collections_test.rs b/scylla/src/transport/cql_collections_test.rs
index d9fb521500..475bd47eeb 100644
--- a/scylla/src/transport/cql_collections_test.rs
+++ b/scylla/src/transport/cql_collections_test.rs
@@ -1,7 +1,9 @@
-use crate::cql_to_rust::FromCqlVal;
+use crate::deserialize::DeserializeOwnedValue;
+use crate::transport::session::Session;
+
+use crate::frame::response::result::CqlValue;
 use crate::test_utils::{create_new_session_builder, setup_tracing};
 use crate::utils::test_utils::unique_keyspace_name;
-use crate::{frame::response::result::CqlValue, Session};
 use scylla_cql::types::serialize::value::SerializeValue;
 use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
 
@@ -34,7 +36,7 @@ async fn insert_and_select(
     expected: &SelectT,
 ) where
     InsertT: SerializeValue,
-    SelectT: FromCqlVal<Option<CqlValue>> + PartialEq + std::fmt::Debug,
+    SelectT: DeserializeOwnedValue + PartialEq + std::fmt::Debug,
 {
     session
         .query_unpaged(
@@ -48,7 +50,10 @@ async fn insert_and_select(
         .query_unpaged(format!("SELECT val FROM {} WHERE p = 0", table_name), ())
         .await
         .unwrap()
-        .single_row_typed::<(SelectT,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(SelectT,)>()
         .unwrap()
         .0;
diff --git a/scylla/src/transport/cql_types_test.rs b/scylla/src/transport/cql_types_test.rs
index 072e7b8fdf..2863df76c0 100644
--- a/scylla/src/transport/cql_types_test.rs
+++ b/scylla/src/transport/cql_types_test.rs
@@ -1,15 +1,14 @@
 use crate as scylla;
-use crate::cql_to_rust::FromCqlVal;
+use crate::deserialize::DeserializeOwnedValue;
 use crate::frame::response::result::CqlValue;
 use crate::frame::value::{Counter, CqlDate, CqlTime, CqlTimestamp};
-use crate::macros::FromUserType;
 use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing};
 use crate::transport::session::Session;
 use crate::utils::test_utils::unique_keyspace_name;
 use itertools::Itertools;
 use scylla_cql::frame::value::{CqlTimeuuid, CqlVarint};
 use scylla_cql::types::serialize::value::SerializeValue;
-use scylla_macros::SerializeValue;
+use scylla_macros::{DeserializeValue, SerializeValue};
 use std::cmp::PartialEq;
 use std::fmt::Debug;
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
@@ -75,7 +74,7 @@ async fn init_test(table_name: &str, type_name: &str) -> Session {
 // Expected values and bound values are computed using T::from_str
 async fn run_tests<T>(tests: &[&str], type_name: &str)
 where
-    T: SerializeValue + FromCqlVal<CqlValue> + FromStr + Debug + Clone + PartialEq,
+    T: SerializeValue + DeserializeOwnedValue + FromStr + Debug + Clone + PartialEq,
 {
     let session: Session = init_test(type_name, type_name).await;
     session.await_schema_agreement().await.unwrap();
@@ -100,7 +99,10 @@ where
         .query_unpaged(select_values, &[])
         .await
         .unwrap()
-        .rows_typed::<(T,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(T,)>()
         .unwrap()
         .map(Result::unwrap)
         .map(|row| row.0)
@@ -218,7 +220,10 @@ async fn test_cql_varint() {
         .execute_unpaged(&prepared_select, &[])
         .await
         .unwrap()
-        .rows_typed::<(CqlVarint,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(CqlVarint,)>()
         .unwrap()
         .map(Result::unwrap)
         .map(|row| row.0)
@@ -293,7 +298,10 @@ async fn test_counter() {
         .query_unpaged(select_values, (i as i32,))
         .await
         .unwrap()
-        .rows_typed::<(Counter,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(Counter,)>()
         .unwrap()
         .map(Result::unwrap)
         .map(|row| row.0)
@@ -369,7 +377,10 @@ async fn test_naive_date_04() {
         .query_unpaged("SELECT val from chrono_naive_date_tests", &[])
         .await
         .unwrap()
-        .rows_typed::<(NaiveDate,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(NaiveDate,)>()
         .unwrap()
         .next()
         .unwrap()
@@ -392,7 +403,10 @@ async fn test_naive_date_04() {
         .query_unpaged("SELECT val from chrono_naive_date_tests", &[])
         .await
         .unwrap()
-        .single_row_typed::<(NaiveDate,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(NaiveDate,)>()
         .unwrap();
     assert_eq!(read_date, *naive_date);
 }
@@ -427,15 +441,14 @@ async fn test_cql_date() {
         .await
         .unwrap();
 
-    let read_date: CqlDate = session
+    let (read_date,): (CqlDate,) = session
         .query_unpaged("SELECT val from cql_date_tests", &[])
         .await
         .unwrap()
-        .rows
-        .unwrap()[0]
-        .columns[0]
-        .as_ref()
-        .map(|cql_val| cql_val.as_cql_date().unwrap())
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(CqlDate,)>()
         .unwrap();
 
     assert_eq!(read_date, *date);
@@ -518,7 +531,10 @@ async fn test_date_03() {
         .query_unpaged("SELECT val from time_date_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(Date,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(Date,)>()
         .ok()
         .map(|val| val.0);
 
@@ -538,7 +554,10 @@ async fn test_date_03() {
         .query_unpaged("SELECT val from time_date_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(Date,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(Date,)>()
         .unwrap();
     assert_eq!(read_date, *date);
 }
@@ -581,7 +600,10 @@ async fn test_cql_time() {
         .query_unpaged("SELECT val from cql_time_tests", &[])
         .await
         .unwrap()
-        .single_row_typed::<(CqlTime,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(CqlTime,)>()
         .unwrap();
 
     assert_eq!(read_time, *time_duration);
@@ -599,7 +621,10 @@ async fn test_cql_time() {
         .query_unpaged("SELECT val from cql_time_tests", &[])
         .await
         .unwrap()
-        .single_row_typed::<(CqlTime,)>()
+        .into_rows_result()
+        .unwrap()
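+        // (into_rows_result() yields Result<Option<_>, _>: the unwrap above is
+        // for the conversion error, the one below asserts the response was Rows.)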
+        .unwrap()
+        .single_row::<(CqlTime,)>()
         .unwrap();
 
     assert_eq!(read_time, *time_duration);
@@ -677,7 +702,10 @@ async fn test_naive_time_04() {
         .query_unpaged("SELECT val from chrono_time_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(NaiveTime,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(NaiveTime,)>()
         .unwrap();
 
     assert_eq!(read_time, *time);
@@ -695,7 +723,10 @@ async fn test_naive_time_04() {
         .query_unpaged("SELECT val from chrono_time_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(NaiveTime,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(NaiveTime,)>()
         .unwrap();
     assert_eq!(read_time, *time);
 }
@@ -757,7 +788,10 @@ async fn test_time_03() {
         .query_unpaged("SELECT val from time_time_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(Time,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(Time,)>()
         .unwrap();
 
     assert_eq!(read_time, *time);
@@ -775,7 +809,10 @@ async fn test_time_03() {
         .query_unpaged("SELECT val from time_time_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(Time,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(Time,)>()
         .unwrap();
     assert_eq!(read_time, *time);
 }
@@ -828,7 +865,10 @@ async fn test_cql_timestamp() {
         .query_unpaged("SELECT val from cql_timestamp_tests", &[])
         .await
         .unwrap()
-        .single_row_typed::<(CqlTimestamp,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(CqlTimestamp,)>()
         .unwrap();
 
     assert_eq!(read_timestamp, *timestamp_duration);
@@ -846,7 +886,10 @@ async fn test_cql_timestamp() {
         .query_unpaged("SELECT val from cql_timestamp_tests", &[])
         .await
         .unwrap()
-        .single_row_typed::<(CqlTimestamp,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(CqlTimestamp,)>()
         .unwrap();
 
     assert_eq!(read_timestamp, *timestamp_duration);
@@ -923,7 +966,10 @@ async fn test_date_time_04() {
         .query_unpaged("SELECT val from chrono_datetime_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(DateTime<Utc>,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(DateTime<Utc>,)>()
         .unwrap();
 
     assert_eq!(read_datetime, *datetime);
@@ -941,7 +987,10 @@ async fn test_date_time_04() {
         .query_unpaged("SELECT val from chrono_datetime_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(DateTime<Utc>,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(DateTime<Utc>,)>()
         .unwrap();
     assert_eq!(read_datetime, *datetime);
 }
@@ -969,7 +1018,10 @@ async fn test_date_time_04() {
         .query_unpaged("SELECT val from chrono_datetime_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(DateTime<Utc>,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(DateTime<Utc>,)>()
         .unwrap();
 
     assert_eq!(read_datetime, nanosecond_precision_1st_half_rounded);
@@ -995,7 +1047,10 @@ async fn test_date_time_04() {
         .query_unpaged("SELECT val from chrono_datetime_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(DateTime<Utc>,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(DateTime<Utc>,)>()
         .unwrap();
 
     assert_eq!(read_datetime, nanosecond_precision_2nd_half_rounded);
@@ -1084,7 +1139,10 @@ async fn test_offset_date_time_03() {
         .query_unpaged("SELECT val from time_datetime_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(OffsetDateTime,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(OffsetDateTime,)>()
         .unwrap();
 
     assert_eq!(read_datetime, *datetime);
@@ -1102,7 +1160,10 @@ async fn test_offset_date_time_03() {
         .query_unpaged("SELECT val from time_datetime_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(OffsetDateTime,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(OffsetDateTime,)>()
         .unwrap();
     assert_eq!(read_datetime, *datetime);
 }
@@ -1130,7 +1191,10 @@ async fn test_offset_date_time_03() {
         .query_unpaged("SELECT val from time_datetime_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(OffsetDateTime,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(OffsetDateTime,)>()
         .unwrap();
 
     assert_eq!(read_datetime, nanosecond_precision_1st_half_rounded);
@@ -1156,7 +1220,10 @@ async fn test_offset_date_time_03() {
         .query_unpaged("SELECT val from time_datetime_tests", &[])
         .await
         .unwrap()
-        .first_row_typed::<(OffsetDateTime,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(OffsetDateTime,)>()
         .unwrap();
     assert_eq!(read_datetime, nanosecond_precision_2nd_half_rounded);
 }
@@ -1205,7 +1272,10 @@ async fn test_timeuuid() {
         .query_unpaged("SELECT val from timeuuid_tests", &[])
         .await
         .unwrap()
-        .single_row_typed::<(CqlTimeuuid,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(CqlTimeuuid,)>()
         .unwrap();
 
     assert_eq!(read_timeuuid.as_bytes(), timeuuid_bytes);
@@ -1224,7 +1294,10 @@ async fn test_timeuuid() {
         .query_unpaged("SELECT val from timeuuid_tests", &[])
         .await
         .unwrap()
-        .single_row_typed::<(CqlTimeuuid,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(CqlTimeuuid,)>()
         .unwrap();
 
     assert_eq!(read_timeuuid.as_bytes(), timeuuid_bytes);
@@ -1293,7 +1366,10 @@ async fn test_timeuuid_ordering() {
         .query_unpaged("SELECT t FROM tab WHERE p = 0", ())
         .await
         .unwrap()
-        .rows_typed::<(CqlTimeuuid,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(CqlTimeuuid,)>()
         .unwrap()
         .map(|r| r.unwrap().0)
         .collect();
@@ -1372,7 +1448,10 @@ async fn test_inet() {
         .query_unpaged("SELECT val from inet_tests WHERE id = 0", &[])
         .await
         .unwrap()
-        .single_row_typed::<(IpAddr,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(IpAddr,)>()
         .unwrap();
 
     assert_eq!(read_inet, *inet);
@@ -1387,7 +1466,10 @@ async fn test_inet() {
         .query_unpaged("SELECT val from inet_tests WHERE id = 0", &[])
         .await
         .unwrap()
-        .single_row_typed::<(IpAddr,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(IpAddr,)>()
         .unwrap();
 
     assert_eq!(read_inet, *inet);
@@ -1438,7 +1520,10 @@ async fn test_blob() {
         .query_unpaged("SELECT val from blob_tests WHERE id = 0", &[])
         .await
         .unwrap()
-        .single_row_typed::<(Vec<u8>,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(Vec<u8>,)>()
         .unwrap();
 
     assert_eq!(read_blob, *blob);
@@ -1453,7 +1538,10 @@ async fn test_blob() {
         .query_unpaged("SELECT val from blob_tests WHERE id = 0", &[])
         .await
         .unwrap()
-        .single_row_typed::<(Vec<u8>,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(Vec<u8>,)>()
         .unwrap();
 
     assert_eq!(read_blob, *blob);
@@ -1514,7 +1602,7 @@ async fn test_udt_after_schema_update() {
         .await
         .unwrap();
 
-    #[derive(SerializeValue, FromUserType, Debug, PartialEq)]
+    #[derive(SerializeValue, DeserializeValue, Debug, PartialEq)]
     #[scylla(crate = crate)]
     struct UdtV1 {
         first: i32,
@@ -1541,7 +1629,10 @@ async fn test_udt_after_schema_update() {
         .query_unpaged(format!("SELECT val from {} WHERE id = 0", table_name), &[])
         .await
         .unwrap()
-        .single_row_typed::<(UdtV1,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(UdtV1,)>()
         .unwrap();
 
     assert_eq!(read_udt, v1);
@@ -1558,7 +1649,10 @@ async fn test_udt_after_schema_update() {
         .query_unpaged(format!("SELECT val from {} WHERE id = 0", table_name), &[])
         .await
         .unwrap()
-        .single_row_typed::<(UdtV1,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(UdtV1,)>()
         .unwrap();
 
     assert_eq!(read_udt, v1);
@@ -1568,7 +1662,7 @@ async fn test_udt_after_schema_update() {
         .await
         .unwrap();
 
-    #[derive(FromUserType, Debug, PartialEq)]
+    #[derive(DeserializeValue, Debug, PartialEq)]
     struct UdtV2 {
         first: i32,
         second: bool,
@@ -1579,7 +1673,10 @@ async fn test_udt_after_schema_update() {
         .query_unpaged(format!("SELECT val from {} WHERE id = 0", table_name), &[])
         .await
         .unwrap()
-        .single_row_typed::<(UdtV2,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(UdtV2,)>()
         .unwrap();
 
     assert_eq!(
@@ -1609,7 +1706,10 @@ async fn test_empty() {
         .query_unpaged("SELECT val FROM empty_tests WHERE id = 0", ())
         .await
         .unwrap()
-        .first_row_typed::<(CqlValue,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(CqlValue,)>()
         .unwrap();
 
     assert_eq!(empty, CqlValue::Empty);
@@ -1626,7 +1726,10 @@ async fn test_empty() {
         .query_unpaged("SELECT val FROM empty_tests WHERE id = 1", ())
         .await
         .unwrap()
-        .first_row_typed::<(CqlValue,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .first_row::<(CqlValue,)>()
         .unwrap();
 
     assert_eq!(empty, CqlValue::Empty);
@@ -1696,7 +1799,7 @@ async fn test_udt_with_missing_field() {
         expected: TR,
     ) where
         TQ: SerializeValue,
-        TR: FromCqlVal<CqlValue> + PartialEq + Debug,
+        TR: DeserializeOwnedValue + PartialEq + Debug,
     {
         session
             .query_unpaged(
@@ -1712,13 +1815,16 @@ async fn test_udt_with_missing_field() {
             )
             .await
            .unwrap()
-            .single_row_typed::<(TR,)>()
+            .into_rows_result()
+            .unwrap()
+            .unwrap()
+            .single_row::<(TR,)>()
             .unwrap()
             .0;
         assert_eq!(expected, result);
     }
 
-    #[derive(FromUserType, Debug, PartialEq)]
+    #[derive(DeserializeValue, Debug, PartialEq)]
     struct UdtFull {
         first: i32,
         second: bool,
diff --git a/scylla/src/transport/cql_value_test.rs b/scylla/src/transport/cql_value_test.rs
index 781ab919b5..c5c2eedd55 100644
--- a/scylla/src/transport/cql_value_test.rs
+++ b/scylla/src/transport/cql_value_test.rs
@@ -1,4 +1,7 @@
-use crate::frame::{response::result::CqlValue, value::CqlDuration};
+use assert_matches::assert_matches;
+
+use crate::frame::response::result::CqlValue;
+use crate::frame::value::CqlDuration;
 use crate::test_utils::{create_new_session_builder, setup_tracing};
 use crate::utils::test_utils::unique_keyspace_name;
 
@@ -54,19 +57,17 @@ async fn test_cqlvalue_udt() {
         .await
         .unwrap();
 
-    let rows = session
+    let rows_result = session
         .query_unpaged("SELECT my FROM cqlvalue_udt_test", &[])
         .await
         .unwrap()
-        .rows
+        .into_rows_result()
+        .unwrap()
         .unwrap();
 
-    assert_eq!(rows.len(), 1);
-    assert_eq!(rows[0].columns.len(), 1);
-
-    let received_udt_cql_value = rows[0].columns[0].as_ref().unwrap();
+    let (received_udt_cql_value,) = rows_result.single_row::<(CqlValue,)>().unwrap();
 
-    assert_eq!(received_udt_cql_value, &udt_cql_value);
+    assert_eq!(received_udt_cql_value, udt_cql_value);
 }
 
 #[tokio::test]
@@ -106,45 +107,51 @@ async fn test_cqlvalue_duration() {
         session.query_unpaged(query.0, query.1).await.unwrap();
     }
 
-    let rows = session
+    let rows_result = session
         .query_unpaged(
             "SELECT v FROM cqlvalue_duration_test WHERE pk = ?",
             (CqlValue::Int(0),),
         )
         .await
        .unwrap()
-        .rows
+        .into_rows_result()
+        .unwrap()
         .unwrap();
 
-    assert_eq!(rows.len(), 4);
-    assert_eq!(rows[0].columns.len(), 1);
+    let mut rows_iter = rows_result.rows::<(CqlValue,)>().unwrap();
 
-    assert_eq!(rows[0].columns[0].as_ref().unwrap(), &duration_cql_value);
+    let (first_value,) = rows_iter.next().unwrap().unwrap();
+    assert_eq!(first_value, duration_cql_value);
 
+    let (second_value,) = rows_iter.next().unwrap().unwrap();
     assert_eq!(
-        rows[1].columns[0].as_ref().unwrap(),
-        &CqlValue::Duration(CqlDuration {
+        second_value,
+        CqlValue::Duration(CqlDuration {
             months: 0,
             days: 0,
             nanoseconds: 320_688_000_000_000,
         })
     );
 
+    let (third_value,) = rows_iter.next().unwrap().unwrap();
     assert_eq!(
-        rows[2].columns[0].as_ref().unwrap(),
-        &CqlValue::Duration(CqlDuration {
+        third_value,
+        CqlValue::Duration(CqlDuration {
             months: 0,
             days: 0,
             nanoseconds: 320_933_000_000_000,
         })
     );
 
+    let (fourth_value,) = rows_iter.next().unwrap().unwrap();
     assert_eq!(
-        rows[3].columns[0].as_ref().unwrap(),
-        &CqlValue::Duration(CqlDuration {
+        fourth_value,
+        CqlValue::Duration(CqlDuration {
             months: 0,
             days: 0,
             nanoseconds: 320_949_000_000_000,
         })
     );
+
+    assert_matches!(rows_iter.next(), None);
 }
diff --git a/scylla/src/transport/errors.rs b/scylla/src/transport/errors.rs
index d95383054c..349d968a40 100644
--- a/scylla/src/transport/errors.rs
+++ b/scylla/src/transport/errors.rs
@@ -13,7 +13,6 @@ use std::{
 };
 
 use scylla_cql::{
-    cql_to_rust::FromRowError,
     frame::{
         frame_errors::{
             CqlAuthChallengeParseError, CqlAuthSuccessParseError, CqlAuthenticateParseError,
@@ -25,14 +24,17 @@ use scylla_cql::{
         response::CqlResponseKind,
         value::SerializeValuesError,
     },
-    types::serialize::SerializationError,
+    types::{
+        deserialize::{DeserializationError, TypeCheckError},
+        serialize::SerializationError,
+    },
 };
 
 use thiserror::Error;
 
 use crate::{authentication::AuthError, frame::response};
 
-use super::legacy_query_result::{RowsExpectedError, SingleRowTypedError};
+use super::query_result::SingleRowError;
 
 /// Error that occurred during query execution
 #[derive(Error, Debug, Clone)]
@@ -304,7 +306,7 @@ pub enum ProtocolError {
 
     /// A protocol error appeared during schema version fetch.
     #[error("Schema version fetch protocol error: {0}")]
-    SchemaVersionFetch(SingleRowTypedError),
+    SchemaVersionFetch(#[from] SchemaVersionFetchError),
 
     /// A result with nonfinished paging state received for unpaged query.
     #[error("Unpaged query returned a non-empty paging state! This is a driver-side or server-side bug.")]
@@ -345,25 +347,43 @@ pub enum UseKeyspaceProtocolError {
     UnexpectedResponse(CqlResponseKind),
 }
 
+/// A protocol error that occurred during schema version fetch.
+#[derive(Error, Debug, Clone)]
+#[non_exhaustive]
+pub enum SchemaVersionFetchError {
+    #[error("Schema version query returned non-rows result")]
+    ResultNotRows,
+    #[error(transparent)]
+    SingleRowError(SingleRowError),
+}
+
 /// A protocol error that occurred during tracing info fetch.
 #[derive(Error, Debug, Clone)]
 #[non_exhaustive]
 pub enum TracingProtocolError {
     /// Response to system_traces.session is not RESULT:Rows.
-    #[error("Response to system_traces.session is not RESULT:Rows: {0}")]
-    TracesSessionNotRows(RowsExpectedError),
+    #[error("Response to system_traces.session is not RESULT:Rows")]
+    TracesSessionNotRows,
 
     /// system_traces.session has invalid column type.
     #[error("system_traces.session has invalid column type: {0}")]
-    TracesSessionInvalidColumnType(FromRowError),
+    TracesSessionInvalidColumnType(TypeCheckError),
+
+    /// Response to system_traces.session failed to deserialize.
+    #[error("Response to system_traces.session failed to deserialize: {0}")]
+    TracesSessionDeserializationFailed(DeserializationError),
 
     /// Response to system_traces.events is not RESULT:Rows.
- #[error("Response to system_traces.events is not RESULT:Rows: {0}")] - TracesEventsNotRows(RowsExpectedError), + #[error("Response to system_traces.events is not RESULT:Rows")] + TracesEventsNotRows, /// system_traces.events has invalid column type. #[error("system_traces.events has invalid column type: {0}")] - TracesEventsInvalidColumnType(FromRowError), + TracesEventsInvalidColumnType(TypeCheckError), + + /// Response to system_traces.events failed to deserialize. + #[error("Response to system_traces.events failed to deserialize: {0}")] + TracesEventsDeserializationFailed(DeserializationError), /// All tracing queries returned an empty result. #[error( @@ -426,7 +446,7 @@ pub enum PeersMetadataError { pub enum KeyspacesMetadataError { /// system_schema.keyspaces has invalid column type. #[error("system_schema.keyspaces has invalid column type: {0}")] - SchemaKeyspacesInvalidColumnType(FromRowError), + SchemaKeyspacesInvalidColumnType(TypeCheckError), /// Bad keyspace replication strategy. #[error("Bad keyspace <{keyspace}> replication strategy: {error}")] @@ -464,7 +484,7 @@ pub enum KeyspaceStrategyError { pub enum UdtMetadataError { /// system_schema.types has invalid column type. #[error("system_schema.types has invalid column type: {0}")] - SchemaTypesInvalidColumnType(FromRowError), + SchemaTypesInvalidColumnType(TypeCheckError), /// Circular UDT dependency detected. #[error("Detected circular dependency between user defined types - toposort is impossible!")] @@ -477,11 +497,11 @@ pub enum UdtMetadataError { pub enum TablesMetadataError { /// system_schema.tables has invalid column type. #[error("system_schema.tables has invalid column type: {0}")] - SchemaTablesInvalidColumnType(FromRowError), + SchemaTablesInvalidColumnType(TypeCheckError), /// system_schema.columns has invalid column type. #[error("system_schema.columns has invalid column type: {0}")] - SchemaColumnsInvalidColumnType(FromRowError), + SchemaColumnsInvalidColumnType(TypeCheckError), /// Unknown column kind. #[error("Unknown column kind '{column_kind}' for {keyspace_name}.{table_name}.{column_name}")] @@ -499,7 +519,7 @@ pub enum TablesMetadataError { pub enum ViewsMetadataError { /// system_schema.views has invalid column type. 
#[error("system_schema.views has invalid column type: {0}")] - SchemaViewsInvalidColumnType(FromRowError), + SchemaViewsInvalidColumnType(TypeCheckError), } /// Error caused by caller creating an invalid query diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 1ea00c457f..160819f6c2 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -24,6 +24,7 @@ use super::query_result::ColumnSpecs; use super::session::RequestSpan; use crate::cql_to_rust::{FromRow, FromRowError}; +use crate::deserialize::DeserializeOwnedRow; use crate::frame::response::{ result, result::{ColumnSpec, Row}, @@ -126,9 +127,9 @@ use checked_channel_sender::{ProvingSender, SendAttemptedProof}; type PageSendAttemptedProof = SendAttemptedProof>; -// RowIteratorWorker works in the background to fetch pages -// RowIterator receives them through a channel -struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { +// PagerWorker works in the background to fetch pages +// QueryPager receives them through a channel +struct PagerWorker<'a, QueryFunc, SpanCreatorFunc> { sender: ProvingSender>, // Closure used to perform a single page query @@ -152,7 +153,7 @@ struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { span_creator: SpanCreatorFunc, } -impl RowIteratorWorker<'_, QueryFunc, SpanCreator> +impl PagerWorker<'_, QueryFunc, SpanCreator> where QueryFunc: Fn(Arc, Consistency, PagingState) -> QueryFut, QueryFut: Future>, @@ -259,7 +260,7 @@ where } } - // Send last_error to RowIterator - query failed fully + // Send last_error to QueryPager - query failed fully self.log_query_error(&last_error); let (proof, _) = self.sender.send(Err(last_error)).await; proof @@ -332,10 +333,10 @@ where let received_page = ReceivedPage { rows, tracing_id }; - // Send next page to RowIterator + // Send next page to QueryPager let (proof, res) = self.sender.send(Ok(received_page)).await; if res.is_err() { - // channel was closed, RowIterator was dropped - should shutdown + // channel was closed, QueryPager was dropped - should shutdown return Ok(ControlFlow::Break(proof)); } @@ -468,15 +469,15 @@ where } } -/// A massively simplified version of the RowIteratorWorker. It does not have +/// A massively simplified version of the PagerWorker. It does not have /// any complicated logic related to retries, it just fetches pages from /// a single connection. -struct SingleConnectionRowIteratorWorker { +struct SingleConnectionPagerWorker { sender: ProvingSender>, fetcher: Fetcher, } -impl SingleConnectionRowIteratorWorker +impl SingleConnectionPagerWorker where Fetcher: Fn(PagingState) -> FetchFut + Send + Sync, FetchFut: Future> + Send, @@ -507,7 +508,7 @@ where .await; if send_result.is_err() { - // channel was closed, RowIterator was dropped - should shutdown + // channel was closed, QueryPager was dropped - should shutdown return Ok(proof); } @@ -540,13 +541,13 @@ where } } -/// An intermediate object that allows to construct an iterator over a query +/// An intermediate object that allows to construct a stream over a query /// that is asynchronously paged in the background. /// /// Before the results can be processed in a convenient way, the QueryPager -/// needs to be cast into a typed iterator. This is done by use of `into_typed()` method. +/// needs to be cast into a typed stream. This is done by use of `rows_stream()` method. /// As the method is generic over the target type, the turbofish syntax -/// can come in handy there, e.g. 
`raw_iter.into_typed::<(i32, &str, Uuid)>()`. +/// can come in handy there, e.g. `query_pager.rows_stream::<(i32, String, Uuid)>()`. /// /// A pre-0.15.0 interface is also available, although deprecated: /// `into_legacy()` method converts QueryPager to LegacyRowIterator, @@ -662,12 +663,9 @@ impl QueryPager { /// It only allows deserializing owned types, because [Stream] is not lending. /// Begins with performing type check. #[inline] - pub fn rows_stream<'frame, 'metadata, RowT: 'static + DeserializeRow<'frame, 'metadata>>( + pub fn rows_stream DeserializeRow<'frame, 'metadata>>( self, - ) -> Result, TypeCheckError> - where - 'frame: 'metadata, - { + ) -> Result, TypeCheckError> { TypedRowLendingStream::::new(self).map(|typed_row_lending_stream| TypedRowStream { typed_row_lending_stream, }) @@ -741,7 +739,7 @@ impl QueryPager { span }; - let worker = RowIteratorWorker { + let worker = PagerWorker { sender: sender.into(), page_query, statement_info: routing_info, @@ -859,7 +857,7 @@ impl QueryPager { span }; - let worker = RowIteratorWorker { + let worker = PagerWorker { sender: sender.into(), page_query, statement_info, @@ -893,7 +891,7 @@ impl QueryPager { let page_size = query.get_validated_page_size(); let worker_task = async move { - let worker = SingleConnectionRowIteratorWorker { + let worker = SingleConnectionPagerWorker { sender: sender.into(), fetcher: |paging_state| { connection.query_raw_with_consistency( @@ -923,7 +921,7 @@ impl QueryPager { let page_size = prepared.get_validated_page_size(); let worker_task = async move { - let worker = SingleConnectionRowIteratorWorker { + let worker = SingleConnectionPagerWorker { sender: sender.into(), fetcher: |paging_state| { connection.execute_raw_with_consistency( @@ -1076,7 +1074,7 @@ impl TypedRowStream { /// It only works with owned types! For example, &str is not supported. impl Stream for TypedRowStream where - RowT: for<'r> DeserializeRow<'r, 'r>, + RowT: DeserializeOwnedRow, { type Item = Result; diff --git a/scylla/src/transport/large_batch_statements_test.rs b/scylla/src/transport/large_batch_statements_test.rs index 33628a49d4..7e8fc482c3 100644 --- a/scylla/src/transport/large_batch_statements_test.rs +++ b/scylla/src/transport/large_batch_statements_test.rs @@ -7,7 +7,7 @@ use crate::transport::errors::{BadQuery, QueryError}; use crate::{ batch::Batch, test_utils::{create_new_session_builder, unique_keyspace_name}, - LegacyQueryResult, Session, + QueryResult, Session, }; #[tokio::test] @@ -51,11 +51,7 @@ async fn create_test_session(session: Session, ks: &String) -> Session { session } -async fn write_batch( - session: &Session, - n: usize, - ks: &String, -) -> Result { +async fn write_batch(session: &Session, n: usize, ks: &String) -> Result { let mut batch_query = Batch::new(BatchType::Unlogged); let mut batch_values = Vec::new(); let query = format!("INSERT INTO {}.pairs (dummy, k, v) VALUES (0, ?, ?)", ks); diff --git a/scylla/src/transport/legacy_query_result.rs b/scylla/src/transport/legacy_query_result.rs index 5b26f380c7..46818a297e 100644 --- a/scylla/src/transport/legacy_query_result.rs +++ b/scylla/src/transport/legacy_query_result.rs @@ -56,16 +56,6 @@ pub struct LegacyQueryResult { } impl LegacyQueryResult { - pub(crate) fn mock_empty() -> Self { - Self { - rows: None, - warnings: Vec::new(), - tracing_id: None, - metadata: None, - serialized_size: 0, - } - } - /// Returns the number of received rows.\ /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows). 
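The `rows_stream()` entry point introduced above replaces `into_typed()` on the pager. A minimal sketch of driving it, modeled on the `examples/basic.rs` changes (the `examples_ks.basic` table comes from those examples):

```rust
use futures::TryStreamExt as _;
use scylla::Session;

async fn stream_rows(session: &Session) -> Result<(), Box<dyn std::error::Error>> {
    // rows_stream::<RowT>() type-checks the column metadata once up front,
    // then yields owned, already-deserialized rows.
    let mut stream = session
        .query_iter("SELECT a, b, c FROM examples_ks.basic", &[])
        .await?
        .rows_stream::<(i32, i32, String)>()?;

    while let Some((a, b, c)) = stream.try_next().await? {
        println!("a, b, c: {}, {}, {}", a, b, c);
    }
    Ok(())
}
```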
    pub fn rows_num(&self) -> Result<usize, RowsExpectedError> {
diff --git a/scylla/src/transport/query_result.rs b/scylla/src/transport/query_result.rs
index eedcb34a17..52326ba325 100644
--- a/scylla/src/transport/query_result.rs
+++ b/scylla/src/transport/query_result.rs
@@ -279,10 +279,12 @@ impl QueryResult {
 /// This struct provides generic methods which enable typed access to the data,
 /// by deserializing rows on the fly to the type provided as a type parameter.
 /// Those methods are:
-/// - rows() - for iterating through rows,
-/// - first_row() and maybe_first_row() - for accessing the first row first,
-/// - single_row() - for accessing the first row, additionally asserting
-///   that it's the only one in the response.
+/// - [rows()](QueryRowsResult::rows) - for iterating through rows,
+/// - [first_row()](QueryRowsResult::first_row) and
+///   [maybe_first_row()](QueryRowsResult::maybe_first_row) -
+///   for accessing the first row,
+/// - [single_row()](QueryRowsResult::single_row) - for accessing the first row,
+///   additionally asserting that it's the only one in the response.
 ///
 /// ```rust
 /// # use scylla::transport::query_result::QueryResult;
@@ -338,10 +340,8 @@ impl QueryRowsResult {
     pub fn column_specs(&self) -> ColumnSpecs {
         ColumnSpecs::new(self.raw_rows_with_metadata.metadata().col_specs())
     }
-}
 
-impl QueryRowsResult {
-    /// Returns the received rows when present.
+    /// Returns an iterator over the received rows.
     ///
     /// Returns an error if the rows in the response are of incorrect type.
     #[inline]
@@ -353,7 +353,7 @@ impl QueryRowsResult {
             .map_err(RowsError::TypeCheckFailed)
     }
 
-    /// Returns `Option` containing the first of a result.
+    /// Returns `Option` containing the first row of the result.
     ///
     /// Fails when the rows in the response are of incorrect type,
     /// or when the deserialization fails.
     #[inline]
@@ -371,7 +371,7 @@ impl QueryRowsResult {
             .map_err(MaybeFirstRowError::DeserializationFailed)
     }
 
-    /// Returns first row from the received rows.
+    /// Returns the first row of the received result.
    ///
    /// When the first row is not available, returns an error.
    /// Fails when the rows in the response are of incorrect type,
diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs
index 1defa514b4..4db0bbde20 100644
--- a/scylla/src/transport/session.rs
+++ b/scylla/src/transport/session.rs
@@ -27,6 +27,7 @@ use std::borrow::Borrow;
 use std::collections::HashMap;
 use std::fmt::Display;
 use std::future::Future;
+use std::marker::PhantomData;
 use std::net::SocketAddr;
 use std::num::NonZeroU32;
 use std::str::FromStr;
@@ -45,11 +46,12 @@ use super::connection::SslConfig;
 use super::errors::TracingProtocolError;
 use super::execution_profile::{ExecutionProfile, ExecutionProfileHandle, ExecutionProfileInner};
 use super::iterator::QueryPager;
-use super::legacy_query_result::MaybeFirstRowTypedError;
 #[cfg(feature = "cloud")]
 use super::node::CloudEndpoint;
 use super::node::{InternalKnownNode, KnownNode};
 use super::partitioner::PartitionerName;
+use super::query_result::MaybeFirstRowError;
+use super::query_result::RowsError;
 use super::topology::UntranslatedPeer;
 use super::{NodeRef, SelfIdentity};
 use crate::frame::response::result;
@@ -57,7 +59,7 @@ use crate::prepared_statement::PreparedStatement;
 use crate::query::Query;
 use crate::routing::{Shard, Token};
 use crate::statement::{Consistency, PageSize, PagingState, PagingStateResponse};
-use crate::tracing::{TracingEvent, TracingInfo};
+use crate::tracing::TracingInfo;
 use crate::transport::cluster::{Cluster, ClusterData, ClusterNeatDebug};
 use crate::transport::connection::{Connection, ConnectionConfig, VerifiedKeyspaceName};
 use crate::transport::connection_pool::PoolConfig;
@@ -85,6 +87,14 @@ use crate::authentication::AuthenticatorProvider;
 #[cfg(feature = "ssl")]
 use openssl::ssl::SslContext;
 
+mod sealed {
+    // This is a sealed trait - its whole purpose is to be unnameable.
+    // This means we need to disable the check.
+    #[allow(unknown_lints)] // Rust 1.70 (our MSRV) doesn't know this lint
+    #[allow(unnameable_types)]
+    pub trait Sealed {}
+}
+
 pub(crate) const TABLET_CHANNEL_SIZE: usize = 8192;
 const TRACING_QUERY_PAGE_SIZE: i32 = 1024;
@@ -154,8 +164,21 @@ impl AddressTranslator for HashMap<&'static str, &'static str> {
     }
 }
 
+pub trait DeserializationApiKind: sealed::Sealed {}
+
+pub enum CurrentDeserializationApi {}
+impl sealed::Sealed for CurrentDeserializationApi {}
+impl DeserializationApiKind for CurrentDeserializationApi {}
+
+pub enum LegacyDeserializationApi {}
+impl sealed::Sealed for LegacyDeserializationApi {}
+impl DeserializationApiKind for LegacyDeserializationApi {}
+
 /// `Session` manages connections to the cluster and allows to perform queries
-pub struct Session {
+pub struct GenericSession<DeserializationApi>
+where
+    DeserializationApi: DeserializationApiKind,
+{
     cluster: Cluster,
     default_execution_profile_handle: ExecutionProfileHandle,
     schema_agreement_interval: Duration,
@@ -167,11 +190,18 @@ pub struct Session {
     tracing_info_fetch_attempts: NonZeroU32,
     tracing_info_fetch_interval: Duration,
     tracing_info_fetch_consistency: Consistency,
+    _phantom_deser_api: PhantomData<DeserializationApi>,
 }
 
+pub type Session = GenericSession<CurrentDeserializationApi>;
+pub type LegacySession = GenericSession<LegacyDeserializationApi>;
+
 /// This implementation deliberately omits some details from Cluster in order
 /// to avoid cluttering the print with much information of little usability.
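`GenericSession` above selects the deserialization API entirely at the type level: an unconstructible marker type implements a sealed trait, and the session stores only a `PhantomData` of it. A stripped-down, self-contained sketch of that pattern follows; all names in it are illustrative, not driver API:

```rust
use std::marker::PhantomData;

mod sealed {
    // Unimplementable outside this module, so downstream crates
    // cannot add their own API markers.
    pub trait Sealed {}
}

pub trait ApiKind: sealed::Sealed {}

// Uninhabited marker types: never constructed, only named in type parameters.
pub enum CurrentApi {}
impl sealed::Sealed for CurrentApi {}
impl ApiKind for CurrentApi {}

pub enum LegacyApi {}
impl sealed::Sealed for LegacyApi {}
impl ApiKind for LegacyApi {}

pub struct GenericClient<Api: ApiKind> {
    // Zero-sized field that makes `Api` part of the client's type.
    _phantom: PhantomData<Api>,
}

// Methods can then be offered for one marker only.
impl GenericClient<CurrentApi> {
    pub fn new() -> Self {
        GenericClient {
            _phantom: PhantomData,
        }
    }
}

pub type Client = GenericClient<CurrentApi>;
pub type LegacyClient = GenericClient<LegacyApi>;
```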
-impl std::fmt::Debug for Session { +impl std::fmt::Debug for GenericSession +where + DeserApi: DeserializationApiKind, +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Session") .field("cluster", &ClusterNeatDebug(&self.cluster)) @@ -433,129 +463,7 @@ pub(crate) enum RunQueryResult { Completed(ResT), } -/// Represents a CQL session, which can be used to communicate -/// with the database -impl Session { - /// Establishes a CQL session with the database - /// - /// Usually it's easier to use [SessionBuilder](crate::transport::session_builder::SessionBuilder) - /// instead of calling `Session::connect` directly, because it's more convenient. - /// # Arguments - /// * `config` - Connection configuration - known nodes, Compression, etc. - /// Must contain at least one known node. - /// - /// # Example - /// ```rust - /// # use std::error::Error; - /// # async fn check_only_compiles() -> Result<(), Box> { - /// use scylla::{Session, SessionConfig}; - /// use scylla::transport::KnownNode; - /// - /// let mut config = SessionConfig::new(); - /// config.known_nodes.push(KnownNode::Hostname("127.0.0.1:9042".to_string())); - /// - /// let session: Session = Session::connect(config).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn connect(config: SessionConfig) -> Result { - let known_nodes = config.known_nodes; - - #[cfg(feature = "cloud")] - let cloud_known_nodes: Option> = - if let Some(ref cloud_config) = config.cloud_config { - let cloud_servers = cloud_config - .get_datacenters() - .iter() - .map(|(dc_name, dc_data)| { - InternalKnownNode::CloudEndpoint(CloudEndpoint { - hostname: dc_data.get_server().to_owned(), - datacenter: dc_name.clone(), - }) - }) - .collect(); - Some(cloud_servers) - } else { - None - }; - - #[cfg(not(feature = "cloud"))] - let cloud_known_nodes: Option> = None; - - let known_nodes = cloud_known_nodes - .unwrap_or_else(|| known_nodes.into_iter().map(|node| node.into()).collect()); - - // Ensure there is at least one known node - if known_nodes.is_empty() { - return Err(NewSessionError::EmptyKnownNodesList); - } - - let (tablet_sender, tablet_receiver) = tokio::sync::mpsc::channel(TABLET_CHANNEL_SIZE); - - let connection_config = ConnectionConfig { - compression: config.compression, - tcp_nodelay: config.tcp_nodelay, - tcp_keepalive_interval: config.tcp_keepalive_interval, - #[cfg(feature = "ssl")] - ssl_config: config.ssl_context.map(SslConfig::new_with_global_context), - authenticator: config.authenticator.clone(), - connect_timeout: config.connect_timeout, - event_sender: None, - default_consistency: Default::default(), - address_translator: config.address_translator, - #[cfg(feature = "cloud")] - cloud_config: config.cloud_config, - enable_write_coalescing: config.enable_write_coalescing, - keepalive_interval: config.keepalive_interval, - keepalive_timeout: config.keepalive_timeout, - tablet_sender: Some(tablet_sender), - identity: config.identity, - }; - - let pool_config = PoolConfig { - connection_config, - pool_size: config.connection_pool_size, - can_use_shard_aware_port: !config.disallow_shard_aware_port, - keepalive_interval: config.keepalive_interval, - }; - - let cluster = Cluster::new( - known_nodes, - pool_config, - config.keyspaces_to_fetch, - config.fetch_schema_metadata, - config.host_filter, - config.cluster_metadata_refresh_interval, - tablet_receiver, - ) - .await?; - - let default_execution_profile_handle = config.default_execution_profile_handle; - - let session = Session { - cluster, - 
default_execution_profile_handle, - schema_agreement_interval: config.schema_agreement_interval, - metrics: Arc::new(Metrics::new()), - schema_agreement_timeout: config.schema_agreement_timeout, - schema_agreement_automatic_waiting: config.schema_agreement_automatic_waiting, - refresh_metadata_on_auto_schema_agreement: config - .refresh_metadata_on_auto_schema_agreement, - keyspace_name: ArcSwapOption::default(), // will be set by use_keyspace - tracing_info_fetch_attempts: config.tracing_info_fetch_attempts, - tracing_info_fetch_interval: config.tracing_info_fetch_interval, - tracing_info_fetch_consistency: config.tracing_info_fetch_consistency, - }; - - if let Some(keyspace_name) = config.used_keyspace { - session - .use_keyspace(keyspace_name, config.keyspace_case_sensitive) - .await?; - } - - Ok(session) - } - +impl GenericSession { /// Sends a request to the database and receives a response.\ /// Performs an unpaged query, i.e. all results are received in a single response. /// @@ -601,15 +509,15 @@ impl Session { /// // Keep in mind that all results come in one response (no paging is done!), /// // so the memory footprint and latency may be huge! /// // To prevent that, use `Session::query_iter` or `Session::query_single_page`. - /// let rows_opt = session - /// .query_unpaged("SELECT a, b FROM ks.tab", &[]) + /// let query_rows = session + /// .query_unpaged("SELECT a, b FROM ks.tab", &[]) /// .await? - /// .rows; + /// .into_rows_result()?; /// - /// if let Some(rows) = rows_opt { - /// for row in rows.into_typed::<(i32, String)>() { - /// // Parse row as int and text \ - /// let (int_val, text_val): (i32, String) = row?; + /// if let Some(rows) = query_rows { + /// for row in rows.rows()? { + /// // Parse row as int and text. + /// let (int_val, text_val): (i32, &str) = row?; /// } /// } /// # Ok(()) @@ -619,16 +527,8 @@ impl Session { &self, query: impl Into, values: impl SerializeRow, - ) -> Result { - let query = query.into(); - let (result, paging_state_response) = self - .query(&query, values, None, PagingState::start()) - .await?; - if !paging_state_response.finished() { - error!("Unpaged unprepared query returned a non-empty paging state! This is a driver-side or server-side bug."); - return Err(ProtocolError::NonfinishedPagingState.into()); - } - Ok(result) + ) -> Result { + self.do_query_unpaged(&query.into(), values).await } /// Queries a single page from the database, optionally continuing from a saved point. @@ -660,7 +560,11 @@ impl Session { /// .await?; /// /// // Do something with a single page of results. - /// for row in res.rows_typed::<(i32, String)>()? { + /// for row in res + /// .into_rows_result()? + /// .unwrap() + /// .rows::<(i32, &str)>()? + /// { /// let (a, b) = row?; /// } /// @@ -681,13 +585,505 @@ impl Session { /// ``` pub async fn query_single_page( &self, - query: impl Into, + query: impl Into, + values: impl SerializeRow, + paging_state: PagingState, + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + self.do_query_single_page(&query.into(), values, paging_state) + .await + } + + /// Run an unprepared query with paging\ + /// This method will query all pages of the result\ + /// + /// Returns an async iterator (stream) over all received rows\ + /// Page size can be specified in the [Query] passed to the function + /// + /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` + /// trait returns false). 
In such a case, the query first needs to be prepared (on a single connection), so
+    /// the driver will initially perform 2 round trips instead of 1. Please use [`Session::execute_iter()`] instead.
+    ///
+    /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information.
+    ///
+    /// # Arguments
+    /// * `query` - statement to be executed, can be just a `&str` or the [Query] struct.
+    /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// # use scylla::Session;
+    /// # use std::error::Error;
+    /// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
+    /// use scylla::IntoTypedRows;
+    /// use futures::stream::StreamExt;
+    ///
+    /// let mut rows_stream = session
+    ///    .query_iter("SELECT a, b FROM ks.t", &[])
+    ///    .await?
+    ///    .rows_stream::<(i32, i32)>()?;
+    ///
+    /// while let Some(next_row_res) = rows_stream.next().await {
+    ///     let (a, b): (i32, i32) = next_row_res?;
+    ///     println!("a, b: {}, {}", a, b);
+    /// }
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub async fn query_iter(
+        &self,
+        query: impl Into<Query>,
+        values: impl SerializeRow,
+    ) -> Result<QueryPager, QueryError> {
+        self.do_query_iter(query.into(), values).await
+    }
+
+    /// Execute a prepared statement. Requires a [PreparedStatement]
+    /// generated using [`Session::prepare`](Session::prepare).\
+    /// Performs an unpaged query, i.e. all results are received in a single response.
+    ///
+    /// As all results come in one response (no paging is done!), the memory footprint and latency may be huge
+    /// for statements returning rows (i.e. SELECTs)! Prefer this method for non-SELECTs, and for SELECTs
+    /// it is best to use paged queries:
+    /// - to receive multiple pages and transparently iterate through them, use [execute_iter](Session::execute_iter).
+    /// - to manually receive multiple pages and iterate through them, use [execute_single_page](Session::execute_single_page).
+    ///
+    /// Prepared queries are much faster than simple queries:
+    /// * Database doesn't need to parse the query
+    /// * They are properly load balanced using token aware routing
+    ///
+    /// > ***Warning***\
+    /// > For token/shard aware load balancing to work properly, all partition key values
+    /// > must be sent as bound values
+    /// > (see [performance section](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html#performance)).
+    ///
+    /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html) for more information.
+    ///
+    /// # Arguments
+    /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare)
+    /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values
+    ///
+    /// # Example
+    /// ```rust
+    /// # use scylla::Session;
+    /// # use std::error::Error;
+    /// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
+    /// use scylla::prepared_statement::PreparedStatement;
+    ///
+    /// // Prepare the query for later execution
+    /// let prepared: PreparedStatement = session
+    ///     .prepare("INSERT INTO ks.tab (a) VALUES(?)")
+    ///     .await?;
+    ///
+    /// // Run the prepared query with some values, just like a simple query.
+ /// let to_insert: i32 = 12345; + /// session.execute_unpaged(&prepared, (to_insert,)).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn execute_unpaged( + &self, + prepared: &PreparedStatement, + values: impl SerializeRow, + ) -> Result { + self.do_execute_unpaged(prepared, values).await + } + + /// Executes a prepared statement, restricting results to single page. + /// Optionally continues fetching results from a saved point. + /// + /// # Arguments + /// + /// * `prepared` - a statement prepared with [prepare](crate::Session::prepare) + /// * `values` - values bound to the query + /// * `paging_state` - continuation based on a paging state received from a previous paged query or None + /// + /// # Example + /// + /// ```rust + /// # use scylla::Session; + /// # use std::error::Error; + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// use std::ops::ControlFlow; + /// use scylla::query::Query; + /// use scylla::statement::{PagingState, PagingStateResponse}; + /// + /// let paged_prepared = session + /// .prepare( + /// Query::new("SELECT a, b FROM ks.tbl") + /// .with_page_size(100.try_into().unwrap()), + /// ) + /// .await?; + /// + /// // Manual paging in a loop, prepared statement. + /// let mut paging_state = PagingState::start(); + /// loop { + /// let (res, paging_state_response) = session + /// .execute_single_page(&paged_prepared, &[], paging_state) + /// .await?; + /// + /// // Do something with a single page of results. + /// for row in res + /// .into_rows_result()? + /// .unwrap() + /// .rows::<(i32, &str)>()? + /// { + /// let (a, b) = row?; + /// } + /// + /// match paging_state_response.into_paging_control_flow() { + /// ControlFlow::Break(()) => { + /// // No more pages to be fetched. + /// break; + /// } + /// ControlFlow::Continue(new_paging_state) => { + /// // Update paging continuation from the paging state, so that query + /// // will be resumed from where it ended the last time. + /// paging_state = new_paging_state; + /// } + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn execute_single_page( + &self, + prepared: &PreparedStatement, + values: impl SerializeRow, + paging_state: PagingState, + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + self.do_execute_single_page(prepared, values, paging_state) + .await + } + + /// Run a prepared query with paging.\ + /// This method will query all pages of the result.\ + /// + /// Returns an async iterator (stream) over all received rows.\ + /// Page size can be specified in the [PreparedStatement] passed to the function. + /// + /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information. + /// + /// # Arguments + /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare) + /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values + /// + /// # Example + /// + /// ```rust + /// # use scylla::Session; + /// # use futures::StreamExt as _; + /// # use std::error::Error; + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// use scylla::prepared_statement::PreparedStatement; + /// use scylla::IntoTypedRows; + /// + /// // Prepare the query for later execution + /// let prepared: PreparedStatement = session + /// .prepare("SELECT a, b FROM ks.t") + /// .await?; + /// + /// // Execute the query and receive all pages + /// let mut rows_stream = session + /// .execute_iter(prepared, &[]) + /// .await? 
+    ///     .rows_stream::<(i32, i32)>()?;
+    ///
+    /// while let Some(next_row_res) = rows_stream.next().await {
+    ///     let (a, b): (i32, i32) = next_row_res?;
+    ///     println!("a, b: {}, {}", a, b);
+    /// }
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub async fn execute_iter(
+        &self,
+        prepared: impl Into<PreparedStatement>,
+        values: impl SerializeRow,
+    ) -> Result<QueryPager, QueryError> {
+        self.do_execute_iter(prepared.into(), values).await
+    }
+
+    /// Perform a batch query.\
+    /// Batch contains many `simple` or `prepared` queries which are executed at once.\
+    /// Batch doesn't return any rows.
+    ///
+    /// Batch values must contain values for each of the queries.
+    ///
+    /// Avoid using non-empty values (`SerializeRow::is_empty()` returns false) for simple queries
+    /// inside the batch. Such queries will first need to be prepared, so the driver will need to
+    /// send (number_of_unprepared_queries_with_values + 1) requests instead of 1 request, severely
+    /// affecting performance.
+    ///
+    /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/batch.html) for more information.
+    ///
+    /// # Arguments
+    /// * `batch` - [Batch] to be performed
+    /// * `values` - List of values for each query, it's the easiest to use a tuple of tuples
+    ///
+    /// # Example
+    /// ```rust
+    /// # use scylla::Session;
+    /// # use std::error::Error;
+    /// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
+    /// use scylla::batch::Batch;
+    ///
+    /// let mut batch: Batch = Default::default();
+    ///
+    /// // A query with two bound values
+    /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(?, ?)");
+    ///
+    /// // A query with one bound value
+    /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(3, ?)");
+    ///
+    /// // A query with no bound values
+    /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(5, 6)");
+    ///
+    /// // Batch values is a tuple of 3 tuples containing values for each query
+    /// let batch_values = ((1_i32, 2_i32), // Tuple with two values for the first query
+    ///                     (4_i32,),       // Tuple with one value for the second query
+    ///                     ());            // Empty tuple/unit for the third query
+    ///
+    /// // Run the batch
+    /// session.batch(&batch, batch_values).await?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub async fn batch(
+        &self,
+        batch: &Batch,
+        values: impl BatchValues,
+    ) -> Result<QueryResult, QueryError> {
+        self.do_batch(batch, values).await
+    }
+}
+
+impl GenericSession<LegacyDeserializationApi> {
+    pub async fn query_unpaged(
+        &self,
+        query: impl Into<Query>,
+        values: impl SerializeRow,
+    ) -> Result<LegacyQueryResult, QueryError> {
+        Ok(self
+            .do_query_unpaged(&query.into(), values)
+            .await?
+            .into_legacy_result()?)
+    }
+
+    pub async fn query_single_page(
+        &self,
+        query: impl Into<Query>,
+        values: impl SerializeRow,
+        paging_state: PagingState,
+    ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> {
+        let (result, paging_state_response) = self
+            .do_query_single_page(&query.into(), values, paging_state)
+            .await?;
+        Ok((result.into_legacy_result()?, paging_state_response))
+    }
+
+    pub async fn query_iter(
+        &self,
+        query: impl Into<Query>,
+        values: impl SerializeRow,
+    ) -> Result<LegacyRowIterator, QueryError> {
+        self.do_query_iter(query.into(), values)
+            .await
+            .map(QueryPager::into_legacy)
+    }
+
+    pub async fn execute_unpaged(
+        &self,
+        prepared: &PreparedStatement,
+        values: impl SerializeRow,
+    ) -> Result<LegacyQueryResult, QueryError> {
+        Ok(self
+            .do_execute_unpaged(prepared, values)
+            .await?
+            .into_legacy_result()?)
+    }
+
+    pub async fn execute_single_page(
+        &self,
+        prepared: &PreparedStatement,
+        values: impl SerializeRow,
+        paging_state: PagingState,
+    ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> {
+        let (result, paging_state_response) = self
+            .do_execute_single_page(prepared, values, paging_state)
+            .await?;
+        Ok((result.into_legacy_result()?, paging_state_response))
+    }
+
+    pub async fn execute_iter(
+        &self,
+        prepared: impl Into<PreparedStatement>,
+        values: impl SerializeRow,
+    ) -> Result<LegacyRowIterator, QueryError> {
+        self.do_execute_iter(prepared.into(), values)
+            .await
+            .map(QueryPager::into_legacy)
+    }
+
+    pub async fn batch(
+        &self,
+        batch: &Batch,
+        values: impl BatchValues,
+    ) -> Result<LegacyQueryResult, QueryError> {
+        Ok(self.do_batch(batch, values).await?.into_legacy_result()?)
+    }
+}
+
+/// Represents a CQL session, which can be used to communicate
+/// with the database
+impl<DeserApi> GenericSession<DeserApi>
+where
+    DeserApi: DeserializationApiKind,
+{
+    /// Establishes a CQL session with the database
+    ///
+    /// Usually it's easier to use [SessionBuilder](crate::transport::session_builder::SessionBuilder)
+    /// instead of calling `Session::connect` directly, because it's more convenient.
+    /// # Arguments
+    /// * `config` - Connection configuration - known nodes, Compression, etc.
+    ///   Must contain at least one known node.
+    ///
+    /// # Example
+    /// ```rust
+    /// # use std::error::Error;
+    /// # async fn check_only_compiles() -> Result<(), Box<dyn Error>> {
+    /// use scylla::{Session, SessionConfig};
+    /// use scylla::transport::KnownNode;
+    ///
+    /// let mut config = SessionConfig::new();
+    /// config.known_nodes.push(KnownNode::Hostname("127.0.0.1:9042".to_string()));
+    ///
+    /// let session: Session = Session::connect(config).await?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub async fn connect(config: SessionConfig) -> Result<Self, NewSessionError> {
+        let known_nodes = config.known_nodes;
+
+        #[cfg(feature = "cloud")]
+        let cloud_known_nodes: Option<Vec<InternalKnownNode>> =
+            if let Some(ref cloud_config) = config.cloud_config {
+                let cloud_servers = cloud_config
+                    .get_datacenters()
+                    .iter()
+                    .map(|(dc_name, dc_data)| {
+                        InternalKnownNode::CloudEndpoint(CloudEndpoint {
+                            hostname: dc_data.get_server().to_owned(),
+                            datacenter: dc_name.clone(),
+                        })
+                    })
+                    .collect();
+                Some(cloud_servers)
+            } else {
+                None
+            };
+
+        #[cfg(not(feature = "cloud"))]
+        let cloud_known_nodes: Option<Vec<InternalKnownNode>> = None;
+
+        let known_nodes = cloud_known_nodes
+            .unwrap_or_else(|| known_nodes.into_iter().map(|node| node.into()).collect());
+
+        // Ensure there is at least one known node
+        if known_nodes.is_empty() {
+            return Err(NewSessionError::EmptyKnownNodesList);
+        }
+
+        let (tablet_sender, tablet_receiver) = tokio::sync::mpsc::channel(TABLET_CHANNEL_SIZE);
+
+        let connection_config = ConnectionConfig {
+            compression: config.compression,
+            tcp_nodelay: config.tcp_nodelay,
+            tcp_keepalive_interval: config.tcp_keepalive_interval,
+            #[cfg(feature = "ssl")]
+            ssl_config: config.ssl_context.map(SslConfig::new_with_global_context),
+            authenticator: config.authenticator.clone(),
+            connect_timeout: config.connect_timeout,
+            event_sender: None,
+            default_consistency: Default::default(),
+            address_translator: config.address_translator,
+            #[cfg(feature = "cloud")]
+            cloud_config: config.cloud_config,
+            enable_write_coalescing: config.enable_write_coalescing,
+            keepalive_interval: config.keepalive_interval,
+            keepalive_timeout: config.keepalive_timeout,
+            tablet_sender: Some(tablet_sender),
+            identity: config.identity,
+        };
+
+        let pool_config = PoolConfig {
+            connection_config,
+            pool_size: config.connection_pool_size,
can_use_shard_aware_port: !config.disallow_shard_aware_port, + keepalive_interval: config.keepalive_interval, + }; + + let cluster = Cluster::new( + known_nodes, + pool_config, + config.keyspaces_to_fetch, + config.fetch_schema_metadata, + config.host_filter, + config.cluster_metadata_refresh_interval, + tablet_receiver, + ) + .await?; + + let default_execution_profile_handle = config.default_execution_profile_handle; + + let session = Self { + cluster, + default_execution_profile_handle, + schema_agreement_interval: config.schema_agreement_interval, + metrics: Arc::new(Metrics::new()), + schema_agreement_timeout: config.schema_agreement_timeout, + schema_agreement_automatic_waiting: config.schema_agreement_automatic_waiting, + refresh_metadata_on_auto_schema_agreement: config + .refresh_metadata_on_auto_schema_agreement, + keyspace_name: ArcSwapOption::default(), // will be set by use_keyspace + tracing_info_fetch_attempts: config.tracing_info_fetch_attempts, + tracing_info_fetch_interval: config.tracing_info_fetch_interval, + tracing_info_fetch_consistency: config.tracing_info_fetch_consistency, + _phantom_deser_api: PhantomData, + }; + + if let Some(keyspace_name) = config.used_keyspace { + session + .use_keyspace(keyspace_name, config.keyspace_case_sensitive) + .await?; + } + + Ok(session) + } + + async fn do_query_unpaged( + &self, + query: &Query, + values: impl SerializeRow, + ) -> Result { + let (result, paging_state_response) = self + .query(query, values, None, PagingState::start()) + .await?; + if !paging_state_response.finished() { + error!("Unpaged unprepared query returned a non-empty paging state! This is a driver-side or server-side bug."); + return Err(ProtocolError::NonfinishedPagingState.into()); + } + Ok(result) + } + + async fn do_query_single_page( + &self, + query: &Query, values: impl SerializeRow, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { - let query = query.into(); + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { self.query( - &query, + query, values, Some(query.get_validated_page_size()), paging_state, @@ -712,7 +1108,7 @@ impl Session { values: impl SerializeRow, page_size: Option, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let execution_profile = query .get_execution_profile_handle() .unwrap_or_else(|| self.get_default_execution_profile_handle()) @@ -798,10 +1194,10 @@ impl Session { self.handle_set_keyspace_response(&response).await?; self.handle_auto_await_schema_agreement(&response).await?; - let (result, paging_state) = response.into_query_result_and_paging_state()?; + let (result, paging_state_response) = response.into_query_result_and_paging_state()?; span.record_result_fields(&result); - let result = result.into_legacy_result()?; - Ok((result, paging_state)) + + Ok((result, paging_state_response)) } async fn handle_set_keyspace_response( @@ -839,50 +1235,11 @@ impl Session { Ok(()) } - /// Run an unprepared query with paging\ - /// This method will query all pages of the result\ - /// - /// Returns an async iterator (stream) over all received rows\ - /// Page size can be specified in the [Query] passed to the function - /// - /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` - /// trait returns false). 
In such case, query first needs to be prepared (on a single connection), so - /// driver will initially perform 2 round trips instead of 1. Please use [`Session::execute_iter()`] instead. - /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information. - /// - /// # Arguments - /// * `query` - statement to be executed, can be just a `&str` or the [Query] struct. - /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values. - /// - /// # Example - /// - /// ```rust - /// # use scylla::Session; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { - /// use scylla::IntoTypedRows; - /// use futures::stream::StreamExt; - /// - /// let mut rows_stream = session - /// .query_iter("SELECT a, b FROM ks.t", &[]) - /// .await? - /// .into_typed::<(i32, i32)>(); - /// - /// while let Some(next_row_res) = rows_stream.next().await { - /// let (a, b): (i32, i32) = next_row_res?; - /// println!("a, b: {}, {}", a, b); - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn query_iter( + async fn do_query_iter( &self, - query: impl Into, + query: Query, values: impl SerializeRow, - ) -> Result { - let query: Query = query.into(); - + ) -> Result { let execution_profile = query .get_execution_profile_handle() .unwrap_or_else(|| self.get_default_execution_profile_handle()) @@ -896,7 +1253,6 @@ impl Session { self.metrics.clone(), ) .await - .map(QueryPager::into_legacy) } else { // Making QueryPager::new_for_query work with values is too hard (if even possible) // so instead of sending one prepare to a specific connection on each iterator query, @@ -911,7 +1267,6 @@ impl Session { metrics: self.metrics.clone(), }) .await - .map(QueryPager::into_legacy) } } @@ -1007,54 +1362,11 @@ impl Session { .as_deref() } - /// Execute a prepared statement. Requires a [PreparedStatement] - /// generated using [`Session::prepare`](Session::prepare).\ - /// Performs an unpaged query, i.e. all results are received in a single response. - /// - /// As all results come in one response (no paging is done!), the memory footprint and latency may be huge - /// for statements returning rows (i.e. SELECTs)! Prefer this method for non-SELECTs, and for SELECTs - /// it is best to use paged queries: - /// - to receive multiple pages and transparently iterate through them, use [execute_iter](Session::execute_iter). - /// - to manually receive multiple pages and iterate through them, use [execute_single_page](Session::execute_single_page). - /// - /// Prepared queries are much faster than simple queries: - /// * Database doesn't need to parse the query - /// * They are properly load balanced using token aware routing - /// - /// > ***Warning***\ - /// > For token/shard aware load balancing to work properly, all partition key values - /// > must be sent as bound values - /// > (see [performance section](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html#performance)). - /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html) for more information. 
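For contrast with the legacy typed-row access shown in the docs being removed here, the same unpaged read expressed against the new API might look like this sketch (keyspace, table, and column types assumed):

```rust
use scylla::Session;

async fn read_back(session: &Session) -> Result<(), Box<dyn std::error::Error>> {
    let prepared = session.prepare("SELECT a, b FROM ks.tab").await?;
    let result = session.execute_unpaged(&prepared, &[]).await?;

    // In this version, into_rows_result() yields Ok(None) for non-Rows responses.
    if let Some(rows_result) = result.into_rows_result()? {
        for row in rows_result.rows::<(i32, String)>()? {
            let (a, b) = row?;
            println!("a, b: {}, {}", a, b);
        }
    }
    Ok(())
}
```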
- /// - /// # Arguments - /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare) - /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values - /// - /// # Example - /// ```rust - /// # use scylla::Session; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { - /// use scylla::prepared_statement::PreparedStatement; - /// - /// // Prepare the query for later execution - /// let prepared: PreparedStatement = session - /// .prepare("INSERT INTO ks.tab (a) VALUES(?)") - /// .await?; - /// - /// // Run the prepared query with some values, just like a simple query. - /// let to_insert: i32 = 12345; - /// session.execute_unpaged(&prepared, (to_insert,)).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn execute_unpaged( + async fn do_execute_unpaged( &self, prepared: &PreparedStatement, values: impl SerializeRow, - ) -> Result { + ) -> Result { let serialized_values = prepared.serialize_values(&values)?; let (result, paging_state) = self .execute(prepared, &serialized_values, None, PagingState::start()) @@ -1066,65 +1378,12 @@ impl Session { Ok(result) } - /// Executes a prepared statement, restricting results to single page. - /// Optionally continues fetching results from a saved point. - /// - /// # Arguments - /// - /// * `prepared` - a statement prepared with [prepare](crate::Session::prepare) - /// * `values` - values bound to the query - /// * `paging_state` - continuation based on a paging state received from a previous paged query or None - /// - /// # Example - /// - /// ```rust - /// # use scylla::Session; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { - /// use std::ops::ControlFlow; - /// use scylla::query::Query; - /// use scylla::statement::{PagingState, PagingStateResponse}; - /// - /// let paged_prepared = session - /// .prepare( - /// Query::new("SELECT a, b FROM ks.tbl") - /// .with_page_size(100.try_into().unwrap()), - /// ) - /// .await?; - /// - /// // Manual paging in a loop, prepared statement. - /// let mut paging_state = PagingState::start(); - /// loop { - /// let (res, paging_state_response) = session - /// .execute_single_page(&paged_prepared, &[], paging_state) - /// .await?; - /// - /// // Do something with a single page of results. - /// for row in res.rows_typed::<(i32, String)>()? { - /// let (a, b) = row?; - /// } - /// - /// match paging_state_response.into_paging_control_flow() { - /// ControlFlow::Break(()) => { - /// // No more pages to be fetched. - /// break; - /// } - /// ControlFlow::Continue(new_paging_state) => { - /// // Update paging continuation from the paging state, so that query - /// // will be resumed from where it ended the last time. 
- /// paging_state = new_paging_state; - /// } - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn execute_single_page( + async fn do_execute_single_page( &self, prepared: &PreparedStatement, values: impl SerializeRow, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let serialized_values = prepared.serialize_values(&values)?; let page_size = prepared.get_validated_page_size(); self.execute(prepared, &serialized_values, Some(page_size), paging_state) @@ -1147,7 +1406,7 @@ impl Session { serialized_values: &SerializedValues, page_size: Option, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let values_ref = &serialized_values; let paging_state_ref = &paging_state; @@ -1236,58 +1495,17 @@ impl Session { self.handle_set_keyspace_response(&response).await?; self.handle_auto_await_schema_agreement(&response).await?; - let (result, paging_state) = response.into_query_result_and_paging_state()?; + let (result, paging_state_response) = response.into_query_result_and_paging_state()?; span.record_result_fields(&result); - let result = result.into_legacy_result()?; - Ok((result, paging_state)) + + Ok((result, paging_state_response)) } - /// Run a prepared query with paging.\ - /// This method will query all pages of the result.\ - /// - /// Returns an async iterator (stream) over all received rows.\ - /// Page size can be specified in the [PreparedStatement] passed to the function. - /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information. - /// - /// # Arguments - /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare) - /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values - /// - /// # Example - /// - /// ```rust - /// # use scylla::Session; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { - /// use scylla::prepared_statement::PreparedStatement; - /// use scylla::IntoTypedRows; - /// use futures::stream::StreamExt; - /// - /// // Prepare the query for later execution - /// let prepared: PreparedStatement = session - /// .prepare("SELECT a, b FROM ks.t") - /// .await?; - /// - /// // Execute the query and receive all pages - /// let mut rows_stream = session - /// .execute_iter(prepared, &[]) - /// .await? - /// .into_typed::<(i32, i32)>(); - /// - /// while let Some(next_row_res) = rows_stream.next().await { - /// let (a, b): (i32, i32) = next_row_res?; - /// println!("a, b: {}, {}", a, b); - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn execute_iter( + async fn do_execute_iter( &self, - prepared: impl Into, + prepared: PreparedStatement, values: impl SerializeRow, - ) -> Result { - let prepared = prepared.into(); + ) -> Result { let serialized_values = prepared.serialize_values(&values)?; let execution_profile = prepared @@ -1303,59 +1521,13 @@ impl Session { metrics: self.metrics.clone(), }) .await - .map(QueryPager::into_legacy) } - /// Perform a batch request.\ - /// Batch contains many `simple` or `prepared` queries which are executed at once.\ - /// Batch doesn't return any rows. - /// - /// Batch values must contain values for each of the queries. 
- /// - /// Avoid using non-empty values (`SerializeRow::is_empty()` return false) for unprepared statements - /// inside the batch. Such statements will first need to be prepared, so the driver will need to - /// send (numer_of_unprepared_statements_with_values + 1) requests instead of 1 request, severly - /// affecting performance. - /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/batch.html) for more information. - /// - /// # Arguments - /// * `batch` - [Batch] to be performed - /// * `values` - List of values for each query, it's the easiest to use a tuple of tuples - /// - /// # Example - /// ```rust - /// # use scylla::Session; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { - /// use scylla::batch::Batch; - /// - /// let mut batch: Batch = Default::default(); - /// - /// // A query with two bound values - /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(?, ?)"); - /// - /// // A query with one bound value - /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(3, ?)"); - /// - /// // A query with no bound values - /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(5, 6)"); - /// - /// // Batch values is a tuple of 3 tuples containing values for each query - /// let batch_values = ((1_i32, 2_i32), // Tuple with two values for the first query - /// (4_i32,), // Tuple with one value for the second query - /// ()); // Empty tuple/unit for the third query - /// - /// // Run the batch - /// session.batch(&batch, batch_values).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn batch( + async fn do_batch( &self, batch: &Batch, values: impl BatchValues, - ) -> Result { + ) -> Result { // Shard-awareness behavior for batch will be to pick shard based on first batch statement's shard // If users batch statements by shard, they will be rewarded with full shard awareness @@ -1432,10 +1604,10 @@ impl Session { .await?; let result = match run_query_result { - RunQueryResult::IgnoredWriteError => LegacyQueryResult::mock_empty(), + RunQueryResult::IgnoredWriteError => QueryResult::mock_empty(), RunQueryResult::Completed(result) => { span.record_result_fields(&result); - result.into_legacy_result()? + result } }; @@ -1621,20 +1793,24 @@ impl Session { traces_events_query.set_page_size(TRACING_QUERY_PAGE_SIZE); let (traces_session_res, traces_events_res) = tokio::try_join!( - self.query_unpaged(traces_session_query, (tracing_id,)), - self.query_unpaged(traces_events_query, (tracing_id,)) + self.do_query_unpaged(&traces_session_query, (tracing_id,)), + self.do_query_unpaged(&traces_events_query, (tracing_id,)) )?; // Get tracing info let maybe_tracing_info: Option = traces_session_res - .maybe_first_row_typed() + .into_rows_result()? + .ok_or(ProtocolError::Tracing( + TracingProtocolError::TracesSessionNotRows, + ))? 
+ .maybe_first_row() .map_err(|err| match err { - MaybeFirstRowTypedError::RowsExpected(e) => { - ProtocolError::Tracing(TracingProtocolError::TracesSessionNotRows(e)) - } - MaybeFirstRowTypedError::FromRowError(e) => { + MaybeFirstRowError::TypeCheckFailed(e) => { ProtocolError::Tracing(TracingProtocolError::TracesSessionInvalidColumnType(e)) } + MaybeFirstRowError::DeserializationFailed(e) => ProtocolError::Tracing( + TracingProtocolError::TracesSessionDeserializationFailed(e), + ), })?; let mut tracing_info = match maybe_tracing_info { @@ -1643,18 +1819,24 @@ impl Session { }; // Get tracing events - let tracing_event_rows = traces_events_res.rows_typed().map_err(|err| { - ProtocolError::Tracing(TracingProtocolError::TracesEventsNotRows(err)) + let tracing_event_rows_result = + traces_events_res + .into_rows_result()? + .ok_or(ProtocolError::Tracing( + TracingProtocolError::TracesEventsNotRows, + ))?; + let tracing_event_rows = tracing_event_rows_result.rows().map_err(|err| match err { + RowsError::TypeCheckFailed(err) => { + ProtocolError::Tracing(TracingProtocolError::TracesEventsInvalidColumnType(err)) + } })?; - for event in tracing_event_rows { - let tracing_event: TracingEvent = event.map_err(|err| { - ProtocolError::Tracing(TracingProtocolError::TracesEventsInvalidColumnType(err)) + tracing_info.events = tracing_event_rows + .collect::>() + .map_err(|err| { + ProtocolError::Tracing(TracingProtocolError::TracesEventsDeserializationFailed(err)) })?; - tracing_info.events.push(tracing_event); - } - if tracing_info.events.is_empty() { return Ok(None); } diff --git a/scylla/src/transport/session_builder.rs b/scylla/src/transport/session_builder.rs index 9a7a9cbf71..31b653a5c2 100644 --- a/scylla/src/transport/session_builder.rs +++ b/scylla/src/transport/session_builder.rs @@ -2,7 +2,10 @@ use super::connection::SelfIdentity; use super::execution_profile::ExecutionProfileHandle; -use super::session::{AddressTranslator, Session, SessionConfig}; +use super::session::{ + AddressTranslator, CurrentDeserializationApi, GenericSession, LegacyDeserializationApi, + SessionConfig, +}; use super::Compression; #[cfg(feature = "cloud")] @@ -96,7 +99,10 @@ impl GenericSessionBuilder { /// ``` /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?; + /// let session: Session = SessionBuilder::new() + /// .known_node("127.0.0.1:9042") + /// .build() + /// .await?; /// # Ok(()) /// # } /// ``` @@ -104,7 +110,10 @@ impl GenericSessionBuilder { /// ``` /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new().known_node("db1.example.com").build().await?; + /// let session: Session = SessionBuilder::new() + /// .known_node("db1.example.com") + /// .build() + /// .await?; /// # Ok(()) /// # } /// ``` @@ -493,7 +502,7 @@ impl GenericSessionBuilder { /// Set keyspace to be used on all connections.\ /// Each connection will send `"USE "` before sending any requests.\ - /// This can be later changed with [`Session::use_keyspace`] + /// This can be later changed with [`crate::Session::use_keyspace`] /// /// # Example /// ``` @@ -514,7 +523,34 @@ impl GenericSessionBuilder { self } - /// Builds the Session after setting all the options + /// Builds the Session after setting all the options. + /// + /// The new session object uses the legacy deserialization API. 
If you wish + /// to use the new API, use [`SessionBuilder::build`]. + /// + /// # Example + /// ``` + /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::transport::Compression; + /// # async fn example() -> Result<(), Box> { + /// let session: LegacySession = SessionBuilder::new() + /// .known_node("127.0.0.1:9042") + /// .compression(Some(Compression::Snappy)) + /// .build_legacy() // Turns SessionBuilder into LegacySession + /// .await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn build_legacy( + &self, + ) -> Result, NewSessionError> { + GenericSession::connect(self.config.clone()).await + } + + /// Builds the Session after setting all the options. + /// + /// The new session object uses the new deserialization API. If you wish + /// to use the old API, use [`SessionBuilder::build`]. /// /// # Example /// ``` @@ -529,8 +565,10 @@ impl GenericSessionBuilder { /// # Ok(()) /// # } /// ``` - pub async fn build(&self) -> Result { - Session::connect(self.config.clone()).await + pub async fn build( + &self, + ) -> Result, NewSessionError> { + GenericSession::connect(self.config.clone()).await } /// Changes connection timeout @@ -815,7 +853,7 @@ impl GenericSessionBuilder { } /// Set the number of attempts to fetch [TracingInfo](crate::tracing::TracingInfo) - /// in [`Session::get_tracing_info`]. + /// in [`Session::get_tracing_info`](crate::Session::get_tracing_info). /// The default is 5 attempts. /// /// Tracing info might not be available immediately on queried node - that's why @@ -844,7 +882,7 @@ impl GenericSessionBuilder { } /// Set the delay between attempts to fetch [TracingInfo](crate::tracing::TracingInfo) - /// in [`Session::get_tracing_info`]. + /// in [`Session::get_tracing_info`](crate::Session::get_tracing_info). /// The default is 3 milliseconds. /// /// Tracing info might not be available immediately on queried node - that's why @@ -873,7 +911,7 @@ impl GenericSessionBuilder { } /// Set the consistency level of fetching [TracingInfo](crate::tracing::TracingInfo) - /// in [`Session::get_tracing_info`]. + /// in [`Session::get_tracing_info`](crate::Session::get_tracing_info). /// The default is [`Consistency::One`]. 
diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs
index d4222d3b55..6c4beeb4ae 100644
--- a/scylla/src/transport/session_test.rs
+++ b/scylla/src/transport/session_test.rs
@@ -1,6 +1,5 @@
-use crate as scylla;
 use crate::batch::{Batch, BatchStatement};
-use crate::frame::response::result::Row;
+use crate::deserialize::DeserializeOwnedValue;
 use crate::prepared_statement::PreparedStatement;
 use crate::query::Query;
 use crate::retry_policy::{QueryInfo, RetryDecision, RetryPolicy, RetrySession};
@@ -12,6 +11,7 @@ use crate::transport::errors::{BadKeyspaceName, BadQuery, DbError, QueryError};
 use crate::transport::partitioner::{
     calculate_token_for_partition_key, Murmur3Partitioner, Partitioner, PartitionerName,
 };
+use crate::transport::session::Session;
 use crate::transport::topology::Strategy::NetworkTopologyStrategy;
 use crate::transport::topology::{
     CollectionType, ColumnKind, CqlType, NativeType, UserDefinedType,
@@ -19,24 +19,26 @@ use crate::utils::test_utils::{
     create_new_session_builder, supports_feature, unique_keyspace_name,
 };
-use crate::CachingSession;
 use crate::ExecutionProfile;
-use crate::LegacyQueryResult;
-use crate::{Session, SessionBuilder};
+use crate::{self as scylla, QueryResult};
+use crate::{CachingSession, SessionBuilder};
 use assert_matches::assert_matches;
-use futures::{FutureExt, StreamExt, TryStreamExt};
+use futures::{FutureExt, StreamExt as _, TryStreamExt};
 use itertools::Itertools;
 use scylla_cql::frame::request::query::{PagingState, PagingStateResponse};
-use scylla_cql::frame::response::result::ColumnType;
+use scylla_cql::frame::response::result::{ColumnType, Row};
+use scylla_cql::frame::value::CqlVarint;
 use scylla_cql::types::serialize::row::{SerializeRow, SerializedValues};
 use scylla_cql::types::serialize::value::SerializeValue;
-use std::collections::BTreeSet;
 use std::collections::{BTreeMap, HashMap};
+use std::collections::{BTreeSet, HashSet};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
 use tokio::net::TcpListener;
 use uuid::Uuid;

+use super::query_result::QueryRowsResult;
+
 #[tokio::test]
 async fn test_connection_failure() {
     setup_tracing();
@@ -107,42 +109,40 @@ async fn test_unprepared_statement() {
         .await
         .unwrap();

-    let (a_idx, _) = query_result.get_column_spec("a").unwrap();
-    let (b_idx, _) = query_result.get_column_spec("b").unwrap();
-    let (c_idx, _) = query_result.get_column_spec("c").unwrap();
-    assert!(query_result.get_column_spec("d").is_none());
+    let rows = query_result.into_rows_result().unwrap().unwrap();

-    let rs = query_result.rows.unwrap();
+    let col_specs = rows.column_specs();
+    assert_eq!(col_specs.get_by_name("a").unwrap().0, 0);
+    assert_eq!(col_specs.get_by_name("b").unwrap().0, 1);
+    assert_eq!(col_specs.get_by_name("c").unwrap().0, 2);
+    assert!(col_specs.get_by_name("d").is_none());
+
+    let mut results = rows
+        .rows::<(i32, i32, String)>()
+        .unwrap()
+        .collect::<Result<Vec<_>, _>>()
+        .unwrap();

-    let mut results: Vec<(i32, i32, &String)> = rs
-        .iter()
-        .map(|r| {
-            let a = r.columns[a_idx].as_ref().unwrap().as_int().unwrap();
-            let b = r.columns[b_idx].as_ref().unwrap().as_int().unwrap();
-            let c = r.columns[c_idx].as_ref().unwrap().as_text().unwrap();
-            (a, b, c)
-        })
-        .collect();
     results.sort();
     assert_eq!(
         results,
         vec![
-            (1, 2, &String::from("abc")),
-            (1, 4, &String::from("hello")),
-            (7, 11, &String::from(""))
+            (1, 2, String::from("abc")),
+            (1, 4, String::from("hello")),
+            (7, 11, String::from(""))
         ]
     );
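Nearly every test below repeats the same conversion chain, so it is worth seeing once in isolation. A minimal standalone sketch, assuming a hypothetical table `examples_ks.t(a int, b int, c text)`:

use scylla::Session;

async fn read_all(
    session: &Session,
) -> Result<Vec<(i32, i32, String)>, Box<dyn std::error::Error>> {
    let rows_result = session
        .query_unpaged("SELECT a, b, c FROM examples_ks.t", &[])
        .await?
        .into_rows_result()? // Err: malformed result metadata
        .expect("expected a Rows result"); // None: statement returned no Rows

    // rows::<T>() type-checks against the column specs up front and yields
    // a lazily-deserializing iterator of owned tuples.
    let rows = rows_result
        .rows::<(i32, i32, String)>()?
        .collect::<Result<Vec<_>, _>>()?;
    Ok(rows)
}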
     let query_result = session
         .query_iter(format!("SELECT a, b, c FROM {}.t", ks), &[])
         .await
         .unwrap();
-    let specs = query_result.get_column_specs();
+    let specs = query_result.column_specs();
     assert_eq!(specs.len(), 3);
     for (spec, name) in specs.iter().zip(["a", "b", "c"]) {
         assert_eq!(spec.name(), name); // Check column name.
         assert_eq!(spec.table_spec().ks_name(), ks);
     }

-    let mut results_from_manual_paging: Vec<Row> = vec![];
+    let mut results_from_manual_paging = vec![];
     let query = Query::new(format!("SELECT a, b, c FROM {}.t", ks)).with_page_size(1);
     let mut paging_state = PagingState::start();
     let mut watchdog = 0;
@@ -151,7 +151,15 @@ async fn test_unprepared_statement() {
             .query_single_page(query.clone(), &[], paging_state)
             .await
             .unwrap();
-        results_from_manual_paging.append(&mut rs_manual.rows.unwrap());
+        let mut page_results = rs_manual
+            .into_rows_result()
+            .unwrap()
+            .unwrap()
+            .rows::<(i32, i32, String)>()
+            .unwrap()
+            .collect::<Result<Vec<_>, _>>()
+            .unwrap();
+        results_from_manual_paging.append(&mut page_results);
         match paging_state_response {
             PagingStateResponse::HasMorePages { state } => {
                 paging_state = state;
@@ -161,7 +169,7 @@ async fn test_unprepared_statement() {
         }
         watchdog += 1;
     }
-    assert_eq!(results_from_manual_paging, rs);
+    assert_eq!(results_from_manual_paging, results);
 }

 #[tokio::test]
@@ -195,7 +203,7 @@ async fn test_prepared_statement() {
         .await
         .unwrap();
     let query_result = session.execute_iter(prepared_statement, &[]).await.unwrap();
-    let specs = query_result.get_column_specs();
+    let specs = query_result.column_specs();
     assert_eq!(specs.len(), 3);
     for (spec, name) in specs.iter().zip(["a", "b", "c"]) {
         assert_eq!(spec.name(), name); // Check column name.
@@ -234,7 +242,10 @@ async fn test_prepared_statement() {
         .query_unpaged(format!("SELECT token(a) FROM {}.t2", ks), &[])
         .await
         .unwrap()
-        .single_row_typed()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(i64,)>()
         .unwrap();
     let token = Token::new(value);
     let prepared_token = Murmur3Partitioner
@@ -253,7 +264,10 @@ async fn test_prepared_statement() {
         .query_unpaged(format!("SELECT token(a,b,c) FROM {}.complex_pk", ks), &[])
         .await
         .unwrap()
-        .single_row_typed()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(i64,)>()
         .unwrap();
     let token = Token::new(value);
     let prepared_token = Murmur3Partitioner.hash_one(
@@ -275,15 +289,17 @@ async fn test_prepared_statement() {
         .query_unpaged(format!("SELECT a,b,c FROM {}.t2", ks), &[])
         .await
         .unwrap()
-        .rows
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32, String)>()
+        .unwrap()
+        .collect::<Result<Vec<_>, _>>()
         .unwrap();
-    let r = rs.first().unwrap();
-    let a = r.columns[0].as_ref().unwrap().as_int().unwrap();
-    let b = r.columns[1].as_ref().unwrap().as_int().unwrap();
-    let c = r.columns[2].as_ref().unwrap().as_text().unwrap();
-    assert_eq!((a, b, c), (17, 16, &String::from("I'm prepared!!!")));
+    let r = &rs[0];
+    assert_eq!(r, &(17, 16, String::from("I'm prepared!!!")));

-    let mut results_from_manual_paging: Vec<Row> = vec![];
+    let mut results_from_manual_paging = vec![];
     let query = Query::new(format!("SELECT a, b, c FROM {}.t2", ks)).with_page_size(1);
     let prepared_paged = session.prepare(query).await.unwrap();
     let mut paging_state = PagingState::start();
@@ -293,7 +309,15 @@ async fn test_prepared_statement() {
             .execute_single_page(&prepared_paged, &[], paging_state)
             .await
             .unwrap();
-        results_from_manual_paging.append(&mut rs_manual.rows.unwrap());
+        let mut page_results = rs_manual
+            .into_rows_result()
+            .unwrap()
+            .unwrap()
+            .rows::<(i32, i32, String)>()
+            .unwrap()
+            .collect::<Result<Vec<_>, _>>()
+            .unwrap();
+        results_from_manual_paging.append(&mut page_results);
         match paging_state_response {
             PagingStateResponse::HasMorePages { state } => {
                 paging_state = state;
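The manual-paging loop rewritten above is another recurring shape. A self-contained sketch, assuming a prepared statement that selects `(int, text)` columns and has a page size set; import paths follow the ones this diff itself uses:

use scylla::prepared_statement::PreparedStatement;
use scylla::Session;
use scylla_cql::frame::request::query::{PagingState, PagingStateResponse};

async fn page_through(
    session: &Session,
    stmt: &PreparedStatement,
) -> Result<Vec<(i32, String)>, Box<dyn std::error::Error>> {
    let mut all_rows = Vec::new();
    let mut paging_state = PagingState::start();
    loop {
        let (res, paging_state_response) = session
            .execute_single_page(stmt, &[], paging_state)
            .await?;
        // Each page is a complete QueryResult and is decoded the same way
        // as an unpaged one.
        let page = res
            .into_rows_result()?
            .expect("expected a Rows result")
            .rows::<(i32, String)>()?
            .collect::<Result<Vec<_>, _>>()?;
        all_rows.extend(page);
        match paging_state_response {
            PagingStateResponse::HasMorePages { state } => paging_state = state,
            PagingStateResponse::NoMorePages => break,
        }
    }
    Ok(all_rows)
}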
@@ -310,7 +334,10 @@ async fn test_prepared_statement() {
         .query_unpaged(format!("SELECT a,b,c,d,e FROM {}.complex_pk", ks), &[])
         .await
         .unwrap()
-        .single_row_typed()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(i32, i32, String, i32, Option<i32>)>()
         .unwrap();
     assert!(e.is_none());
     assert_eq!(
@@ -318,9 +345,9 @@ async fn test_prepared_statement() {
         (17, 16, "I'm prepared!!!", 7, None)
     );
 }
-    // Check that SerializeRow macro works
+    // Check that SerializeRow and DeserializeRow macros work
     {
-        #[derive(scylla::SerializeRow, scylla::FromRow, PartialEq, Debug, Clone)]
+        #[derive(scylla::SerializeRow, scylla::DeserializeRow, PartialEq, Debug, Clone)]
         #[scylla(crate = crate)]
         struct ComplexPk {
             a: i32,
@@ -356,7 +383,10 @@ async fn test_prepared_statement() {
         )
         .await
         .unwrap()
-        .single_row_typed()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row()
         .unwrap();
         assert_eq!(input, output)
     }
@@ -477,7 +507,10 @@ async fn test_batch() {
         .query_unpaged(format!("SELECT a, b, c FROM {}.t_batch", ks), &[])
         .await
         .unwrap()
-        .rows_typed()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32, String)>()
         .unwrap()
         .collect::<Result<_, _>>()
         .unwrap();
@@ -514,7 +547,10 @@ async fn test_batch() {
     )
     .await
     .unwrap()
-    .rows_typed()
+    .into_rows_result()
+    .unwrap()
+    .unwrap()
+    .rows::<(i32, i32, String)>()
     .unwrap()
     .collect::<Result<_, _>>()
     .unwrap();
@@ -567,7 +603,10 @@ async fn test_token_calculation() {
         )
         .await
         .unwrap()
-        .single_row_typed()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row::<(i64,)>()
         .unwrap();
     let token = Token::new(value);
     let prepared_token = Murmur3Partitioner
@@ -623,7 +662,7 @@ async fn test_token_awareness() {
         .await
         .unwrap();
     let tracing_info = session
-        .get_tracing_info(res.tracing_id.as_ref().unwrap())
+        .get_tracing_info(res.tracing_id().as_ref().unwrap())
         .await
         .unwrap();

@@ -635,7 +674,7 @@ async fn test_token_awareness() {
         .execute_iter(prepared_statement.clone(), values)
         .await
         .unwrap();
-    let tracing_id = iter.get_tracing_ids()[0];
+    let tracing_id = iter.tracing_ids()[0];
     let tracing_info = session.get_tracing_info(&tracing_id).await.unwrap();

     // Again, verify that only one node was involved
@@ -675,7 +714,10 @@ async fn test_use_keyspace() {
         .query_unpaged("SELECT * FROM tab", &[])
         .await
         .unwrap()
-        .rows_typed::<(String,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(String,)>()
         .unwrap()
         .map(|res| res.unwrap().0)
         .collect();
@@ -724,7 +766,10 @@ async fn test_use_keyspace() {
         .query_unpaged("SELECT * FROM tab", &[])
         .await
         .unwrap()
-        .rows_typed::<(String,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(String,)>()
         .unwrap()
         .map(|res| res.unwrap().0)
         .collect();
@@ -784,7 +829,10 @@ async fn test_use_keyspace_case_sensitivity() {
         .query_unpaged("SELECT * from tab", &[])
         .await
         .unwrap()
-        .rows_typed::<(String,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(String,)>()
         .unwrap()
         .map(|row| row.unwrap().0)
         .collect();
@@ -799,7 +847,10 @@ async fn test_use_keyspace_case_sensitivity() {
         .query_unpaged("SELECT * from tab", &[])
         .await
         .unwrap()
-        .rows_typed::<(String,)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(String,)>()
         .unwrap()
         .map(|row| row.unwrap().0)
         .collect();
.query_unpaged("SELECT * FROM tab", &[]) .await .unwrap() - .rows_typed::<(String,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(String,)>() .unwrap() .map(|res| res.unwrap().0) .collect(); @@ -959,21 +1013,20 @@ async fn test_tracing() { async fn test_tracing_query(session: &Session, ks: String) { // A query without tracing enabled has no tracing uuid in result let untraced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); - let untraced_query_result: LegacyQueryResult = + let untraced_query_result: QueryResult = session.query_unpaged(untraced_query, &[]).await.unwrap(); - assert!(untraced_query_result.tracing_id.is_none()); + assert!(untraced_query_result.tracing_id().is_none()); // A query with tracing enabled has a tracing uuid in result let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); traced_query.config.tracing = true; - let traced_query_result: LegacyQueryResult = - session.query_unpaged(traced_query, &[]).await.unwrap(); - assert!(traced_query_result.tracing_id.is_some()); + let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap(); + assert!(traced_query_result.tracing_id().is_some()); // Querying this uuid from tracing table gives some results - assert_in_tracing_table(session, traced_query_result.tracing_id.unwrap()).await; + assert_in_tracing_table(session, traced_query_result.tracing_id().unwrap()).await; } async fn test_tracing_execute(session: &Session, ks: String) { @@ -983,12 +1036,12 @@ async fn test_tracing_execute(session: &Session, ks: String) { .await .unwrap(); - let untraced_prepared_result: LegacyQueryResult = session + let untraced_prepared_result: QueryResult = session .execute_unpaged(&untraced_prepared, &[]) .await .unwrap(); - assert!(untraced_prepared_result.tracing_id.is_none()); + assert!(untraced_prepared_result.tracing_id().is_none()); // Executing a prepared statement with tracing enabled has a tracing uuid in result let mut traced_prepared = session @@ -998,14 +1051,14 @@ async fn test_tracing_execute(session: &Session, ks: String) { traced_prepared.config.tracing = true; - let traced_prepared_result: LegacyQueryResult = session + let traced_prepared_result: QueryResult = session .execute_unpaged(&traced_prepared, &[]) .await .unwrap(); - assert!(traced_prepared_result.tracing_id.is_some()); + assert!(traced_prepared_result.tracing_id().is_some()); // Querying this uuid from tracing table gives some results - assert_in_tracing_table(session, traced_prepared_result.tracing_id.unwrap()).await; + assert_in_tracing_table(session, traced_prepared_result.tracing_id().unwrap()).await; } async fn test_tracing_prepare(session: &Session, ks: String) { @@ -1035,9 +1088,8 @@ async fn test_get_tracing_info(session: &Session, ks: String) { let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); traced_query.config.tracing = true; - let traced_query_result: LegacyQueryResult = - session.query_unpaged(traced_query, &[]).await.unwrap(); - let tracing_id: Uuid = traced_query_result.tracing_id.unwrap(); + let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap(); + let tracing_id: Uuid = traced_query_result.tracing_id().unwrap(); // Getting tracing info from session using this uuid works let tracing_info: TracingInfo = session.get_tracing_info(&tracing_id).await.unwrap(); @@ -1049,33 +1101,22 @@ async fn test_tracing_query_iter(session: &Session, ks: String) { // A query without tracing enabled has no tracing ids let 
@@ -1049,33 +1101,22 @@ async fn test_tracing_query_iter(session: &Session, ks: String) {
     // A query without tracing enabled has no tracing ids
     let untraced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks));
-    let mut untraced_row_iter = session.query_iter(untraced_query, &[]).await.unwrap();
-    while let Some(_row) = untraced_row_iter.next().await {
-        // Receive rows
-    }
-
-    assert!(untraced_row_iter.get_tracing_ids().is_empty());
+    let untraced_query_pager = session.query_iter(untraced_query, &[]).await.unwrap();
+    assert!(untraced_query_pager.tracing_ids().is_empty());

-    // The same is true for TypedRowIter
-    let untraced_typed_row_iter = untraced_row_iter.into_typed::<(String,)>();
-    assert!(untraced_typed_row_iter.get_tracing_ids().is_empty());
+    let untraced_typed_row_iter = untraced_query_pager.rows_stream::<(String,)>().unwrap();
+    assert!(untraced_typed_row_iter.tracing_ids().is_empty());

     // A query with tracing enabled has a tracing ids in result
     let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks));
     traced_query.config.tracing = true;

-    let mut traced_row_iter = session.query_iter(traced_query, &[]).await.unwrap();
-    while let Some(_row) = traced_row_iter.next().await {
-        // Receive rows
-    }
-
-    assert!(!traced_row_iter.get_tracing_ids().is_empty());
+    let traced_query_pager = session.query_iter(traced_query, &[]).await.unwrap();

-    // The same is true for TypedRowIter
-    let traced_typed_row_iter = traced_row_iter.into_typed::<(String,)>();
-    assert!(!traced_typed_row_iter.get_tracing_ids().is_empty());
+    let traced_typed_row_stream = traced_query_pager.rows_stream::<(String,)>().unwrap();
+    assert!(!traced_typed_row_stream.tracing_ids().is_empty());

-    for tracing_id in traced_typed_row_iter.get_tracing_ids() {
+    for tracing_id in traced_typed_row_stream.tracing_ids() {
         assert_in_tracing_table(session, *tracing_id).await;
     }
 }

@@ -1087,16 +1128,11 @@ async fn test_tracing_execute_iter(session: &Session, ks: String) {
         .await
         .unwrap();

-    let mut untraced_row_iter = session.execute_iter(untraced_prepared, &[]).await.unwrap();
-    while let Some(_row) = untraced_row_iter.next().await {
-        // Receive rows
-    }
+    let untraced_query_pager = session.execute_iter(untraced_prepared, &[]).await.unwrap();
+    assert!(untraced_query_pager.tracing_ids().is_empty());

-    assert!(untraced_row_iter.get_tracing_ids().is_empty());
-
-    // The same is true for TypedRowIter
-    let untraced_typed_row_iter = untraced_row_iter.into_typed::<(String,)>();
-    assert!(untraced_typed_row_iter.get_tracing_ids().is_empty());
+    let untraced_typed_row_stream = untraced_query_pager.rows_stream::<(String,)>().unwrap();
+    assert!(untraced_typed_row_stream.tracing_ids().is_empty());

     // A prepared statement with tracing enabled has a tracing ids in result
     let mut traced_prepared = session
@@ -1105,18 +1141,12 @@ async fn test_tracing_execute_iter(session: &Session, ks: String) {
         .unwrap();
     traced_prepared.config.tracing = true;

-    let mut traced_row_iter = session.execute_iter(traced_prepared, &[]).await.unwrap();
-    while let Some(_row) = traced_row_iter.next().await {
-        // Receive rows
-    }
-
-    assert!(!traced_row_iter.get_tracing_ids().is_empty());
+    let traced_query_pager = session.execute_iter(traced_prepared, &[]).await.unwrap();

-    // The same is true for TypedRowIter
-    let traced_typed_row_iter = traced_row_iter.into_typed::<(String,)>();
-    assert!(!traced_typed_row_iter.get_tracing_ids().is_empty());
+    let traced_typed_row_stream = traced_query_pager.rows_stream::<(String,)>().unwrap();
+    assert!(!traced_typed_row_stream.tracing_ids().is_empty());

-    for tracing_id in traced_typed_row_iter.get_tracing_ids() {
+    for tracing_id in traced_typed_row_stream.tracing_ids() {
         assert_in_tracing_table(session, *tracing_id).await;
     }
 }
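As the rewritten tests show, a `QueryPager` itself only exposes metadata such as tracing ids and column specs; to actually read rows it must first be converted into a `TypedRowStream`. A minimal sketch of the same flow outside a test, assuming a hypothetical table `examples_ks.tab(name text)`:

use futures::TryStreamExt as _;
use scylla::Session;

async fn traced_stream(session: &Session) -> Result<(), Box<dyn std::error::Error>> {
    let mut query = scylla::query::Query::new("SELECT name FROM examples_ks.tab");
    query.set_tracing(true);

    let pager = session.query_iter(query, &[]).await?;
    let mut stream = pager.rows_stream::<(String,)>()?; // type-check happens here
    while let Some((name,)) = stream.try_next().await? {
        println!("{name}");
    }
    // Tracing ids survive the pager-to-stream conversion.
    for id in stream.tracing_ids() {
        println!("trace: {id}");
    }
    Ok(())
}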
@@ -1126,19 +1156,18 @@ async fn test_tracing_batch(session: &Session, ks: String) {
     let mut untraced_batch: Batch = Default::default();
     untraced_batch.append_statement(&format!("INSERT INTO {}.tab (a) VALUES('a')", ks)[..]);

-    let untraced_batch_result: LegacyQueryResult =
-        session.batch(&untraced_batch, ((),)).await.unwrap();
-    assert!(untraced_batch_result.tracing_id.is_none());
+    let untraced_batch_result: QueryResult = session.batch(&untraced_batch, ((),)).await.unwrap();
+    assert!(untraced_batch_result.tracing_id().is_none());

     // Batch with tracing enabled has a tracing uuid in result
     let mut traced_batch: Batch = Default::default();
     traced_batch.append_statement(&format!("INSERT INTO {}.tab (a) VALUES('a')", ks)[..]);
     traced_batch.config.tracing = true;

-    let traced_batch_result: LegacyQueryResult = session.batch(&traced_batch, ((),)).await.unwrap();
-    assert!(traced_batch_result.tracing_id.is_some());
+    let traced_batch_result: QueryResult = session.batch(&traced_batch, ((),)).await.unwrap();
+    assert!(traced_batch_result.tracing_id().is_some());

-    assert_in_tracing_table(session, traced_batch_result.tracing_id.unwrap()).await;
+    assert_in_tracing_table(session, traced_batch_result.tracing_id().unwrap()).await;
 }

 async fn assert_in_tracing_table(session: &Session, tracing_uuid: Uuid) {
@@ -1157,9 +1186,10 @@ async fn assert_in_tracing_table(session: &Session, tracing_uuid: Uuid) {
         .query_unpaged(traces_query.clone(), (tracing_uuid,))
         .await
         .unwrap()
-        .rows_num()
-        .unwrap();
-
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows_num();
     if rows_num > 0 {
         // Ok there was some row for this tracing_uuid
         return;
@@ -1180,13 +1210,6 @@ async fn test_await_schema_agreement() {
     let _schema_version = session.await_schema_agreement().await.unwrap();
 }

-#[tokio::test]
-async fn test_await_timed_schema_agreement() {
-    setup_tracing();
-    let session = create_new_session_builder().build().await.unwrap();
-    session.await_schema_agreement().await.unwrap();
-}
-
 #[tokio::test]
 async fn test_timestamp() {
     setup_tracing();
@@ -1271,14 +1294,19 @@ async fn test_timestamp() {
         .await
         .unwrap();

-    let mut results = session
+    let query_rows_result = session
         .query_unpaged(
             format!("SELECT a, b, WRITETIME(b) FROM {}.t_timestamp", ks),
             &[],
         )
         .await
         .unwrap()
-        .rows_typed::<(String, String, i64)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap();
+
+    let mut results = query_rows_result
+        .rows::<(&str, &str, i64)>()
         .unwrap()
         .map(Result::unwrap)
         .collect::<BTreeSet<_>>();
@@ -1290,8 +1318,7 @@ async fn test_timestamp() {
         ("regular query", "higher timestamp", 420),
         ("second query in batch", "higher timestamp", 420),
     ]
-    .iter()
-    .map(|(x, y, t)| (x.to_string(), y.to_string(), *t))
+    .into_iter()
    .collect::<BTreeSet<_>>();

     assert_eq!(results, expected_results);
@@ -1932,7 +1959,10 @@ async fn test_named_bind_markers() {
         .query_unpaged("SELECT pk, ck, v FROM t", &[])
         .await
         .unwrap()
-        .rows_typed::<(i32, i32, i32)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32, i32)>()
         .unwrap()
         .map(|res| res.unwrap())
         .collect();
@@ -2083,7 +2113,10 @@ async fn test_unprepared_reprepare_in_execute() {
         .query_unpaged("SELECT a, b, c FROM tab", ())
         .await
         .unwrap()
-        .rows_typed::<(i32, i32, i32)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32, i32)>()
         .unwrap()
         .map(|r| r.unwrap())
         .collect();
@@ -2138,7 +2171,10 @@ async fn test_unusual_valuelists() {
         .query_unpaged("SELECT a, b, c FROM tab", ())
         .await
         .unwrap()
-        .rows_typed::<(i32, i32, String)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32, String)>()
         .unwrap()
         .map(|r| r.unwrap())
         .collect();
@@ -2209,7 +2245,10 @@ async fn test_unprepared_reprepare_in_batch() {
         .query_unpaged("SELECT a, b, c FROM tab", ())
         .await
         .unwrap()
-        .rows_typed::<(i32, i32, i32)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32, i32)>()
         .unwrap()
         .map(|r| r.unwrap())
         .collect();
@@ -2276,7 +2315,10 @@ async fn test_unprepared_reprepare_in_caching_session_execute() {
         .execute_unpaged("SELECT a, b, c FROM tab", &())
         .await
         .unwrap()
-        .rows_typed::<(i32, i32, i32)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32, i32)>()
         .unwrap()
         .map(|r| r.unwrap())
         .collect();
@@ -2343,7 +2385,10 @@ async fn assert_test_batch_table_rows_contain(sess: &Session, expected_rows: &[(
         .query_unpaged("SELECT a, b FROM test_batch_table", ())
         .await
         .unwrap()
-        .rows_typed::<(i32, i32)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32)>()
         .unwrap()
         .map(|r| r.unwrap())
         .collect();
@@ -2570,33 +2615,34 @@ async fn test_batch_lwts() {
     batch.append_statement("INSERT INTO tab (p1, c1, r1, r2) VALUES (0, 123, 321, 312)");
     batch.append_statement("UPDATE tab SET r1 = 1 WHERE p1 = 0 AND c1 = 0 IF r2 = 0");

-    let batch_res: LegacyQueryResult = session.batch(&batch, ((), (), ())).await.unwrap();
+    let batch_res: QueryResult = session.batch(&batch, ((), (), ())).await.unwrap();
+    let batch_deserializer = batch_res.into_rows_result().unwrap().unwrap();

     // Scylla returns 5 columns, but Cassandra returns only 1
-    let is_scylla: bool = batch_res.col_specs().len() == 5;
+    let is_scylla: bool = batch_deserializer.column_specs().len() == 5;

     if is_scylla {
-        test_batch_lwts_for_scylla(&session, &batch, batch_res).await;
+        test_batch_lwts_for_scylla(&session, &batch, &batch_deserializer).await;
     } else {
-        test_batch_lwts_for_cassandra(&session, &batch, batch_res).await;
+        test_batch_lwts_for_cassandra(&session, &batch, &batch_deserializer).await;
     }
 }

 async fn test_batch_lwts_for_scylla(
     session: &Session,
     batch: &Batch,
-    batch_res: LegacyQueryResult,
+    query_rows_result: &QueryRowsResult,
 ) {
     // Alias required by clippy
     type IntOrNull = Option<i32>;

     // Returned columns are:
     // [applied], p1, c1, r1, r2
-    let batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> = batch_res
-        .rows_typed()
+    let batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> = query_rows_result
+        .rows()
         .unwrap()
-        .map(|r| r.unwrap())
-        .collect();
+        .collect::<Result<_, _>>()
+        .unwrap();

     let expected_batch_res_rows = vec![
         (true, Some(0), Some(0), Some(0), Some(0)),
@@ -2607,12 +2653,15 @@ async fn test_batch_lwts_for_scylla(
     assert_eq!(batch_res_rows, expected_batch_res_rows);

     let prepared_batch: Batch = session.prepare_batch(batch).await.unwrap();
-    let prepared_batch_res: LegacyQueryResult =
+    let prepared_batch_res: QueryResult =
         session.batch(&prepared_batch, ((), (), ())).await.unwrap();

     let prepared_batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> =
         prepared_batch_res
-            .rows_typed()
+            .into_rows_result()
+            .unwrap()
+            .unwrap()
+            .rows()
             .unwrap()
             .map(|r| r.unwrap())
             .collect();
@@ -2629,15 +2678,15 @@ async fn test_batch_lwts_for_scylla(
 async fn test_batch_lwts_for_cassandra(
     session: &Session,
     batch: &Batch,
-    batch_res: LegacyQueryResult,
+    query_rows_result: &QueryRowsResult,
 ) {
     // Alias required by clippy
     type IntOrNull = Option<i32>;

     // Returned columns are:
     // [applied]
-    let batch_res_rows: Vec<(bool,)> = batch_res
-        .rows_typed()
+    let batch_res_rows: Vec<(bool,)> = query_rows_result
+        .rows()
         .unwrap()
         .map(|r| r.unwrap())
         .collect();
@@ -2647,14 +2696,17 @@ async fn test_batch_lwts_for_cassandra(
     assert_eq!(batch_res_rows, expected_batch_res_rows);

     let prepared_batch: Batch = session.prepare_batch(batch).await.unwrap();
-    let prepared_batch_res: LegacyQueryResult =
+    let prepared_batch_res: QueryResult =
         session.batch(&prepared_batch, ((), (), ())).await.unwrap();

     // Returned columns are:
     // [applied], p1, c1, r1, r2
     let prepared_batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> =
         prepared_batch_res
-            .rows_typed()
+            .into_rows_result()
+            .unwrap()
+            .unwrap()
+            .rows()
             .unwrap()
             .map(|r| r.unwrap())
             .collect();
@@ -2759,13 +2811,15 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() {
     assert!(!retried_flag.load(Ordering::Relaxed));
     // Try to write something to the new table - it should fail and the policy
     // will tell us to ignore the error
-    let mut iter = session
+    let mut stream = session
         .query_iter("INSERT INTO t (pk v) VALUES (1, 2)", ())
         .await
+        .unwrap()
+        .rows_stream::<Row>()
         .unwrap();

     assert!(retried_flag.load(Ordering::Relaxed));
-    while iter.try_next().await.unwrap().is_some() {}
+    while stream.try_next().await.unwrap().is_some() {}

     retried_flag.store(false, Ordering::Relaxed);
     // Try the same with execute_iter()
@@ -2773,7 +2827,13 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() {
         .prepare("INSERT INTO t (pk, v) VALUES (?, ?)")
         .await
         .unwrap();
-    let mut iter = session.execute_iter(p, (1, 2)).await.unwrap();
+    let mut iter = session
+        .execute_iter(p, (1, 2))
+        .await
+        .unwrap()
+        .rows_stream::<Row>()
+        .unwrap()
+        .into_stream();

     assert!(retried_flag.load(Ordering::Relaxed));
     while iter.try_next().await.unwrap().is_some() {}
@@ -2801,19 +2861,30 @@ async fn test_iter_methods_with_modification_statements() {
         ks
     ));
     query.set_tracing(true);
-    let mut row_iterator = session.query_iter(query, &[]).await.unwrap();
-    row_iterator.next().await.ok_or(()).unwrap_err(); // assert empty
-    assert!(!row_iterator.get_tracing_ids().is_empty());
+    let mut rows_stream = session
+        .query_iter(query, &[])
+        .await
+        .unwrap()
+        .rows_stream::<Row>()
+        .unwrap();
+    rows_stream.next().await.ok_or(()).unwrap_err(); // assert empty
+    assert!(!rows_stream.tracing_ids().is_empty());

     let prepared_statement = session
         .prepare(format!("INSERT INTO {}.t (a, b, c) VALUES (?, ?, ?)", ks))
         .await
         .unwrap();
-    let mut row_iterator = session
+    let query_pager = session
         .execute_iter(prepared_statement, (2, 3, "cba"))
         .await
         .unwrap();
-    row_iterator.next().await.ok_or(()).unwrap_err(); // assert empty
+    query_pager
+        .rows_stream::<()>()
+        .unwrap()
+        .next()
+        .await
+        .ok_or(())
+        .unwrap_err(); // assert empty
 }

 #[tokio::test]
@@ -2899,7 +2970,10 @@ async fn simple_strategy_test() {
         .query_unpaged(format!("SELECT p, c, r FROM {}.tab", ks), ())
         .await
         .unwrap()
-        .rows_typed::<(i32, i32, i32)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32, i32)>()
         .unwrap()
         .map(|r| r.unwrap())
         .collect::<Vec<_>>();
@@ -3011,3 +3085,68 @@ async fn test_manual_primary_key_computation() {
         .await;
     }
 }
+
+/// ScyllaDB does not distinguish empty collections from nulls. That is, INSERTing an empty collection
+/// is equivalent to nullifying the corresponding column.
+/// As pointed out in [#1001](https://github.com/scylladb/scylla-rust-driver/issues/1001), it's a nice
+/// QOL feature to be able to deserialize empty CQL collections to empty Rust collections instead of
+/// `None::<Collection>`. This test checks that.
+#[tokio::test]
+async fn test_deserialize_empty_collections() {
+    // Setup session.
+    let ks = unique_keyspace_name();
+    let session = create_new_session_builder().build().await.unwrap();
+    session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap();
+    session.use_keyspace(&ks, true).await.unwrap();
+
+    async fn deserialize_empty_collection<
+        Collection: Default + DeserializeOwnedValue + SerializeValue,
+    >(
+        session: &Session,
+        collection_name: &str,
+        collection_type_params: &str,
+    ) -> Collection {
+        // Create a table for the given collection type.
+        let table_name = "test_empty_".to_owned() + collection_name;
+        let query = format!(
+            "CREATE TABLE {} (n int primary key, c {}<{}>)",
+            table_name, collection_name, collection_type_params
+        );
+        session.query_unpaged(query, ()).await.unwrap();
+
+        // Populate the table with an empty collection, effectively inserting null as the collection.
+        session
+            .query_unpaged(
+                format!("INSERT INTO {} (n, c) VALUES (?, ?)", table_name,),
+                (0, Collection::default()),
+            )
+            .await
+            .unwrap();
+
+        let query_rows_result = session
+            .query_unpaged(format!("SELECT c FROM {}", table_name), ())
+            .await
+            .unwrap()
+            .into_rows_result()
+            .unwrap()
+            .unwrap();
+        let (collection,) = query_rows_result.first_row::<(Collection,)>().unwrap();
+
+        // Drop the table
+        collection
+    }
+
+    let list = deserialize_empty_collection::<Vec<i32>>(&session, "list", "int").await;
+    assert!(list.is_empty());
+
+    let set = deserialize_empty_collection::<HashSet<i64>>(&session, "set", "bigint").await;
+    assert!(set.is_empty());
+
+    let map = deserialize_empty_collection::<HashMap<bool, CqlVarint>>(
+        &session,
+        "map",
+        "boolean, varint",
+    )
+    .await;
+    assert!(map.is_empty());
+}
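The behavior this test pins down is easy to rely on in application code: an empty CQL collection (stored as null) comes back as an empty Rust collection rather than requiring `Option`. A minimal sketch, assuming a hypothetical table `ks.t(n int primary key, c list<int>)`:

use scylla::Session;

async fn read_empty_list(session: &Session) -> Result<(), Box<dyn std::error::Error>> {
    // Inserting an empty list nullifies the column on the server side...
    session
        .query_unpaged("INSERT INTO ks.t (n, c) VALUES (0, ?)", (Vec::<i32>::new(),))
        .await?;

    let rows = session
        .query_unpaged("SELECT c FROM ks.t WHERE n = 0", ())
        .await?
        .into_rows_result()?
        .expect("expected a Rows result");
    // ...but the new API still deserializes it as an empty Vec, not None.
    let (c,) = rows.first_row::<(Vec<i32>,)>()?;
    assert!(c.is_empty());
    Ok(())
}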
diff --git a/scylla/src/transport/silent_prepare_batch_test.rs b/scylla/src/transport/silent_prepare_batch_test.rs
index ece8d1d3fc..48c0dc1f1e 100644
--- a/scylla/src/transport/silent_prepare_batch_test.rs
+++ b/scylla/src/transport/silent_prepare_batch_test.rs
@@ -96,7 +96,10 @@ async fn assert_test_batch_table_rows_contain(sess: &Session, expected_rows: &[(
         .query_unpaged("SELECT a, b FROM test_batch_table", ())
         .await
         .unwrap()
-        .rows_typed::<(i32, i32)>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<(i32, i32)>()
         .unwrap()
         .map(|r| r.unwrap())
         .collect();
diff --git a/scylla/src/transport/topology.rs b/scylla/src/transport/topology.rs
index 93a80d2fa0..ab29cd46b2 100644
--- a/scylla/src/transport/topology.rs
+++ b/scylla/src/transport/topology.rs
@@ -1,3 +1,4 @@
+use crate::deserialize::DeserializeOwnedRow;
 use crate::frame::response::event::Event;
 use crate::routing::Token;
 use crate::statement::query::Query;
@@ -5,6 +6,7 @@ use crate::transport::connection::{Connection, ConnectionConfig};
 use crate::transport::connection_pool::{NodeConnectionPool, PoolConfig, PoolSize};
 use crate::transport::errors::{DbError, NewSessionError, QueryError};
 use crate::transport::host_filter::HostFilter;
+use crate::transport::iterator::QueryPager;
 use crate::transport::node::resolve_contact_points;
 use crate::utils::parse::{ParseErrorCause, ParseResult, ParserState};

@@ -13,8 +15,9 @@ use futures::stream::{self, StreamExt, TryStreamExt};
 use futures::Stream;
 use rand::seq::SliceRandom;
 use rand::{thread_rng, Rng};
-use scylla_cql::frame::response::result::Row;
-use scylla_macros::FromRow;
+use scylla_cql::frame::frame_errors::RowsParseError;
+use scylla_cql::types::deserialize::TypeCheckError;
+use scylla_macros::DeserializeRow;
 use std::borrow::BorrowMut;
 use std::cell::Cell;
 use std::collections::HashMap;
@@ -765,11 +768,13 @@ async fn query_metadata(
     Ok(Metadata { peers, keyspaces })
 }

-#[derive(FromRow)]
-#[scylla_crate = "scylla_cql"]
+#[derive(DeserializeRow)]
+#[scylla(crate = "scylla_cql")]
 struct NodeInfoRow {
     host_id: Option<Uuid>,
+    #[scylla(rename = "rpc_address")]
     untranslated_ip_addr: IpAddr,
+    #[scylla(rename = "data_center")]
     datacenter: Option<String>,
     rack: Option<String>,
     tokens: Option<Vec<String>>,
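`DeserializeRow` matches Rust fields to result columns by name, and `#[scylla(rename = ...)]` decouples the two where they differ, as in `NodeInfoRow` above. A standalone sketch of the same mechanism with a hypothetical result shape:

use scylla::DeserializeRow;

// Fields bind to columns by name unless renamed.
#[derive(DeserializeRow, Debug)]
struct PeerInfo {
    #[scylla(rename = "rpc_address")] // column is `rpc_address`; field name differs
    untranslated_ip_addr: std::net::IpAddr,
    rack: Option<String>, // Option<_> tolerates null cells
}

Any query whose result contains `rpc_address` (inet) and `rack` (text) columns can then be deserialized into `PeerInfo` via `rows::<PeerInfo>()` or `rows_stream::<PeerInfo>()`.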
@@ -799,6 +804,13 @@ async fn query_peers(conn: &Arc<Connection>, connect_port: u16) -> Result<Vec<Peer>, QueryError> {
+        .map(|pager_res| {
+            let pager = pager_res?;
+            let rows_stream = pager
+                .rows_stream::<NodeInfoRow>()
+                .map_err(RowsParseError::from)?;
+            Ok::<_, QueryError>(rows_stream)
+        })
         .into_stream()
         .try_flatten()
         .and_then(|row_result| future::ok((NodeInfoSource::Peer, row_result)));
@@ -809,6 +821,13 @@ async fn query_peers(conn: &Arc<Connection>, connect_port: u16) -> Result<Vec<Peer>, QueryError> {
+        .map(|pager_res| {
+            let pager = pager_res?;
+            let rows_stream = pager
+                .rows_stream::<NodeInfoRow>()
+                .map_err(RowsParseError::from)?;
+            Ok::<_, QueryError>(rows_stream)
+        })
         .into_stream()
         .try_flatten()
         .and_then(|row_result| future::ok((NodeInfoSource::Local, row_result)));
@@ -819,9 +838,8 @@ async fn query_peers(conn: &Arc<Connection>, connect_port: u16) -> Result<Vec<Peer>, QueryError> {
-            Ok((source, row)) => create_peer_from_row(source, row, local_address).await,
+        match row_result {
+            Ok((source, row)) => create_peer_from_row(source, row, local_address).await,
             Err(err) => {
                 warn!(
                     "system.peers or system.local has an invalid row, skipping it: {}",
@@ -905,14 +923,25 @@ async fn create_peer_from_row(
     }))
 }

-fn query_filter_keyspace_name<'a>(
+fn query_filter_keyspace_name<'a, R>(
     conn: &Arc<Connection>,
     query_str: &'a str,
     keyspaces_to_fetch: &'a [String],
-) -> impl Stream<Item = Result<Row, QueryError>> + 'a {
+    convert_typecheck_error: impl FnOnce(TypeCheckError) -> MetadataError + 'a,
+) -> impl Stream<Item = Result<R, QueryError>> + 'a
+where
+    R: DeserializeOwnedRow + 'static,
+{
     let conn = conn.clone();
-    let fut = async move {
+    // This function is extracted to reduce monomorphisation penalty:
+    // query_filter_keyspace_name() is going to be monomorphised into 5 distinct functions,
+    // so it's better to extract the common part.
+    async fn make_keyspace_filtered_query_pager(
+        conn: Arc<Connection>,
+        query_str: &str,
+        keyspaces_to_fetch: &[String],
+    ) -> Result<QueryPager, QueryError> {
         if keyspaces_to_fetch.is_empty() {
             let mut query = Query::new(query_str);
             query.set_page_size(METADATA_QUERY_PAGE_SIZE);
@@ -929,6 +958,13 @@ fn query_filter_keyspace_name<'a, R>(
             let serialized_values = prepared.serialize_values(&keyspaces)?;
             conn.execute_iter(prepared, serialized_values).await
         }
+    }
+
+    let fut = async move {
+        let pager = make_keyspace_filtered_query_pager(conn, query_str, keyspaces_to_fetch).await?;
+        let stream: super::iterator::TypedRowStream<R> =
+            pager.rows_stream::<R>().map_err(convert_typecheck_error)?;
+        Ok::<_, QueryError>(stream)
     };
     fut.into_stream().try_flatten()
 }
@@ -938,10 +974,15 @@ async fn query_keyspaces(
     keyspaces_to_fetch: &[String],
     fetch_schema: bool,
 ) -> Result<HashMap<String, Keyspace>, QueryError> {
-    let rows = query_filter_keyspace_name(
+    let rows = query_filter_keyspace_name::<(String, HashMap<String, String>)>(
         conn,
         "select keyspace_name, replication from system_schema.keyspaces",
         keyspaces_to_fetch,
+        |err| {
+            MetadataError::Keyspaces(KeyspacesMetadataError::SchemaKeyspacesInvalidColumnType(
+                err,
+            ))
+        },
     );

     let (mut all_tables, mut all_views, mut all_user_defined_types) = if fetch_schema {
@@ -956,12 +997,7 @@ async fn query_keyspaces(
     };

     rows.map(|row_result| {
-        let row = row_result?;
-        let (keyspace_name, strategy_map) = row.into_typed::<(String, _)>().map_err(|err| {
-            MetadataError::Keyspaces(KeyspacesMetadataError::SchemaKeyspacesInvalidColumnType(
-                err,
-            ))
-        })?;
+        let (keyspace_name, strategy_map) = row_result?;

         let strategy: Strategy = strategy_from_string_map(strategy_map).map_err(|error| {
             MetadataError::Keyspaces(KeyspacesMetadataError::Strategy {
@@ -988,8 +1024,8 @@ async fn query_keyspaces(
     .await
 }

-#[derive(FromRow, Debug)]
-#[scylla_crate = "crate"]
+#[derive(DeserializeRow, Debug)]
+#[scylla(crate = "crate")]
 struct UdtRow {
     keyspace_name: String,
     type_name: String,
     field_names: Vec<String>,
     field_types: Vec<String>,
 }
@@ -1031,21 +1067,16 @@ async fn query_user_defined_types(
     conn: &Arc<Connection>,
     keyspaces_to_fetch: &[String],
 ) -> Result<HashMap<String, HashMap<String, Arc<UserDefinedType>>>, QueryError> {
-    let rows = query_filter_keyspace_name(
+    let rows = query_filter_keyspace_name::<UdtRow>(
         conn,
         "select keyspace_name, type_name, field_names, field_types from system_schema.types",
         keyspaces_to_fetch,
+        |err| MetadataError::Udts(UdtMetadataError::SchemaTypesInvalidColumnType(err)),
     );

     let mut udt_rows: Vec<UdtRowWithParsedFieldTypes> = rows
         .map(|row_result| {
-            let row = row_result?;
-            let udt_row = row
-                .into_typed::<UdtRow>()
-                .map_err(|err| {
-                    MetadataError::Udts(UdtMetadataError::SchemaTypesInvalidColumnType(err))
-                })?
-                .try_into()?;
+            let udt_row = row_result?.try_into()?;

             Ok::<_, QueryError>(udt_row)
         })
@@ -1355,21 +1386,17 @@ async fn query_tables(
     keyspaces_to_fetch: &[String],
     udts: &HashMap<String, HashMap<String, Arc<UserDefinedType>>>,
 ) -> Result<HashMap<String, HashMap<String, Table>>, QueryError> {
-    let rows = query_filter_keyspace_name(
+    let rows = query_filter_keyspace_name::<(String, String)>(
         conn,
         "SELECT keyspace_name, table_name FROM system_schema.tables",
         keyspaces_to_fetch,
+        |err| MetadataError::Tables(TablesMetadataError::SchemaTablesInvalidColumnType(err)),
     );

     let mut result = HashMap::new();
     let mut tables = query_tables_schema(conn, keyspaces_to_fetch, udts).await?;

     rows.map(|row_result| {
-        let row = row_result?;
-        let (keyspace_name, table_name) = row.into_typed().map_err(|err| {
-            MetadataError::Tables(TablesMetadataError::SchemaTablesInvalidColumnType(err))
-        })?;
-
-        let keyspace_and_table_name = (keyspace_name, table_name);
+        let keyspace_and_table_name = row_result?;

         let table = tables.remove(&keyspace_and_table_name).unwrap_or(Table {
             columns: HashMap::new(),
@@ -1396,20 +1423,18 @@ async fn query_views(
     keyspaces_to_fetch: &[String],
     udts: &HashMap<String, HashMap<String, Arc<UserDefinedType>>>,
 ) -> Result<HashMap<String, HashMap<String, MaterializedView>>, QueryError> {
-    let rows = query_filter_keyspace_name(
+    let rows = query_filter_keyspace_name::<(String, String, String)>(
         conn,
         "SELECT keyspace_name, view_name, base_table_name FROM system_schema.views",
         keyspaces_to_fetch,
+        |err| MetadataError::Views(ViewsMetadataError::SchemaViewsInvalidColumnType(err)),
     );

     let mut result = HashMap::new();
     let mut tables = query_tables_schema(conn, keyspaces_to_fetch, udts).await?;

     rows.map(|row_result| {
-        let row = row_result?;
-        let (keyspace_name, view_name, base_table_name) = row.into_typed().map_err(|err| {
-            MetadataError::Views(ViewsMetadataError::SchemaViewsInvalidColumnType(err))
-        })?;
+        let (keyspace_name, view_name, base_table_name) = row_result?;

         let keyspace_and_view_name = (keyspace_name, view_name);

@@ -1447,24 +1472,18 @@ async fn query_tables_schema(
     // This column shouldn't be exposed to the user but is currently exposed in system tables.
     const THRIFT_EMPTY_TYPE: &str = "empty";

-    let rows = query_filter_keyspace_name(conn,
-        "select keyspace_name, table_name, column_name, kind, position, type from system_schema.columns", keyspaces_to_fetch
+    type RowType = (String, String, String, String, i32, String);
+
+    let rows = query_filter_keyspace_name::<RowType>(conn,
+        "select keyspace_name, table_name, column_name, kind, position, type from system_schema.columns", keyspaces_to_fetch, |err| {
+            MetadataError::Tables(TablesMetadataError::SchemaColumnsInvalidColumnType(err))
+        }
     );

     let mut tables_schema = HashMap::new();

     rows.map(|row_result| {
-        let row = row_result?;
-        let (keyspace_name, table_name, column_name, kind, position, type_): (
-            String,
-            String,
-            String,
-            String,
-            i32,
-            String,
-        ) = row.into_typed().map_err(|err| {
-            MetadataError::Tables(TablesMetadataError::SchemaColumnsInvalidColumnType(err))
-        })?;
+        let (keyspace_name, table_name, column_name, kind, position, type_) = row_result?;

         if type_ == THRIFT_EMPTY_TYPE {
             return Ok::<_, QueryError>(());
@@ -1674,15 +1693,21 @@ async fn query_table_partitioners(
     let rows = conn
         .clone()
         .query_iter(partitioner_query)
+        .map(|pager_res| {
+            let pager = pager_res?;
+            let stream = pager
+                .rows_stream::<(String, String, Option<String>)>()
+                .map_err(|err| {
+                    MetadataError::Tables(TablesMetadataError::SchemaTablesInvalidColumnType(err))
+                })?;
+            Ok::<_, QueryError>(stream)
+        })
         .into_stream()
         .try_flatten();

     let result = rows
         .map(|row_result| {
-            let (keyspace_name, table_name, partitioner) =
-                row_result?.into_typed().map_err(|err| {
-                    MetadataError::Tables(TablesMetadataError::SchemaTablesInvalidColumnType(err))
-                })?;
+            let (keyspace_name, table_name, partitioner) = row_result?;
             Ok::<_, QueryError>(((keyspace_name, table_name), partitioner))
         })
         .try_collect::<HashMap<_, _>>()
diff --git a/scylla/src/utils/test_utils.rs b/scylla/src/utils/test_utils.rs
index 6c52fde355..2a7a21f690 100644
--- a/scylla/src/utils/test_utils.rs
+++ b/scylla/src/utils/test_utils.rs
@@ -1,3 +1,5 @@
+use scylla_cql::frame::response::result::Row;
+
 #[cfg(test)]
 use crate::transport::session_builder::{GenericSessionBuilder, SessionBuilderKind};
 use crate::Session;
@@ -46,7 +48,10 @@ pub(crate) async fn supports_feature(session: &Session, feature: &str) -> bool {
         .query_unpaged("SELECT supported_features FROM system.local", ())
         .await
         .unwrap()
-        .single_row_typed()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .single_row()
         .unwrap();

     features
@@ -95,15 +100,18 @@ pub fn create_new_session_builder() -> GenericSessionBuilder<impl SessionBuilderKind> {
 pub async fn scylla_supports_tablets(session: &Session) -> bool {
     let result = session
         .query_unpaged(
-            "select column_name from system_schema.columns where
+            "select column_name from system_schema.columns where
     keyspace_name = 'system_schema'
     and table_name = 'scylla_keyspaces'
     and column_name = 'initial_tablets'",
             &[],
         )
        .await
+        .unwrap()
+        .into_rows_result()
         .unwrap();
-    result.single_row().is_ok()
+
+    result.map_or(false, |rows_result| rows_result.single_row::<Row>().is_ok())
 }

 #[cfg(test)]
diff --git a/scylla/tests/integration/consistency.rs b/scylla/tests/integration/consistency.rs
index f12f2d8677..09780066ac 100644
--- a/scylla/tests/integration/consistency.rs
+++ b/scylla/tests/integration/consistency.rs
@@ -6,8 +6,8 @@ use scylla::prepared_statement::PreparedStatement;
 use scylla::retry_policy::FallthroughRetryPolicy;
 use scylla::routing::{Shard, Token};
 use scylla::test_utils::unique_keyspace_name;
-use scylla::transport::session::Session;
 use scylla::transport::NodeRef;
+use scylla::Session;
 use scylla_cql::frame::response::result::TableSpec;
 use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender};
diff --git a/scylla/tests/integration/silent_prepare_query.rs b/scylla/tests/integration/silent_prepare_query.rs
index d814f70a8a..93950206a5 100644
--- a/scylla/tests/integration/silent_prepare_query.rs
+++ b/scylla/tests/integration/silent_prepare_query.rs
@@ -1,5 +1,5 @@
 use crate::utils::{setup_tracing, test_with_3_node_cluster};
-use scylla::transport::session::Session;
+use scylla::Session;
 use scylla::SessionBuilder;
 use scylla::{query::Query, test_utils::unique_keyspace_name};
 use scylla_proxy::{
diff --git a/scylla/tests/integration/skip_metadata_optimization.rs b/scylla/tests/integration/skip_metadata_optimization.rs
index 1c84569e75..17f595400b 100644
--- a/scylla/tests/integration/skip_metadata_optimization.rs
+++ b/scylla/tests/integration/skip_metadata_optimization.rs
@@ -1,7 +1,6 @@
 use crate::utils::{setup_tracing, test_with_3_node_cluster};
-use scylla::transport::session::Session;
-use scylla::SessionBuilder;
 use scylla::{prepared_statement::PreparedStatement, test_utils::unique_keyspace_name};
+use scylla::{Session, SessionBuilder};
 use scylla_cql::frame::request::query::{PagingState, PagingStateResponse};
 use scylla_cql::frame::types;
 use scylla_proxy::{
@@ -114,7 +113,10 @@ async fn test_skip_result_metadata() {
         .query_unpaged(select_query, ())
         .await
         .unwrap()
-        .rows_typed::<RowT>()
+        .into_rows_result()
+        .unwrap()
+        .unwrap()
+        .rows::<RowT>()
         .unwrap()
         .collect::<Result<Vec<_>, _>>()
         .unwrap();
@@ -130,8 +132,14 @@ async fn test_skip_result_metadata() {
             .execute_single_page(&prepared_paged, &[], paging_state)
             .await
             .unwrap();
-        results_from_manual_paging
-            .extend(rs_manual.rows_typed::<RowT>().unwrap().map(Result::unwrap));
+        results_from_manual_paging.extend(
+            rs_manual.into_rows_result()
+                .unwrap()
+                .unwrap()
+                .rows::<RowT>()
+                .unwrap()
+                .map(Result::unwrap)
+        );

         match paging_state_response {
             PagingStateResponse::HasMorePages { state } => {
diff --git a/scylla/tests/integration/tablets.rs b/scylla/tests/integration/tablets.rs
index 2bdf969877..9dbb5d31ab 100644
--- a/scylla/tests/integration/tablets.rs
+++ b/scylla/tests/integration/tablets.rs
@@ -16,9 +16,7 @@ use scylla::test_utils::unique_keyspace_name;
 use scylla::transport::ClusterData;
 use scylla::transport::Node;
 use scylla::transport::NodeRef;
-use scylla::ExecutionProfile;
-use scylla::LegacyQueryResult;
-use scylla::Session;
+use scylla::{ExecutionProfile, QueryResult, Session};

 use scylla::transport::errors::QueryError;
 use scylla_proxy::{
@@ -30,7 +28,7 @@ use tokio::sync::mpsc;
 use tracing::info;
 use uuid::Uuid;

-#[derive(scylla::FromRow)]
+#[derive(scylla::DeserializeRow)]
 struct SelectedTablet {
     last_token: i64,
     replicas: Vec<(Uuid, i32)>,
@@ -57,8 +55,10 @@ async fn get_tablets(session: &Session, ks: &str, table: &str) -> Vec<Tablet> {
        "select last_token, replicas from system.tablets WHERE keyspace_name = ? and table_name = ? ALLOW FILTERING",
        &(ks, table)).await.unwrap();

-    let mut selected_tablets = selected_tablets_response
-        .into_typed::<SelectedTablet>()
+    let mut selected_tablets: Vec<SelectedTablet> = selected_tablets_response
+        .rows_stream::<SelectedTablet>()
+        .unwrap()
+        .into_stream()
         .try_collect::<Vec<_>>()
         .await
         .unwrap();
@@ -185,7 +185,7 @@ async fn send_statement_everywhere(
     cluster: &ClusterData,
     statement: &PreparedStatement,
     values: &dyn SerializeRow,
-) -> Result<Vec<LegacyQueryResult>, QueryError> {
+) -> Result<Vec<QueryResult>, QueryError> {
     let tasks = cluster.get_nodes_info().iter().flat_map(|node| {
         let shard_count: u16 = node.sharder().unwrap().nr_shards.into();
         (0..shard_count).map(|shard| {
@@ -210,7 +210,7 @@ async fn send_unprepared_query_everywhere(
     session: &Session,
     cluster: &ClusterData,
     query: &Query,
-) -> Result<Vec<LegacyQueryResult>, QueryError> {
+) -> Result<Vec<QueryResult>, QueryError> {
     let tasks = cluster.get_nodes_info().iter().flat_map(|node| {
         let shard_count: u16 = node.sharder().unwrap().nr_shards.into();
         (0..shard_count).map(|shard| {
@@ -418,6 +418,8 @@ async fn test_default_policy_is_tablet_aware() {
 #[tokio::test]
 #[ntest::timeout(30000)]
 async fn test_tablet_feedback_not_sent_for_unprepared_queries() {
+    use scylla::test_utils::scylla_supports_tablets;
+
     setup_tracing();

     const TABLET_COUNT: usize = 16;
@@ -431,7 +433,7 @@ async fn test_tablet_feedback_not_sent_for_unprepared_queries() {
         .await
         .unwrap();

-    if !scylla::test_utils::scylla_supports_tablets(&session).await {
+    if !scylla_supports_tablets(&session).await {
         tracing::warn!("Skipping test because this Scylla version doesn't support tablets");
         return running_proxy;
     }
@@ -490,6 +492,8 @@ async fn test_tablet_feedback_not_sent_for_unprepared_queries() {
 #[ntest::timeout(30000)]
 #[ignore]
 async fn test_lwt_optimization_works_with_tablets() {
+    use scylla::test_utils::scylla_supports_tablets;
+
     setup_tracing();

     const TABLET_COUNT: usize = 16;
@@ -503,7 +507,7 @@ async fn test_lwt_optimization_works_with_tablets() {
         .await
         .unwrap();

-    if !scylla::test_utils::scylla_supports_tablets(&session).await {
+    if !scylla_supports_tablets(&session).await {
         tracing::warn!("Skipping test because this Scylla version doesn't support tablets");
         return running_proxy;
     }
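A closing note on the pager-to-stream conversion used in `get_tablets` above: `into_stream()` erases the `TypedRowStream` into a plain `Stream`, which makes the full `StreamExt`/`TryStreamExt` combinator set available. A minimal, self-contained sketch, assuming a hypothetical table `examples_ks.pairs(a int, b text)`:

use futures::TryStreamExt as _;
use scylla::Session;

async fn collect_pairs(
    session: &Session,
) -> Result<Vec<(i32, String)>, Box<dyn std::error::Error>> {
    let pairs: Vec<(i32, String)> = session
        .query_iter("SELECT a, b FROM examples_ks.pairs", &[])
        .await?
        .rows_stream::<(i32, String)>()? // type-checks once, up front
        .into_stream() // erase to an anonymous Stream for combinator use
        .try_collect()
        .await?;
    Ok(pairs)
}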