From 51c1005e4756a60aa81bdeec2ae3c591faaf687e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Thu, 7 Nov 2024 15:46:16 +0100 Subject: [PATCH 01/25] iterator: fix QueryPager docstring The docstring still described the legacy RowIterator's functionalities. --- scylla/src/transport/iterator.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 1ea00c457..100fafe2e 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -540,13 +540,13 @@ where } } -/// An intermediate object that allows to construct an iterator over a query +/// An intermediate object that allows to construct a stream over a query /// that is asynchronously paged in the background. /// /// Before the results can be processed in a convenient way, the QueryPager -/// needs to be cast into a typed iterator. This is done by use of `into_typed()` method. +/// needs to be cast into a typed stream. This is done by use of `rows_stream()` method. /// As the method is generic over the target type, the turbofish syntax -/// can come in handy there, e.g. `raw_iter.into_typed::<(i32, &str, Uuid)>()`. +/// can come in handy there, e.g. `query_pager.rows_stream::<(i32, String, Uuid)>()`. /// /// A pre-0.15.0 interface is also available, although deprecated: /// `into_legacy()` method converts QueryPager to LegacyRowIterator, From e67e5daf9714583f3533387438982d56eca71f05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Sun, 10 Nov 2024 20:29:53 +0100 Subject: [PATCH 02/25] query_result: fix QueryRowsResult's docstrings There were multiple typos, cross-references were missing, and there were redundant two distinct `impl` blocks - one is enough. --- scylla/src/transport/query_result.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/scylla/src/transport/query_result.rs b/scylla/src/transport/query_result.rs index eedcb34a1..52326ba32 100644 --- a/scylla/src/transport/query_result.rs +++ b/scylla/src/transport/query_result.rs @@ -279,10 +279,12 @@ impl QueryResult { /// This struct provides generic methods which enable typed access to the data, /// by deserializing rows on the fly to the type provided as a type parameter. /// Those methods are: -/// - rows() - for iterating through rows, -/// - first_row() and maybe_first_row() - for accessing the first row first, -/// - single_row() - for accessing the first row, additionally asserting -/// that it's the only one in the response. +/// - [rows()](QueryRowsResult::rows) - for iterating through rows, +/// - [first_row()](QueryRowsResult::first_row) and +/// [maybe_first_row()](QueryRowsResult::maybe_first_row) - +/// for accessing the first row, +/// - [single_row()](QueryRowsResult::single_row) - for accessing the first row, +/// additionally asserting that it's the only one in the response. /// /// ```rust /// # use scylla::transport::query_result::QueryResult; @@ -338,10 +340,8 @@ impl QueryRowsResult { pub fn column_specs(&self) -> ColumnSpecs { ColumnSpecs::new(self.raw_rows_with_metadata.metadata().col_specs()) } -} -impl QueryRowsResult { - /// Returns the received rows when present. + /// Returns an iterator over the received rows. /// /// Returns an error if the rows in the response are of incorrect type. #[inline] @@ -353,7 +353,7 @@ impl QueryRowsResult { .map_err(RowsError::TypeCheckFailed) } - /// Returns `Option` containing the first of a result. 
+ /// Returns `Option` containing the first row of the result. /// /// Fails when the the rows in the response are of incorrect type, /// or when the deserialization fails. @@ -371,7 +371,7 @@ impl QueryRowsResult { .map_err(MaybeFirstRowError::DeserializationFailed) } - /// Returns first row from the received rows. + /// Returns the first row of the received result. /// /// When the first row is not available, returns an error. /// Fails when the the rows in the response are of incorrect type, From 463b9b550c51190139d63a70c0dbd007c4302ec5 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Wed, 15 Mar 2023 15:35:35 +0100 Subject: [PATCH 03/25] treewide: rename Session to LegacySession MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a preparation for the API change of the Session: current implementation is renamed to LegacySession, a new one will be introduced later and everything will be gradually switched to the new implementation. Co-authored-by: Wojciech Przytuła --- examples/allocations.rs | 6 +- examples/basic.rs | 4 +- examples/compare-tokens.rs | 4 +- examples/cql-time-types.rs | 4 +- examples/cqlsh-rs.rs | 4 +- examples/custom_deserialization.rs | 4 +- examples/custom_load_balancing_policy.rs | 4 +- examples/execution_profile.rs | 8 +- examples/get_by_name.rs | 4 +- examples/logging.rs | 4 +- examples/logging_log.rs | 4 +- examples/parallel-prepared.rs | 4 +- examples/parallel.rs | 4 +- examples/query_history.rs | 4 +- examples/schema_agreement.rs | 4 +- examples/select-paging.rs | 4 +- examples/speculative-execution.rs | 4 +- examples/tls.rs | 4 +- examples/tower.rs | 2 +- examples/tracing.rs | 6 +- examples/user-defined-type.rs | 4 +- examples/value_list.rs | 4 +- scylla/src/lib.rs | 14 +- scylla/src/transport/caching_session.rs | 14 +- scylla/src/transport/cql_collections_test.rs | 16 +- scylla/src/transport/cql_types_test.rs | 40 ++--- scylla/src/transport/cql_value_test.rs | 6 +- scylla/src/transport/execution_profile.rs | 8 +- .../transport/large_batch_statements_test.rs | 6 +- scylla/src/transport/session.rs | 58 +++---- scylla/src/transport/session_builder.rs | 156 +++++++++--------- scylla/src/transport/session_test.rs | 30 ++-- .../transport/silent_prepare_batch_test.rs | 4 +- scylla/src/utils/test_utils.rs | 6 +- scylla/tests/integration/consistency.rs | 16 +- scylla/tests/integration/lwt_optimisation.rs | 4 +- scylla/tests/integration/retries.rs | 8 +- scylla/tests/integration/self_identity.rs | 4 +- .../tests/integration/silent_prepare_query.rs | 6 +- .../integration/skip_metadata_optimization.rs | 6 +- scylla/tests/integration/tablets.rs | 12 +- 41 files changed, 253 insertions(+), 255 deletions(-) diff --git a/examples/allocations.rs b/examples/allocations.rs index a3ec2a5cb..039d21e01 100644 --- a/examples/allocations.rs +++ b/examples/allocations.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use scylla::{statement::prepared_statement::PreparedStatement, Session, SessionBuilder}; +use scylla::{statement::prepared_statement::PreparedStatement, LegacySession, SessionBuilder}; use std::io::Write; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -65,7 +65,7 @@ fn print_stats(stats: &stats_alloc::Stats, reqs: f64) { } async fn measure( - session: Arc, + session: Arc, prepared: Arc, reqs: usize, parallelism: usize, @@ -128,7 +128,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", args.node); - let session: Session = SessionBuilder::new().known_node(args.node).build().await?; + let 
session: LegacySession = SessionBuilder::new().known_node(args.node).build().await?; let session = Arc::new(session); session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/basic.rs b/examples/basic.rs index 72b6a5ce1..48d97b713 100644 --- a/examples/basic.rs +++ b/examples/basic.rs @@ -1,7 +1,7 @@ use anyhow::Result; use futures::TryStreamExt; use scylla::macros::FromRow; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use std::env; @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/compare-tokens.rs b/examples/compare-tokens.rs index 9e9431d86..e302b9f83 100644 --- a/examples/compare-tokens.rs +++ b/examples/compare-tokens.rs @@ -1,7 +1,7 @@ use anyhow::Result; use scylla::routing::Token; use scylla::transport::NodeAddr; -use scylla::{Session, SessionBuilder}; +use scylla::{LegacySession, SessionBuilder}; use std::env; #[tokio::main] @@ -10,7 +10,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/cql-time-types.rs b/examples/cql-time-types.rs index 548ac6987..8a8cedb66 100644 --- a/examples/cql-time-types.rs +++ b/examples/cql-time-types.rs @@ -6,7 +6,7 @@ use chrono::{DateTime, NaiveDate, NaiveTime, Utc}; use futures::{StreamExt, TryStreamExt}; use scylla::frame::response::result::CqlValue; use scylla::frame::value::{CqlDate, CqlTime, CqlTimestamp}; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use std::env; @@ -16,7 +16,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/cqlsh-rs.rs b/examples/cqlsh-rs.rs index c12b17a76..0b9cd5a18 100644 --- a/examples/cqlsh-rs.rs +++ b/examples/cqlsh-rs.rs @@ -4,7 +4,7 @@ use rustyline::error::ReadlineError; use rustyline::{CompletionType, Config, Context, Editor}; use rustyline_derive::{Helper, Highlighter, Hinter, Validator}; use scylla::transport::Compression; -use scylla::{LegacyQueryResult, Session, SessionBuilder}; +use scylla::{LegacyQueryResult, LegacySession, SessionBuilder}; use std::env; #[derive(Helper, Highlighter, Validator, Hinter)] @@ -199,7 +199,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(uri) 
.compression(Some(Compression::Lz4)) .build() diff --git a/examples/custom_deserialization.rs b/examples/custom_deserialization.rs index 1d0173ca5..976afe468 100644 --- a/examples/custom_deserialization.rs +++ b/examples/custom_deserialization.rs @@ -2,7 +2,7 @@ use anyhow::Result; use scylla::cql_to_rust::{FromCqlVal, FromCqlValError}; use scylla::frame::response::result::CqlValue; use scylla::macros::impl_from_cql_value_from_method; -use scylla::{Session, SessionBuilder}; +use scylla::{LegacySession, SessionBuilder}; use std::env; #[tokio::main] @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session diff --git a/examples/custom_load_balancing_policy.rs b/examples/custom_load_balancing_policy.rs index 5c279f233..9fa505384 100644 --- a/examples/custom_load_balancing_policy.rs +++ b/examples/custom_load_balancing_policy.rs @@ -6,7 +6,7 @@ use scylla::{ load_balancing::{LoadBalancingPolicy, RoutingInfo}, routing::Shard, transport::{ClusterData, ExecutionProfile}, - Session, SessionBuilder, + LegacySession, SessionBuilder, }; use std::{env, sync::Arc}; @@ -68,7 +68,7 @@ async fn main() -> Result<()> { .load_balancing_policy(Arc::new(custom_load_balancing)) .build(); - let _session: Session = SessionBuilder::new() + let _session: LegacySession = SessionBuilder::new() .known_node(uri) .default_execution_profile_handle(profile.into_handle()) .build() diff --git a/examples/execution_profile.rs b/examples/execution_profile.rs index 3562966ac..944245660 100644 --- a/examples/execution_profile.rs +++ b/examples/execution_profile.rs @@ -4,7 +4,7 @@ use scylla::query::Query; use scylla::retry_policy::{DefaultRetryPolicy, FallthroughRetryPolicy}; use scylla::speculative_execution::PercentileSpeculativeExecutionPolicy; use scylla::statement::{Consistency, SerialConsistency}; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::transport::ExecutionProfile; use scylla::{SessionBuilder, SessionConfig}; use std::env; @@ -42,13 +42,13 @@ async fn main() -> Result<()> { let mut handle2 = profile2.into_handle(); // It is even possible to use multiple sessions interleaved, having them configured with different profiles. - let session1: Session = SessionBuilder::new() + let session1: LegacySession = SessionBuilder::new() .known_node(&uri) .default_execution_profile_handle(handle1.clone()) .build() .await?; - let session2: Session = SessionBuilder::new() + let session2: LegacySession = SessionBuilder::new() .known_node(&uri) .default_execution_profile_handle(handle2.clone()) .build() @@ -57,7 +57,7 @@ async fn main() -> Result<()> { // As default execution profile is not provided explicitly, session 3 uses a predefined one. 
let mut session_3_config = SessionConfig::new(); session_3_config.add_known_node(uri); - let session3: Session = Session::connect(session_3_config).await?; + let session3: LegacySession = LegacySession::connect(session_3_config).await?; session1.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/get_by_name.rs b/examples/get_by_name.rs index bb750de1b..2f3996e5e 100644 --- a/examples/get_by_name.rs +++ b/examples/get_by_name.rs @@ -1,5 +1,5 @@ use anyhow::{anyhow, Result}; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use std::env; @@ -10,7 +10,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/logging.rs b/examples/logging.rs index 6b090acbc..00071c4cd 100644 --- a/examples/logging.rs +++ b/examples/logging.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use std::env; use tracing::info; @@ -16,7 +16,7 @@ async fn main() -> Result<()> { let uri = env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string()); info!("Connecting to {}", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session.query_unpaged("USE examples_ks", &[]).await?; diff --git a/examples/logging_log.rs b/examples/logging_log.rs index da82f4224..9e8c81687 100644 --- a/examples/logging_log.rs +++ b/examples/logging_log.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use std::env; use tracing::info; @@ -18,7 +18,7 @@ async fn main() -> Result<()> { let uri = env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string()); info!("Connecting to {}", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session.query_unpaged("USE examples_ks", &[]).await?; diff --git a/examples/parallel-prepared.rs b/examples/parallel-prepared.rs index 167b58394..e848b305c 100644 --- a/examples/parallel-prepared.rs +++ b/examples/parallel-prepared.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use scylla::{Session, SessionBuilder}; +use scylla::{LegacySession, SessionBuilder}; use std::env; use std::sync::Arc; @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; let session = Arc::new(session); 
session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/parallel.rs b/examples/parallel.rs index 716225fb7..3cf191661 100644 --- a/examples/parallel.rs +++ b/examples/parallel.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use scylla::{Session, SessionBuilder}; +use scylla::{LegacySession, SessionBuilder}; use std::env; use std::sync::Arc; @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; let session = Arc::new(session); session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/query_history.rs b/examples/query_history.rs index d5e361f0e..61ea56723 100644 --- a/examples/query_history.rs +++ b/examples/query_history.rs @@ -4,7 +4,7 @@ use anyhow::Result; use futures::StreamExt; use scylla::history::{HistoryCollector, StructuredHistory}; use scylla::query::Query; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use std::env; use std::sync::Arc; @@ -15,7 +15,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/schema_agreement.rs b/examples/schema_agreement.rs index 4709873fc..bfc5ed01b 100644 --- a/examples/schema_agreement.rs +++ b/examples/schema_agreement.rs @@ -1,7 +1,7 @@ use anyhow::{bail, Result}; use futures::TryStreamExt; use scylla::transport::errors::QueryError; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use std::env; use std::time::Duration; @@ -13,7 +13,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(uri) .schema_agreement_interval(Duration::from_secs(1)) // check every second for schema agreement if not agreed first check .build() diff --git a/examples/select-paging.rs b/examples/select-paging.rs index b3a19e324..36d40f62d 100644 --- a/examples/select-paging.rs +++ b/examples/select-paging.rs @@ -1,7 +1,7 @@ use anyhow::Result; use futures::stream::StreamExt; use scylla::statement::PagingState; -use scylla::{query::Query, Session, SessionBuilder}; +use scylla::{query::Query, LegacySession, SessionBuilder}; use std::env; use std::ops::ControlFlow; @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/speculative-execution.rs b/examples/speculative-execution.rs index e6c64e3ad..13513c1d9 100644 --- 
a/examples/speculative-execution.rs +++ b/examples/speculative-execution.rs @@ -1,6 +1,6 @@ use scylla::{ speculative_execution::PercentileSpeculativeExecutionPolicy, - transport::execution_profile::ExecutionProfile, Session, SessionBuilder, + transport::execution_profile::ExecutionProfile, LegacySession, SessionBuilder, }; use anyhow::Result; @@ -20,7 +20,7 @@ async fn main() -> Result<()> { .speculative_execution_policy(Some(Arc::new(speculative))) .build(); - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(uri) .default_execution_profile_handle(speculative_profile.into_handle()) .build() diff --git a/examples/tls.rs b/examples/tls.rs index 067135214..c41e5e7f9 100644 --- a/examples/tls.rs +++ b/examples/tls.rs @@ -1,6 +1,6 @@ use anyhow::Result; use futures::TryStreamExt; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use std::env; use std::fs; @@ -44,7 +44,7 @@ async fn main() -> Result<()> { context_builder.set_ca_file(ca_dir.as_path())?; context_builder.set_verify(SslVerifyMode::PEER); - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(uri) .ssl_context(Some(context_builder.build())) .build() diff --git a/examples/tower.rs b/examples/tower.rs index 0d28407da..5f89890fc 100644 --- a/examples/tower.rs +++ b/examples/tower.rs @@ -7,7 +7,7 @@ use std::task::Poll; use tower::Service; struct SessionService { - session: Arc, + session: Arc, } // A trivial service implementation for sending parameterless simple string requests to Scylla. diff --git a/examples/tracing.rs b/examples/tracing.rs index 12767de5b..2ce7b2e61 100644 --- a/examples/tracing.rs +++ b/examples/tracing.rs @@ -10,7 +10,7 @@ use scylla::statement::{ use scylla::tracing::TracingInfo; use scylla::transport::iterator::LegacyRowIterator; use scylla::LegacyQueryResult; -use scylla::{Session, SessionBuilder}; +use scylla::{LegacySession, SessionBuilder}; use std::env; use std::num::NonZeroU32; use std::time::Duration; @@ -21,7 +21,7 @@ async fn main() -> Result<()> { let uri = env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string()); println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(uri.as_str()) .build() .await?; @@ -112,7 +112,7 @@ async fn main() -> Result<()> { // Session configuration allows specifying custom settings for querying tracing info. // Tracing info might not immediately be available on queried node // so the driver performs a few attempts with sleeps in between. 
- let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(uri) .tracing_info_fetch_attempts(NonZeroU32::new(8).unwrap()) .tracing_info_fetch_interval(Duration::from_millis(100)) diff --git a/examples/user-defined-type.rs b/examples/user-defined-type.rs index 6e2d65286..e8be4b2f9 100644 --- a/examples/user-defined-type.rs +++ b/examples/user-defined-type.rs @@ -1,7 +1,7 @@ use anyhow::Result; use futures::TryStreamExt; use scylla::macros::FromUserType; -use scylla::{SerializeValue, Session, SessionBuilder}; +use scylla::{LegacySession, SerializeValue, SessionBuilder}; use std::env; #[tokio::main] @@ -10,7 +10,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/value_list.rs b/examples/value_list.rs index 81568baee..e72d488b4 100644 --- a/examples/value_list.rs +++ b/examples/value_list.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use scylla::{Session, SessionBuilder}; +use scylla::{LegacySession, SessionBuilder}; use std::env; #[tokio::main] @@ -8,7 +8,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: Session = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index aaa1506bd..0ecdc09f0 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -17,12 +17,12 @@ //! `Session` is created by specifying a few known nodes and connecting to them: //! //! ```rust,no_run -//! use scylla::{Session, SessionBuilder}; +//! use scylla::{LegacySession, SessionBuilder}; //! use std::error::Error; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { -//! let session: Session = SessionBuilder::new() +//! let session: LegacySession = SessionBuilder::new() //! .known_node("127.0.0.1:9042") //! .known_node("1.2.3.4:9876") //! .build() @@ -50,9 +50,9 @@ //! //! The easiest way to specify bound values in a query is using a tuple: //! ```rust -//! # use scylla::Session; +//! # use scylla::LegacySession; //! # use std::error::Error; -//! # async fn check_only_compiles(session: &Session) -> Result<(), Box> { +//! # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { //! // Insert an int and text into the table //! session //! .query_unpaged( @@ -69,9 +69,9 @@ //! The easiest way to read rows returned by a query is to cast each row to a tuple of values: //! //! ```rust -//! # use scylla::Session; +//! # use scylla::LegacySession; //! # use std::error::Error; -//! # async fn check_only_compiles(session: &Session) -> Result<(), Box> { +//! # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { //! use scylla::IntoTypedRows; //! //! 
// Read rows containing an int and text @@ -260,7 +260,7 @@ pub use transport::caching_session::CachingSession; pub use transport::execution_profile::ExecutionProfile; pub use transport::legacy_query_result::LegacyQueryResult; pub use transport::query_result::{QueryResult, QueryRowsResult}; -pub use transport::session::{IntoTypedRows, Session, SessionConfig}; +pub use transport::session::{IntoTypedRows, LegacySession, SessionConfig}; pub use transport::session_builder::SessionBuilder; #[cfg(feature = "cloud")] diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index cbf9d3c6d..2b0fcc05e 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -5,7 +5,7 @@ use crate::statement::{PagingState, PagingStateResponse}; use crate::transport::errors::QueryError; use crate::transport::iterator::LegacyRowIterator; use crate::transport::partitioner::PartitionerName; -use crate::{LegacyQueryResult, Session}; +use crate::{LegacyQueryResult, LegacySession}; use bytes::Bytes; use dashmap::DashMap; use futures::future::try_join_all; @@ -35,7 +35,7 @@ pub struct CachingSession where S: Clone + BuildHasher, { - session: Session, + session: LegacySession, /// The prepared statement cache size /// If a prepared statement is added while the limit is reached, the oldest prepared statement /// is removed from the cache @@ -47,7 +47,7 @@ impl CachingSession where S: Default + BuildHasher + Clone, { - pub fn from(session: Session, cache_size: usize) -> Self { + pub fn from(session: LegacySession, cache_size: usize) -> Self { Self { session, max_capacity: cache_size, @@ -62,7 +62,7 @@ where { /// Builds a [`CachingSession`] from a [`Session`], a cache size, and a [`BuildHasher`]., /// using a customer hasher. 
- pub fn with_hasher(session: Session, cache_size: usize, hasher: S) -> Self { + pub fn with_hasher(session: LegacySession, cache_size: usize, hasher: S) -> Self { Self { session, max_capacity: cache_size, @@ -212,7 +212,7 @@ where self.max_capacity } - pub fn get_session(&self) -> &Session { + pub fn get_session(&self) -> &LegacySession { &self.session } } @@ -227,12 +227,12 @@ mod tests { use crate::{ batch::{Batch, BatchStatement}, prepared_statement::PreparedStatement, - CachingSession, Session, + CachingSession, LegacySession, }; use futures::TryStreamExt; use std::collections::BTreeSet; - async fn new_for_test(with_tablet_support: bool) -> Session { + async fn new_for_test(with_tablet_support: bool) -> LegacySession { let session = create_new_session_builder() .build() .await diff --git a/scylla/src/transport/cql_collections_test.rs b/scylla/src/transport/cql_collections_test.rs index d9fb52150..fe2a8a8d2 100644 --- a/scylla/src/transport/cql_collections_test.rs +++ b/scylla/src/transport/cql_collections_test.rs @@ -1,11 +1,11 @@ use crate::cql_to_rust::FromCqlVal; use crate::test_utils::{create_new_session_builder, setup_tracing}; use crate::utils::test_utils::unique_keyspace_name; -use crate::{frame::response::result::CqlValue, Session}; +use crate::{frame::response::result::CqlValue, LegacySession}; use scylla_cql::types::serialize::value::SerializeValue; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -async fn connect() -> Session { +async fn connect() -> LegacySession { let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -14,7 +14,7 @@ async fn connect() -> Session { session } -async fn create_table(session: &Session, table_name: &str, value_type: &str) { +async fn create_table(session: &LegacySession, table_name: &str, value_type: &str) { session .query_unpaged( format!( @@ -28,7 +28,7 @@ async fn create_table(session: &Session, table_name: &str, value_type: &str) { } async fn insert_and_select( - session: &Session, + session: &LegacySession, table_name: &str, to_insert: &InsertT, expected: &SelectT, @@ -58,7 +58,7 @@ async fn insert_and_select( #[tokio::test] async fn test_cql_list() { setup_tracing(); - let session: Session = connect().await; + let session: LegacySession = connect().await; let table_name: &str = "test_cql_list_tab"; create_table(&session, table_name, "list").await; @@ -91,7 +91,7 @@ async fn test_cql_list() { #[tokio::test] async fn test_cql_set() { setup_tracing(); - let session: Session = connect().await; + let session: LegacySession = connect().await; let table_name: &str = "test_cql_set_tab"; create_table(&session, table_name, "set").await; @@ -155,7 +155,7 @@ async fn test_cql_set() { #[tokio::test] async fn test_cql_map() { setup_tracing(); - let session: Session = connect().await; + let session: LegacySession = connect().await; let table_name: &str = "test_cql_map_tab"; create_table(&session, table_name, "map").await; @@ -206,7 +206,7 @@ async fn test_cql_map() { #[tokio::test] async fn test_cql_tuple() { setup_tracing(); - let session: Session = connect().await; + let session: LegacySession = connect().await; let table_name: &str = "test_cql_tuple_tab"; create_table(&session, table_name, "tuple").await; diff --git a/scylla/src/transport/cql_types_test.rs b/scylla/src/transport/cql_types_test.rs index 
072e7b8fd..32dd11638 100644 --- a/scylla/src/transport/cql_types_test.rs +++ b/scylla/src/transport/cql_types_test.rs @@ -4,7 +4,7 @@ use crate::frame::response::result::CqlValue; use crate::frame::value::{Counter, CqlDate, CqlTime, CqlTimestamp}; use crate::macros::FromUserType; use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing}; -use crate::transport::session::Session; +use crate::transport::session::LegacySession; use crate::utils::test_utils::unique_keyspace_name; use itertools::Itertools; use scylla_cql::frame::value::{CqlTimeuuid, CqlVarint}; @@ -22,8 +22,8 @@ async fn init_test_maybe_without_tablets( table_name: &str, type_name: &str, supports_tablets: bool, -) -> Session { - let session: Session = create_new_session_builder().build().await.unwrap(); +) -> LegacySession { + let session: LegacySession = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); let mut create_ks = format!( @@ -61,7 +61,7 @@ async fn init_test_maybe_without_tablets( // Used to prepare a table for test // Creates a new keyspace // Drops and creates table {table_name} (id int PRIMARY KEY, val {type_name}) -async fn init_test(table_name: &str, type_name: &str) -> Session { +async fn init_test(table_name: &str, type_name: &str) -> LegacySession { init_test_maybe_without_tablets(table_name, type_name, true).await } @@ -77,7 +77,7 @@ async fn run_tests(tests: &[&str], type_name: &str) where T: SerializeValue + FromCqlVal + FromStr + Debug + Clone + PartialEq, { - let session: Session = init_test(type_name, type_name).await; + let session: LegacySession = init_test(type_name, type_name).await; session.await_schema_agreement().await.unwrap(); for test in tests.iter() { @@ -168,7 +168,7 @@ async fn test_cql_varint() { ]; let table_name = "cql_varint_tests"; - let session: Session = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -278,7 +278,7 @@ async fn test_counter() { // Can't use run_tests, because counters are special and can't be inserted let type_name = "counter"; - let session: Session = init_test_maybe_without_tablets(type_name, type_name, false).await; + let session: LegacySession = init_test_maybe_without_tablets(type_name, type_name, false).await; for (i, test) in tests.iter().enumerate() { let update_bound_value = format!("UPDATE {} SET val = val + ? 
WHERE id = ?", type_name); @@ -311,7 +311,7 @@ async fn test_naive_date_04() { use chrono::Datelike; use chrono::NaiveDate; - let session: Session = init_test("chrono_naive_date_tests", "date").await; + let session: LegacySession = init_test("chrono_naive_date_tests", "date").await; let min_naive_date: NaiveDate = NaiveDate::MIN; let min_naive_date_string = min_naive_date.format("%Y-%m-%d").to_string(); @@ -404,7 +404,7 @@ async fn test_cql_date() { setup_tracing(); // Tests value::Date which allows to insert dates outside NaiveDate range - let session: Session = init_test("cql_date_tests", "date").await; + let session: LegacySession = init_test("cql_date_tests", "date").await; let tests = [ ("1970-01-01", CqlDate(2_u32.pow(31))), @@ -465,7 +465,7 @@ async fn test_date_03() { setup_tracing(); use time::{Date, Month::*}; - let session: Session = init_test("time_date_tests", "date").await; + let session: LegacySession = init_test("time_date_tests", "date").await; let tests = [ // Basic test values @@ -551,7 +551,7 @@ async fn test_cql_time() { // CqlTime is an i64 - nanoseconds since midnight // in range 0..=86399999999999 - let session: Session = init_test("cql_time_tests", "time").await; + let session: LegacySession = init_test("cql_time_tests", "time").await; let max_time: i64 = 24 * 60 * 60 * 1_000_000_000 - 1; assert_eq!(max_time, 86399999999999); @@ -784,7 +784,7 @@ async fn test_time_03() { #[tokio::test] async fn test_cql_timestamp() { setup_tracing(); - let session: Session = init_test("cql_timestamp_tests", "timestamp").await; + let session: LegacySession = init_test("cql_timestamp_tests", "timestamp").await; //let epoch_date = NaiveDate::from_ymd_opt(1970, 1, 1).unwrap(); @@ -1164,7 +1164,7 @@ async fn test_offset_date_time_03() { #[tokio::test] async fn test_timeuuid() { setup_tracing(); - let session: Session = init_test("timeuuid_tests", "timeuuid").await; + let session: LegacySession = init_test("timeuuid_tests", "timeuuid").await; // A few random timeuuids generated manually let tests = [ @@ -1234,7 +1234,7 @@ async fn test_timeuuid() { #[tokio::test] async fn test_timeuuid_ordering() { setup_tracing(); - let session: Session = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1316,7 +1316,7 @@ async fn test_timeuuid_ordering() { #[tokio::test] async fn test_inet() { setup_tracing(); - let session: Session = init_test("inet_tests", "inet").await; + let session: LegacySession = init_test("inet_tests", "inet").await; let tests = [ ("0.0.0.0", IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), @@ -1397,7 +1397,7 @@ async fn test_inet() { #[tokio::test] async fn test_blob() { setup_tracing(); - let session: Session = init_test("blob_tests", "blob").await; + let session: LegacySession = init_test("blob_tests", "blob").await; let long_blob: Vec = vec![0x11; 1234]; let mut long_blob_str: String = "0x".to_string(); @@ -1466,7 +1466,7 @@ async fn test_udt_after_schema_update() { let table_name = "udt_tests"; let type_name = "usertype1"; - let session: Session = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1595,7 +1595,7 @@ async fn test_udt_after_schema_update() { #[tokio::test] async fn test_empty() { setup_tracing(); - let session: Session = init_test("empty_tests", "int").await; + let session: LegacySession = 
init_test("empty_tests", "int").await; session .query_unpaged( @@ -1638,7 +1638,7 @@ async fn test_udt_with_missing_field() { let table_name = "udt_tests"; let type_name = "usertype1"; - let session: Session = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1689,7 +1689,7 @@ async fn test_udt_with_missing_field() { let mut id = 0; async fn verify_insert_select_identity( - session: &Session, + session: &LegacySession, table_name: &str, id: i32, element: TQ, diff --git a/scylla/src/transport/cql_value_test.rs b/scylla/src/transport/cql_value_test.rs index 781ab919b..be1047ede 100644 --- a/scylla/src/transport/cql_value_test.rs +++ b/scylla/src/transport/cql_value_test.rs @@ -2,12 +2,12 @@ use crate::frame::{response::result::CqlValue, value::CqlDuration}; use crate::test_utils::{create_new_session_builder, setup_tracing}; use crate::utils::test_utils::unique_keyspace_name; -use crate::Session; +use crate::LegacySession; #[tokio::test] async fn test_cqlvalue_udt() { setup_tracing(); - let session: Session = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session .query_unpaged( @@ -72,7 +72,7 @@ async fn test_cqlvalue_udt() { #[tokio::test] async fn test_cqlvalue_duration() { setup_tracing(); - let session: Session = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session diff --git a/scylla/src/transport/execution_profile.rs b/scylla/src/transport/execution_profile.rs index a94addec5..2854611b4 100644 --- a/scylla/src/transport/execution_profile.rs +++ b/scylla/src/transport/execution_profile.rs @@ -16,7 +16,7 @@ //! # extern crate scylla; //! # use std::error::Error; //! # async fn check_only_compiles() -> Result<(), Box> { -//! use scylla::{Session, SessionBuilder}; +//! use scylla::{LegacySession, SessionBuilder}; //! use scylla::statement::Consistency; //! use scylla::transport::ExecutionProfile; //! @@ -27,7 +27,7 @@ //! //! let handle = profile.into_handle(); //! -//! let session: Session = SessionBuilder::new() +//! let session: LegacySession = SessionBuilder::new() //! .known_node("127.0.0.1:9042") //! .default_execution_profile_handle(handle) //! .build() @@ -109,7 +109,7 @@ //! # extern crate scylla; //! # use std::error::Error; //! # async fn check_only_compiles() -> Result<(), Box> { -//! use scylla::{Session, SessionBuilder}; +//! use scylla::{LegacySession, SessionBuilder}; //! use scylla::query::Query; //! use scylla::statement::Consistency; //! use scylla::transport::ExecutionProfile; @@ -125,7 +125,7 @@ //! let mut handle1 = profile1.clone().into_handle(); //! let mut handle2 = profile2.clone().into_handle(); //! -//! let session: Session = SessionBuilder::new() +//! let session: LegacySession = SessionBuilder::new() //! .known_node("127.0.0.1:9042") //! .default_execution_profile_handle(handle1.clone()) //! 
.build() diff --git a/scylla/src/transport/large_batch_statements_test.rs b/scylla/src/transport/large_batch_statements_test.rs index 33628a49d..2b394ed32 100644 --- a/scylla/src/transport/large_batch_statements_test.rs +++ b/scylla/src/transport/large_batch_statements_test.rs @@ -7,7 +7,7 @@ use crate::transport::errors::{BadQuery, QueryError}; use crate::{ batch::Batch, test_utils::{create_new_session_builder, unique_keyspace_name}, - LegacyQueryResult, Session, + LegacyQueryResult, LegacySession, }; #[tokio::test] @@ -31,7 +31,7 @@ async fn test_large_batch_statements() { ) } -async fn create_test_session(session: Session, ks: &String) -> Session { +async fn create_test_session(session: LegacySession, ks: &String) -> LegacySession { session .query_unpaged( format!("CREATE KEYSPACE {} WITH REPLICATION = {{ 'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1 }}",ks), @@ -52,7 +52,7 @@ async fn create_test_session(session: Session, ks: &String) -> Session { } async fn write_batch( - session: &Session, + session: &LegacySession, n: usize, ks: &String, ) -> Result { diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 1defa514b..42a959684 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -155,7 +155,7 @@ impl AddressTranslator for HashMap<&'static str, &'static str> { } /// `Session` manages connections to the cluster and allows to perform queries -pub struct Session { +pub struct LegacySession { cluster: Cluster, default_execution_profile_handle: ExecutionProfileHandle, schema_agreement_interval: Duration, @@ -171,7 +171,7 @@ pub struct Session { /// This implementation deliberately omits some details from Cluster in order /// to avoid cluttering the print with much information of little usability. -impl std::fmt::Debug for Session { +impl std::fmt::Debug for LegacySession { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Session") .field("cluster", &ClusterNeatDebug(&self.cluster)) @@ -435,8 +435,8 @@ pub(crate) enum RunQueryResult { /// Represents a CQL session, which can be used to communicate /// with the database -impl Session { - /// Establishes a CQL session with the database +impl LegacySession { + /// Estabilishes a CQL session with the database /// /// Usually it's easier to use [SessionBuilder](crate::transport::session_builder::SessionBuilder) /// instead of calling `Session::connect` directly, because it's more convenient. 
@@ -448,17 +448,17 @@ impl Session { /// ```rust /// # use std::error::Error; /// # async fn check_only_compiles() -> Result<(), Box> { - /// use scylla::{Session, SessionConfig}; + /// use scylla::{LegacySession, SessionConfig}; /// use scylla::transport::KnownNode; /// /// let mut config = SessionConfig::new(); /// config.known_nodes.push(KnownNode::Hostname("127.0.0.1:9042".to_string())); /// - /// let session: Session = Session::connect(config).await?; + /// let session: LegacySession = LegacySession::connect(config).await?; /// # Ok(()) /// # } /// ``` - pub async fn connect(config: SessionConfig) -> Result { + pub async fn connect(config: SessionConfig) -> Result { let known_nodes = config.known_nodes; #[cfg(feature = "cloud")] @@ -532,7 +532,7 @@ impl Session { let default_execution_profile_handle = config.default_execution_profile_handle; - let session = Session { + let session = LegacySession { cluster, default_execution_profile_handle, schema_agreement_interval: config.schema_agreement_interval, @@ -578,9 +578,9 @@ impl Session { /// /// # Examples /// ```rust - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// // Insert an int and text into a table. /// session /// .query_unpaged( @@ -592,9 +592,9 @@ impl Session { /// # } /// ``` /// ```rust - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// use scylla::IntoTypedRows; /// /// // Read rows containing an int and text. 
@@ -646,9 +646,9 @@ impl Session { /// # Example /// /// ```rust - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// use std::ops::ControlFlow; /// use scylla::statement::PagingState; /// @@ -858,9 +858,9 @@ impl Session { /// # Example /// /// ```rust - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// use scylla::IntoTypedRows; /// use futures::stream::StreamExt; /// @@ -935,9 +935,9 @@ impl Session { /// /// # Example /// ```rust - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// use scylla::prepared_statement::PreparedStatement; /// /// // Prepare the query for later execution @@ -1034,9 +1034,9 @@ impl Session { /// /// # Example /// ```rust - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// use scylla::prepared_statement::PreparedStatement; /// /// // Prepare the query for later execution @@ -1078,9 +1078,9 @@ impl Session { /// # Example /// /// ```rust - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// use std::ops::ControlFlow; /// use scylla::query::Query; /// use scylla::statement::{PagingState, PagingStateResponse}; @@ -1257,9 +1257,9 @@ impl Session { /// # Example /// /// ```rust - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// use scylla::prepared_statement::PreparedStatement; /// use scylla::IntoTypedRows; /// use futures::stream::StreamExt; @@ -1325,9 +1325,9 @@ impl Session { /// /// # Example /// ```rust - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// use scylla::batch::Batch; /// /// let mut batch: Batch = Default::default(); @@ -1447,9 +1447,9 @@ impl Session { /// /// # Example /// ```rust /// # extern crate scylla; - /// # use scylla::Session; + /// # use scylla::LegacySession; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { /// use scylla::batch::Batch; /// /// // Create a batch statement with unprepared statements @@ -1508,7 +1508,7 @@ impl Session { /// * `case_sensitive` - if set to true the generated query will put keyspace name in quotes /// # Example /// ```rust - /// # use scylla::{Session, 
SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { /// # let session = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?; diff --git a/scylla/src/transport/session_builder.rs b/scylla/src/transport/session_builder.rs index 9a7a9cbf7..15e10e098 100644 --- a/scylla/src/transport/session_builder.rs +++ b/scylla/src/transport/session_builder.rs @@ -2,7 +2,7 @@ use super::connection::SelfIdentity; use super::execution_profile::ExecutionProfileHandle; -use super::session::{AddressTranslator, Session, SessionConfig}; +use super::session::{AddressTranslator, LegacySession, SessionConfig}; use super::Compression; #[cfg(feature = "cloud")] @@ -59,10 +59,10 @@ pub type CloudSessionBuilder = GenericSessionBuilder; /// # Example /// /// ``` -/// # use scylla::{Session, SessionBuilder}; +/// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { -/// let session: Session = SessionBuilder::new() +/// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .compression(Some(Compression::Snappy)) /// .build() @@ -94,17 +94,17 @@ impl GenericSessionBuilder { /// Add a known node with a hostname /// # Examples /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?; + /// let session: LegacySession = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?; /// # Ok(()) /// # } /// ``` /// /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new().known_node("db1.example.com").build().await?; + /// let session: LegacySession = SessionBuilder::new().known_node("db1.example.com").build().await?; /// # Ok(()) /// # } /// ``` @@ -116,10 +116,10 @@ impl GenericSessionBuilder { /// Add a known node with an IP address /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use std::net::{SocketAddr, IpAddr, Ipv4Addr}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node_addr(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 9042)) /// .build() /// .await?; @@ -134,9 +134,9 @@ impl GenericSessionBuilder { /// Add a list of known nodes with hostnames /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_nodes(["127.0.0.1:9042", "db1.example.com"]) /// .build() /// .await?; @@ -151,13 +151,13 @@ impl GenericSessionBuilder { /// Add a list of known nodes with IP addresses /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use std::net::{SocketAddr, IpAddr, Ipv4Addr}; /// # async fn example() -> Result<(), Box> { /// let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(172, 17, 0, 3)), 9042); /// let addr2 = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(172, 17, 0, 4)), 9042); /// - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_nodes_addr([addr1, addr2]) /// .build() /// .await?; @@ -177,10 +177,10 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .use_keyspace("my_keyspace_name", false) /// .user("cassandra", "cassandra") @@ -203,7 +203,7 @@ impl GenericSessionBuilder { /// ``` /// # use std::sync::Arc; /// use bytes::Bytes; - /// use scylla::{Session, SessionBuilder}; + /// use scylla::{LegacySession, SessionBuilder}; /// use async_trait::async_trait; /// use scylla::authentication::{AuthenticatorProvider, AuthenticatorSession, AuthError}; /// # use scylla::transport::Compression; @@ -231,7 +231,7 @@ impl GenericSessionBuilder { /// } /// /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .use_keyspace("my_keyspace_name", false) /// .user("cassandra", "cassandra") @@ -257,7 +257,7 @@ impl GenericSessionBuilder { /// # use async_trait::async_trait; /// # use std::net::SocketAddr; /// # use std::sync::Arc; - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::session::{AddressTranslator, TranslationError}; /// # use scylla::transport::topology::UntranslatedPeer; /// struct IdentityTranslator; @@ -273,7 +273,7 @@ impl GenericSessionBuilder { /// } /// /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .address_translator(Arc::new(IdentityTranslator)) /// .build() @@ -287,7 +287,7 @@ impl GenericSessionBuilder { /// # use std::sync::Arc; /// # use std::collections::HashMap; /// # use std::str::FromStr; - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::session::{AddressTranslator, TranslationError}; /// # /// # async fn example() -> Result<(), Box> { @@ -295,7 +295,7 @@ impl GenericSessionBuilder { /// let addr_before_translation = SocketAddr::from_str("192.168.0.42:19042").unwrap(); /// let addr_after_translation = SocketAddr::from_str("157.123.12.42:23203").unwrap(); /// translation_rules.insert(addr_before_translation, addr_after_translation); - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .address_translator(Arc::new(translation_rules)) /// .build() @@ -318,7 +318,7 @@ impl GenericSessionBuilder { /// ``` /// # use std::fs; /// # use std::path::PathBuf; - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use openssl::ssl::{SslContextBuilder, SslVerifyMode, SslMethod, SslFiletype}; /// # async fn example() -> Result<(), Box> { /// let certdir = fs::canonicalize(PathBuf::from("./examples/certs/scylla.crt"))?; @@ -326,7 +326,7 @@ impl GenericSessionBuilder { /// context_builder.set_certificate_file(certdir.as_path(), 
SslFiletype::PEM)?; /// context_builder.set_verify(SslVerifyMode::NONE); /// - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .ssl_context(Some(context_builder.build())) /// .build() @@ -341,8 +341,8 @@ impl GenericSessionBuilder { } } -// NOTE: this `impl` block contains configuration options specific for **Cloud** [`Session`]. -// This means that if an option fits both non-Cloud and Cloud `Session`s, it should NOT be put +// NOTE: this `impl` block contains configuration options specific for **Cloud** [`LegacySession`]. +// This means that if an option fits both non-Cloud and Cloud `LegacySession`s, it should NOT be put // here, but rather in `impl GenericSessionBuilder` block. #[cfg(feature = "cloud")] impl CloudSessionBuilder { @@ -377,10 +377,10 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .compression(Some(Compression::Snappy)) /// .build() @@ -398,10 +398,10 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use std::time::Duration; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .schema_agreement_interval(Duration::from_secs(5)) /// .build() @@ -418,14 +418,14 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{statement::Consistency, ExecutionProfile, Session, SessionBuilder}; + /// # use scylla::{statement::Consistency, ExecutionProfile, LegacySession, SessionBuilder}; /// # use std::time::Duration; /// # async fn example() -> Result<(), Box> { /// let execution_profile = ExecutionProfile::builder() /// .consistency(Consistency::All) /// .request_timeout(Some(Duration::from_secs(2))) /// .build(); - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .default_execution_profile_handle(execution_profile.into_handle()) /// .build() @@ -446,9 +446,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tcp_nodelay(true) /// .build() @@ -469,9 +469,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tcp_keepalive_interval(std::time::Duration::from_secs(42)) /// .build() @@ -497,10 +497,10 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::Compression; /// # 
async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .use_keyspace("my_keyspace_name", false) /// .build() @@ -518,10 +518,10 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .compression(Some(Compression::Snappy)) /// .build() // Turns SessionBuilder into Session @@ -529,8 +529,8 @@ impl GenericSessionBuilder { /// # Ok(()) /// # } /// ``` - pub async fn build(&self) -> Result { - Session::connect(self.config.clone()).await + pub async fn build(&self) -> Result { + LegacySession::connect(self.config.clone()).await } /// Changes connection timeout @@ -539,10 +539,10 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use std::time::Duration; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .connection_timeout(Duration::from_secs(30)) /// .build() // Turns SessionBuilder into Session @@ -560,14 +560,14 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { /// use std::num::NonZeroUsize; /// use scylla::transport::session::PoolSize; /// /// // This session will establish 4 connections to each node. 
/// // For Scylla clusters, this number will be divided across shards - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .pool_size(PoolSize::PerHost(NonZeroUsize::new(4).unwrap())) /// .build() @@ -604,9 +604,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .disallow_shard_aware_port(true) /// .build() @@ -624,9 +624,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .keyspaces_to_fetch(["my_keyspace"]) /// .build() @@ -647,9 +647,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .fetch_schema_metadata(true) /// .build() @@ -670,9 +670,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .keepalive_interval(std::time::Duration::from_secs(42)) /// .build() @@ -700,9 +700,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .keepalive_timeout(std::time::Duration::from_secs(42)) /// .build() @@ -727,9 +727,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .schema_agreement_timeout(std::time::Duration::from_secs(120)) /// .build() @@ -747,9 +747,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .auto_await_schema_agreement(false) /// .build() @@ -775,13 +775,13 @@ impl GenericSessionBuilder { /// # use async_trait::async_trait; /// # use std::net::SocketAddr; /// # use std::sync::Arc; - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use 
scylla::transport::session::{AddressTranslator, TranslationError}; /// # use scylla::transport::host_filter::DcHostFilter; /// /// # async fn example() -> Result<(), Box> { /// // The session will only connect to nodes from "my-local-dc" - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .host_filter(Arc::new(DcHostFilter::new("my-local-dc".to_string()))) /// .build() @@ -799,9 +799,9 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .refresh_metadata_on_auto_schema_agreement(true) /// .build() @@ -815,7 +815,7 @@ impl GenericSessionBuilder { } /// Set the number of attempts to fetch [TracingInfo](crate::tracing::TracingInfo) - /// in [`Session::get_tracing_info`]. + /// in [`LegacySession::get_tracing_info`]. /// The default is 5 attempts. /// /// Tracing info might not be available immediately on queried node - that's why @@ -827,10 +827,10 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use std::num::NonZeroU32; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tracing_info_fetch_attempts(NonZeroU32::new(10).unwrap()) /// .build() @@ -844,7 +844,7 @@ impl GenericSessionBuilder { } /// Set the delay between attempts to fetch [TracingInfo](crate::tracing::TracingInfo) - /// in [`Session::get_tracing_info`]. + /// in [`LegacySession::get_tracing_info`]. /// The default is 3 milliseconds. /// /// Tracing info might not be available immediately on queried node - that's why @@ -856,10 +856,10 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use std::time::Duration; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tracing_info_fetch_interval(Duration::from_millis(50)) /// .build() @@ -873,14 +873,14 @@ impl GenericSessionBuilder { } /// Set the consistency level of fetching [TracingInfo](crate::tracing::TracingInfo) - /// in [`Session::get_tracing_info`]. + /// in [`LegacySession::get_tracing_info`]. /// The default is [`Consistency::One`]. 
/// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder, statement::Consistency}; + /// # use scylla::{LegacySession, SessionBuilder, statement::Consistency}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tracing_info_fetch_consistency(Consistency::One) /// .build() @@ -908,10 +908,10 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .write_coalescing(false) // Enabled by default /// .build() @@ -933,9 +933,9 @@ impl GenericSessionBuilder { /// means that the metadata is refreshed every 20 seconds. /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .cluster_metadata_refresh_interval(std::time::Duration::from_secs(20)) /// .build() @@ -956,13 +956,13 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::SelfIdentity; /// # async fn example() -> Result<(), Box> { /// let (app_major, app_minor, app_patch) = (2, 1, 3); /// let app_version = format!("{app_major}.{app_minor}.{app_patch}"); /// - /// let session: Session = SessionBuilder::new() + /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .custom_identity( /// SelfIdentity::new() diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index d4222d3b5..b596e05b2 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -22,7 +22,7 @@ use crate::utils::test_utils::{ use crate::CachingSession; use crate::ExecutionProfile; use crate::LegacyQueryResult; -use crate::{Session, SessionBuilder}; +use crate::{LegacySession, SessionBuilder}; use assert_matches::assert_matches; use futures::{FutureExt, StreamExt, TryStreamExt}; use itertools::Itertools; @@ -714,7 +714,7 @@ async fn test_use_keyspace() { )); // Make sure that use_keyspace on SessionBuiler works - let session2: Session = create_new_session_builder() + let session2: LegacySession = create_new_session_builder() .use_keyspace(ks.clone(), false) .build() .await @@ -956,7 +956,7 @@ async fn test_tracing() { test_tracing_batch(&session, ks.clone()).await; } -async fn test_tracing_query(session: &Session, ks: String) { +async fn test_tracing_query(session: &LegacySession, ks: String) { // A query without tracing enabled has no tracing uuid in result let untraced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); let untraced_query_result: LegacyQueryResult = @@ -976,7 +976,7 @@ async fn test_tracing_query(session: &Session, ks: String) { assert_in_tracing_table(session, traced_query_result.tracing_id.unwrap()).await; } -async fn test_tracing_execute(session: &Session, ks: String) { +async fn test_tracing_execute(session: &LegacySession, ks: String) { // Executing a prepared statement 
without tracing enabled has no tracing uuid in result let untraced_prepared = session .prepare(format!("SELECT * FROM {}.tab", ks)) @@ -1008,7 +1008,7 @@ async fn test_tracing_execute(session: &Session, ks: String) { assert_in_tracing_table(session, traced_prepared_result.tracing_id.unwrap()).await; } -async fn test_tracing_prepare(session: &Session, ks: String) { +async fn test_tracing_prepare(session: &LegacySession, ks: String) { // Preparing a statement without tracing enabled has no tracing uuids in result let untraced_prepared = session .prepare(format!("SELECT * FROM {}.tab", ks)) @@ -1030,7 +1030,7 @@ async fn test_tracing_prepare(session: &Session, ks: String) { } } -async fn test_get_tracing_info(session: &Session, ks: String) { +async fn test_get_tracing_info(session: &LegacySession, ks: String) { // A query with tracing enabled has a tracing uuid in result let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); traced_query.config.tracing = true; @@ -1045,7 +1045,7 @@ async fn test_get_tracing_info(session: &Session, ks: String) { assert!(!tracing_info.nodes().is_empty()); } -async fn test_tracing_query_iter(session: &Session, ks: String) { +async fn test_tracing_query_iter(session: &LegacySession, ks: String) { // A query without tracing enabled has no tracing ids let untraced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); @@ -1080,7 +1080,7 @@ async fn test_tracing_query_iter(session: &Session, ks: String) { } } -async fn test_tracing_execute_iter(session: &Session, ks: String) { +async fn test_tracing_execute_iter(session: &LegacySession, ks: String) { // A prepared statement without tracing enabled has no tracing ids let untraced_prepared = session .prepare(format!("SELECT * FROM {}.tab", ks)) @@ -1121,7 +1121,7 @@ async fn test_tracing_execute_iter(session: &Session, ks: String) { } } -async fn test_tracing_batch(session: &Session, ks: String) { +async fn test_tracing_batch(session: &LegacySession, ks: String) { // A batch without tracing enabled has no tracing id let mut untraced_batch: Batch = Default::default(); untraced_batch.append_statement(&format!("INSERT INTO {}.tab (a) VALUES('a')", ks)[..]); @@ -1141,7 +1141,7 @@ async fn test_tracing_batch(session: &Session, ks: String) { assert_in_tracing_table(session, traced_batch_result.tracing_id.unwrap()).await; } -async fn assert_in_tracing_table(session: &Session, tracing_uuid: Uuid) { +async fn assert_in_tracing_table(session: &LegacySession, tracing_uuid: Uuid) { let mut traces_query = Query::new("SELECT * FROM system_traces.sessions WHERE session_id = ?"); traces_query.config.consistency = Some(Consistency::One); @@ -2009,7 +2009,7 @@ async fn test_prepared_partitioner() { ); } -async fn rename(session: &Session, rename_str: &str) { +async fn rename(session: &LegacySession, rename_str: &str) { session .query_unpaged(format!("ALTER TABLE tab RENAME {}", rename_str), ()) .await @@ -2338,7 +2338,7 @@ async fn test_views_in_schema_info() { ) } -async fn assert_test_batch_table_rows_contain(sess: &Session, expected_rows: &[(i32, i32)]) { +async fn assert_test_batch_table_rows_contain(sess: &LegacySession, expected_rows: &[(i32, i32)]) { let selected_rows: BTreeSet<(i32, i32)> = sess .query_unpaged("SELECT a, b FROM test_batch_table", ()) .await @@ -2583,7 +2583,7 @@ async fn test_batch_lwts() { } async fn test_batch_lwts_for_scylla( - session: &Session, + session: &LegacySession, batch: &Batch, batch_res: LegacyQueryResult, ) { @@ -2627,7 +2627,7 @@ async fn 
test_batch_lwts_for_scylla( } async fn test_batch_lwts_for_cassandra( - session: &Session, + session: &LegacySession, batch: &Batch, batch_res: LegacyQueryResult, ) { @@ -2917,7 +2917,7 @@ async fn test_manual_primary_key_computation() { session.use_keyspace(&ks, true).await.unwrap(); async fn assert_tokens_equal( - session: &Session, + session: &LegacySession, prepared: &PreparedStatement, serialized_pk_values_in_pk_order: &SerializedValues, all_values_in_query_order: impl SerializeRow, diff --git a/scylla/src/transport/silent_prepare_batch_test.rs b/scylla/src/transport/silent_prepare_batch_test.rs index ece8d1d3f..f8c7fb328 100644 --- a/scylla/src/transport/silent_prepare_batch_test.rs +++ b/scylla/src/transport/silent_prepare_batch_test.rs @@ -2,7 +2,7 @@ use crate::{ batch::Batch, prepared_statement::PreparedStatement, test_utils::{create_new_session_builder, setup_tracing, unique_keyspace_name}, - Session, + LegacySession, }; use std::collections::BTreeSet; @@ -91,7 +91,7 @@ async fn test_quietly_prepare_batch() { } } -async fn assert_test_batch_table_rows_contain(sess: &Session, expected_rows: &[(i32, i32)]) { +async fn assert_test_batch_table_rows_contain(sess: &LegacySession, expected_rows: &[(i32, i32)]) { let selected_rows: BTreeSet<(i32, i32)> = sess .query_unpaged("SELECT a, b FROM test_batch_table", ()) .await diff --git a/scylla/src/utils/test_utils.rs b/scylla/src/utils/test_utils.rs index 6c52fde35..6f9f2a9ec 100644 --- a/scylla/src/utils/test_utils.rs +++ b/scylla/src/utils/test_utils.rs @@ -1,6 +1,6 @@ #[cfg(test)] use crate::transport::session_builder::{GenericSessionBuilder, SessionBuilderKind}; -use crate::Session; +use crate::LegacySession; #[cfg(test)] use std::{num::NonZeroU32, time::Duration}; use std::{ @@ -25,7 +25,7 @@ pub fn unique_keyspace_name() -> String { } #[cfg(test)] -pub(crate) async fn supports_feature(session: &Session, feature: &str) -> bool { +pub(crate) async fn supports_feature(session: &LegacySession, feature: &str) -> bool { // Cassandra doesn't have a concept of features, so first detect // if there is the `supported_features` column in system.local @@ -92,7 +92,7 @@ pub fn create_new_session_builder() -> GenericSessionBuilder bool { +pub async fn scylla_supports_tablets(session: &LegacySession) -> bool { let result = session .query_unpaged( "select column_name from system_schema.columns where diff --git a/scylla/tests/integration/consistency.rs b/scylla/tests/integration/consistency.rs index f12f2d867..4a3b1306f 100644 --- a/scylla/tests/integration/consistency.rs +++ b/scylla/tests/integration/consistency.rs @@ -6,7 +6,7 @@ use scylla::prepared_statement::PreparedStatement; use scylla::retry_policy::FallthroughRetryPolicy; use scylla::routing::{Shard, Token}; use scylla::test_utils::unique_keyspace_name; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::transport::NodeRef; use scylla_cql::frame::response::result::TableSpec; use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; @@ -59,7 +59,7 @@ fn pairs_of_all_consistencies() -> impl Iterator, @@ -81,7 +81,7 @@ async fn query_consistency_set_directly( } async fn execute_consistency_set_directly( - session: &Session, + session: &LegacySession, prepared: &PreparedStatement, c: Consistency, sc: Option, @@ -94,7 +94,7 @@ async fn execute_consistency_set_directly( } async fn batch_consistency_set_directly( - session: &Session, + session: &LegacySession, batch: &Batch, c: Consistency, sc: Option, @@ -107,7 +107,7 @@ async fn 
batch_consistency_set_directly( // The following functions perform a request with consistencies set on a per-statement execution profile. async fn query_consistency_set_on_exec_profile( - session: &Session, + session: &LegacySession, query: &Query, profile: ExecutionProfileHandle, ) { @@ -118,7 +118,7 @@ async fn query_consistency_set_on_exec_profile( } async fn execute_consistency_set_on_exec_profile( - session: &Session, + session: &LegacySession, prepared: &PreparedStatement, profile: ExecutionProfileHandle, ) { @@ -129,7 +129,7 @@ async fn execute_consistency_set_on_exec_profile( } async fn batch_consistency_set_on_exec_profile( - session: &Session, + session: &LegacySession, batch: &Batch, profile: ExecutionProfileHandle, ) { diff --git a/scylla/tests/integration/lwt_optimisation.rs b/scylla/tests/integration/lwt_optimisation.rs index ca56cff93..f0d59f1f0 100644 --- a/scylla/tests/integration/lwt_optimisation.rs +++ b/scylla/tests/integration/lwt_optimisation.rs @@ -2,7 +2,7 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; use scylla::retry_policy::FallthroughRetryPolicy; use scylla::test_utils::scylla_supports_tablets; use scylla::test_utils::unique_keyspace_name; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::{ExecutionProfile, SessionBuilder}; use scylla_cql::frame::protocol_features::ProtocolFeatures; use scylla_cql::frame::types; @@ -52,7 +52,7 @@ async fn if_lwt_optimisation_mark_offered_then_negotiatied_and_lwt_routed_optima .into_handle(); // DB preparation phase - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .default_execution_profile_handle(handle) .address_translator(Arc::new(translation_map)) diff --git a/scylla/tests/integration/retries.rs b/scylla/tests/integration/retries.rs index 43cbf5807..f6e1711ac 100644 --- a/scylla/tests/integration/retries.rs +++ b/scylla/tests/integration/retries.rs @@ -1,7 +1,7 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; use scylla::retry_policy::FallthroughRetryPolicy; use scylla::speculative_execution::SimpleSpeculativeExecutionPolicy; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::ExecutionProfile; use scylla::SessionBuilder; use scylla::{query::Query, test_utils::unique_keyspace_name}; @@ -27,7 +27,7 @@ async fn speculative_execution_is_fired() { max_retry_count: 2, retry_interval: Duration::from_millis(10), }))).retry_policy(Arc::new(FallthroughRetryPolicy)).build(); - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .default_execution_profile_handle(simple_speculative_no_retry_profile.into_handle()) .address_translator(Arc::new(translation_map)) @@ -104,7 +104,7 @@ async fn retries_occur() { let res = test_with_3_node_cluster(ShardAwareness::QueryNode, |proxy_uris, translation_map, mut running_proxy| async move { // DB preparation phase - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .build() @@ -183,7 +183,7 @@ async fn speculative_execution_panic_regression_test() { .retry_policy(Arc::new(FallthroughRetryPolicy)) .build(); // DB preparation phase - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) 
.address_translator(Arc::new(translation_map)) .default_execution_profile_handle(profile.into_handle()) diff --git a/scylla/tests/integration/self_identity.rs b/scylla/tests/integration/self_identity.rs index cba46f717..d68bb0add 100644 --- a/scylla/tests/integration/self_identity.rs +++ b/scylla/tests/integration/self_identity.rs @@ -1,5 +1,5 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; -use scylla::{Session, SessionBuilder}; +use scylla::{LegacySession, SessionBuilder}; use scylla_cql::frame::request::options; use scylla_cql::frame::types; use std::sync::Arc; @@ -50,7 +50,7 @@ async fn test_given_self_identity(self_identity: SelfIdentity<'static>) { )])); // DB preparation phase - let _session: Session = SessionBuilder::new() + let _session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .custom_identity(self_identity.clone()) diff --git a/scylla/tests/integration/silent_prepare_query.rs b/scylla/tests/integration/silent_prepare_query.rs index d814f70a8..ffb200c7c 100644 --- a/scylla/tests/integration/silent_prepare_query.rs +++ b/scylla/tests/integration/silent_prepare_query.rs @@ -1,5 +1,5 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use scylla::{query::Query, test_utils::unique_keyspace_name}; use scylla_proxy::{ @@ -19,7 +19,7 @@ async fn test_prepare_query_with_values() { let res = test_with_3_node_cluster(ShardAwareness::QueryNode, |proxy_uris, translation_map, mut running_proxy| async move { // DB preparation phase - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .build() @@ -70,7 +70,7 @@ async fn test_query_with_no_values() { let res = test_with_3_node_cluster(ShardAwareness::QueryNode, |proxy_uris, translation_map, mut running_proxy| async move { // DB preparation phase - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .build() diff --git a/scylla/tests/integration/skip_metadata_optimization.rs b/scylla/tests/integration/skip_metadata_optimization.rs index 1c84569e7..eee25f908 100644 --- a/scylla/tests/integration/skip_metadata_optimization.rs +++ b/scylla/tests/integration/skip_metadata_optimization.rs @@ -1,5 +1,5 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; -use scylla::transport::session::Session; +use scylla::transport::session::LegacySession; use scylla::SessionBuilder; use scylla::{prepared_statement::PreparedStatement, test_utils::unique_keyspace_name}; use scylla_cql::frame::request::query::{PagingState, PagingStateResponse}; @@ -20,7 +20,7 @@ async fn test_skip_result_metadata() { let res = test_with_3_node_cluster(ShardAwareness::QueryNode, |proxy_uris, translation_map, mut running_proxy| async move { // DB preparation phase - let session: Session = SessionBuilder::new() + let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .build() @@ -51,7 +51,7 @@ async fn test_skip_result_metadata() { } async fn test_with_flags_predicate( - session: &Session, + session: &LegacySession, prepared: &PreparedStatement, rx: &mut tokio::sync::mpsc::UnboundedReceiver<(ResponseFrame, Option)>, 
predicate: impl FnOnce(i32) -> bool diff --git a/scylla/tests/integration/tablets.rs b/scylla/tests/integration/tablets.rs index 2bdf96987..67fb2fd88 100644 --- a/scylla/tests/integration/tablets.rs +++ b/scylla/tests/integration/tablets.rs @@ -16,9 +16,7 @@ use scylla::test_utils::unique_keyspace_name; use scylla::transport::ClusterData; use scylla::transport::Node; use scylla::transport::NodeRef; -use scylla::ExecutionProfile; -use scylla::LegacyQueryResult; -use scylla::Session; +use scylla::{ExecutionProfile, LegacyQueryResult, LegacySession}; use scylla::transport::errors::QueryError; use scylla_proxy::{ @@ -42,7 +40,7 @@ struct Tablet { replicas: Vec<(Arc, i32)>, } -async fn get_tablets(session: &Session, ks: &str, table: &str) -> Vec { +async fn get_tablets(session: &LegacySession, ks: &str, table: &str) -> Vec { let cluster_data = session.get_cluster_data(); let endpoints = cluster_data.get_nodes_info(); for endpoint in endpoints.iter() { @@ -181,7 +179,7 @@ impl LoadBalancingPolicy for SingleTargetLBP { } async fn send_statement_everywhere( - session: &Session, + session: &LegacySession, cluster: &ClusterData, statement: &PreparedStatement, values: &dyn SerializeRow, @@ -207,7 +205,7 @@ async fn send_statement_everywhere( } async fn send_unprepared_query_everywhere( - session: &Session, + session: &LegacySession, cluster: &ClusterData, query: &Query, ) -> Result, QueryError> { @@ -249,7 +247,7 @@ fn count_tablet_feedbacks( .count() } -async fn prepare_schema(session: &Session, ks: &str, table: &str, tablet_count: usize) { +async fn prepare_schema(session: &LegacySession, ks: &str, table: &str, tablet_count: usize) { session .query_unpaged( format!( From ed572553c78bf0df3ca27b42163746bbf94945b2 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Thu, 16 Mar 2023 18:19:06 +0100 Subject: [PATCH 04/25] session: make generic and introduce "session kind" parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The LegacySession and the upcoming Session will differ on a small number of methods, but otherwise will share remaining ones. In order to reduce boilerplate the (Legacy)Session is converted into a generic, with a type parameter indicating the kind of the API it supports (legacy or the current one). The common methods will be implemented for GenericSession for any K, and methods specific to the particular kind will only be implemented for GenericSession for that particular K. Co-authored-by: Wojciech Przytuła --- scylla/src/transport/session.rs | 33 +++++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 42a959684..9bd18bd05 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -27,6 +27,7 @@ use std::borrow::Borrow; use std::collections::HashMap; use std::fmt::Display; use std::future::Future; +use std::marker::PhantomData; use std::net::SocketAddr; use std::num::NonZeroU32; use std::str::FromStr; @@ -85,6 +86,14 @@ use crate::authentication::AuthenticatorProvider; #[cfg(feature = "ssl")] use openssl::ssl::SslContext; +mod sealed { + // This is a sealed trait - its whole purpose is to be unnameable. + // This means we need to disable the check. 
+ #[allow(unknown_lints)] // Rust 1.70 (our MSRV) doesn't know this lint + #[allow(unnameable_types)] + pub trait Sealed {} +} + pub(crate) const TABLET_CHANNEL_SIZE: usize = 8192; const TRACING_QUERY_PAGE_SIZE: i32 = 1024; @@ -154,8 +163,17 @@ impl AddressTranslator for HashMap<&'static str, &'static str> { } } +pub trait DeserializationApiKind: sealed::Sealed {} + +pub enum LegacyDeserializationApi {} +impl sealed::Sealed for LegacyDeserializationApi {} +impl DeserializationApiKind for LegacyDeserializationApi {} + /// `Session` manages connections to the cluster and allows to perform queries -pub struct LegacySession { +pub struct GenericSession +where + DeserializationApi: DeserializationApiKind, +{ cluster: Cluster, default_execution_profile_handle: ExecutionProfileHandle, schema_agreement_interval: Duration, @@ -167,11 +185,17 @@ pub struct LegacySession { tracing_info_fetch_attempts: NonZeroU32, tracing_info_fetch_interval: Duration, tracing_info_fetch_consistency: Consistency, + _phantom_deser_api: PhantomData, } +pub type LegacySession = GenericSession; + /// This implementation deliberately omits some details from Cluster in order /// to avoid cluttering the print with much information of little usability. -impl std::fmt::Debug for LegacySession { +impl std::fmt::Debug for GenericSession +where + DeserApi: DeserializationApiKind, +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Session") .field("cluster", &ClusterNeatDebug(&self.cluster)) @@ -458,7 +482,7 @@ impl LegacySession { /// # Ok(()) /// # } /// ``` - pub async fn connect(config: SessionConfig) -> Result { + pub async fn connect(config: SessionConfig) -> Result { let known_nodes = config.known_nodes; #[cfg(feature = "cloud")] @@ -532,7 +556,7 @@ impl LegacySession { let default_execution_profile_handle = config.default_execution_profile_handle; - let session = LegacySession { + let session = Self { cluster, default_execution_profile_handle, schema_agreement_interval: config.schema_agreement_interval, @@ -545,6 +569,7 @@ impl LegacySession { tracing_info_fetch_attempts: config.tracing_info_fetch_attempts, tracing_info_fetch_interval: config.tracing_info_fetch_interval, tracing_info_fetch_consistency: config.tracing_info_fetch_consistency, + _phantom_deser_api: PhantomData, }; if let Some(keyspace_name) = config.used_keyspace { From 8e36957e257b716e7924a9bf72df973da6e213a0 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Thu, 16 Mar 2023 18:20:10 +0100 Subject: [PATCH 05/25] session: move query-related methods to a separate block MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both Session and LegacySession will support methods that allow sending queries/prepared statements/batches and will share most of the implementation - it's just that return types will be slightly different. This commit moves the core of those methods to private methods `do_xyz` for every `xyz` method from the API. This will allow to implement the public methods for both API kinds with minimal boilerplate. 
Co-authored-by: Wojciech Przytuła --- scylla/src/transport/session.rs | 785 +++++++++++++++++--------------- 1 file changed, 424 insertions(+), 361 deletions(-) diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 9bd18bd05..ef39cab26 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -457,130 +457,7 @@ pub(crate) enum RunQueryResult { Completed(ResT), } -/// Represents a CQL session, which can be used to communicate -/// with the database -impl LegacySession { - /// Estabilishes a CQL session with the database - /// - /// Usually it's easier to use [SessionBuilder](crate::transport::session_builder::SessionBuilder) - /// instead of calling `Session::connect` directly, because it's more convenient. - /// # Arguments - /// * `config` - Connection configuration - known nodes, Compression, etc. - /// Must contain at least one known node. - /// - /// # Example - /// ```rust - /// # use std::error::Error; - /// # async fn check_only_compiles() -> Result<(), Box> { - /// use scylla::{LegacySession, SessionConfig}; - /// use scylla::transport::KnownNode; - /// - /// let mut config = SessionConfig::new(); - /// config.known_nodes.push(KnownNode::Hostname("127.0.0.1:9042".to_string())); - /// - /// let session: LegacySession = LegacySession::connect(config).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn connect(config: SessionConfig) -> Result { - let known_nodes = config.known_nodes; - - #[cfg(feature = "cloud")] - let cloud_known_nodes: Option> = - if let Some(ref cloud_config) = config.cloud_config { - let cloud_servers = cloud_config - .get_datacenters() - .iter() - .map(|(dc_name, dc_data)| { - InternalKnownNode::CloudEndpoint(CloudEndpoint { - hostname: dc_data.get_server().to_owned(), - datacenter: dc_name.clone(), - }) - }) - .collect(); - Some(cloud_servers) - } else { - None - }; - - #[cfg(not(feature = "cloud"))] - let cloud_known_nodes: Option> = None; - - let known_nodes = cloud_known_nodes - .unwrap_or_else(|| known_nodes.into_iter().map(|node| node.into()).collect()); - - // Ensure there is at least one known node - if known_nodes.is_empty() { - return Err(NewSessionError::EmptyKnownNodesList); - } - - let (tablet_sender, tablet_receiver) = tokio::sync::mpsc::channel(TABLET_CHANNEL_SIZE); - - let connection_config = ConnectionConfig { - compression: config.compression, - tcp_nodelay: config.tcp_nodelay, - tcp_keepalive_interval: config.tcp_keepalive_interval, - #[cfg(feature = "ssl")] - ssl_config: config.ssl_context.map(SslConfig::new_with_global_context), - authenticator: config.authenticator.clone(), - connect_timeout: config.connect_timeout, - event_sender: None, - default_consistency: Default::default(), - address_translator: config.address_translator, - #[cfg(feature = "cloud")] - cloud_config: config.cloud_config, - enable_write_coalescing: config.enable_write_coalescing, - keepalive_interval: config.keepalive_interval, - keepalive_timeout: config.keepalive_timeout, - tablet_sender: Some(tablet_sender), - identity: config.identity, - }; - - let pool_config = PoolConfig { - connection_config, - pool_size: config.connection_pool_size, - can_use_shard_aware_port: !config.disallow_shard_aware_port, - keepalive_interval: config.keepalive_interval, - }; - - let cluster = Cluster::new( - known_nodes, - pool_config, - config.keyspaces_to_fetch, - config.fetch_schema_metadata, - config.host_filter, - config.cluster_metadata_refresh_interval, - tablet_receiver, - ) - .await?; - - let 
default_execution_profile_handle = config.default_execution_profile_handle; - - let session = Self { - cluster, - default_execution_profile_handle, - schema_agreement_interval: config.schema_agreement_interval, - metrics: Arc::new(Metrics::new()), - schema_agreement_timeout: config.schema_agreement_timeout, - schema_agreement_automatic_waiting: config.schema_agreement_automatic_waiting, - refresh_metadata_on_auto_schema_agreement: config - .refresh_metadata_on_auto_schema_agreement, - keyspace_name: ArcSwapOption::default(), // will be set by use_keyspace - tracing_info_fetch_attempts: config.tracing_info_fetch_attempts, - tracing_info_fetch_interval: config.tracing_info_fetch_interval, - tracing_info_fetch_consistency: config.tracing_info_fetch_consistency, - _phantom_deser_api: PhantomData, - }; - - if let Some(keyspace_name) = config.used_keyspace { - session - .use_keyspace(keyspace_name, config.keyspace_case_sensitive) - .await?; - } - - Ok(session) - } - +impl GenericSession { /// Sends a request to the database and receives a response.\ /// Performs an unpaged query, i.e. all results are received in a single response. /// @@ -645,15 +522,7 @@ impl LegacySession { query: impl Into, values: impl SerializeRow, ) -> Result { - let query = query.into(); - let (result, paging_state_response) = self - .query(&query, values, None, PagingState::start()) - .await?; - if !paging_state_response.finished() { - error!("Unpaged unprepared query returned a non-empty paging state! This is a driver-side or server-side bug."); - return Err(ProtocolError::NonfinishedPagingState.into()); - } - Ok(result) + self.do_query_unpaged(&query.into(), values).await } /// Queries a single page from the database, optionally continuing from a saved point. @@ -706,13 +575,425 @@ impl LegacySession { /// ``` pub async fn query_single_page( &self, - query: impl Into, + query: impl Into, + values: impl SerializeRow, + paging_state: PagingState, + ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + self.do_query_single_page(&query.into(), values, paging_state) + .await + } + + /// Run an unprepared query with paging\ + /// This method will query all pages of the result\ + /// + /// Returns an async iterator (stream) over all received rows\ + /// Page size can be specified in the [Query] passed to the function + /// + /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` + /// trait returns false). In such case, query first needs to be prepared (on a single connection), so + /// driver will initially perform 2 round trips instead of 1. Please use [`Session::execute_iter()`] instead. + /// + /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information. + /// + /// # Arguments + /// * `query` - statement to be executed, can be just a `&str` or the [Query] struct. + /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values. + /// + /// # Example + /// + /// ```rust + /// # use scylla::LegacySession; + /// # use std::error::Error; + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// use scylla::IntoTypedRows; + /// use futures::stream::StreamExt; + /// + /// let mut rows_stream = session + /// .query_iter("SELECT a, b FROM ks.t", &[]) + /// .await? 
+ /// .into_typed::<(i32, i32)>(); + /// + /// while let Some(next_row_res) = rows_stream.next().await { + /// let (a, b): (i32, i32) = next_row_res?; + /// println!("a, b: {}, {}", a, b); + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn query_iter( + &self, + query: impl Into, + values: impl SerializeRow, + ) -> Result { + self.do_query_iter(query, values).await + } + + /// Execute a prepared statement. Requires a [PreparedStatement] + /// generated using [`Session::prepare`](Session::prepare).\ + /// Performs an unpaged query, i.e. all results are received in a single response. + /// + /// As all results come in one response (no paging is done!), the memory footprint and latency may be huge + /// for statements returning rows (i.e. SELECTs)! Prefer this method for non-SELECTs, and for SELECTs + /// it is best to use paged queries: + /// - to receive multiple pages and transparently iterate through them, use [execute_iter](Session::execute_iter). + /// - to manually receive multiple pages and iterate through them, use [execute_single_page](Session::execute_single_page). + /// + /// Prepared queries are much faster than simple queries: + /// * Database doesn't need to parse the query + /// * They are properly load balanced using token aware routing + /// + /// > ***Warning***\ + /// > For token/shard aware load balancing to work properly, all partition key values + /// > must be sent as bound values + /// > (see [performance section](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html#performance)). + /// + /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html) for more information. + /// + /// # Arguments + /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare) + /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values + /// + /// # Example + /// ```rust + /// # use scylla::LegacySession; + /// # use std::error::Error; + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// use scylla::prepared_statement::PreparedStatement; + /// + /// // Prepare the query for later execution + /// let prepared: PreparedStatement = session + /// .prepare("INSERT INTO ks.tab (a) VALUES(?)") + /// .await?; + /// + /// // Run the prepared query with some values, just like a simple query. + /// let to_insert: i32 = 12345; + /// session.execute_unpaged(&prepared, (to_insert,)).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn execute_unpaged( + &self, + prepared: &PreparedStatement, + values: impl SerializeRow, + ) -> Result { + self.do_execute_unpaged(prepared, values).await + } + + /// Executes a prepared statement, restricting results to single page. + /// Optionally continues fetching results from a saved point. 
+ /// + /// # Arguments + /// + /// * `prepared` - a statement prepared with [prepare](crate::Session::prepare) + /// * `values` - values bound to the query + /// * `paging_state` - continuation based on a paging state received from a previous paged query or None + /// + /// # Example + /// + /// ```rust + /// # use scylla::LegacySession; + /// # use std::error::Error; + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// use std::ops::ControlFlow; + /// use scylla::query::Query; + /// use scylla::statement::{PagingState, PagingStateResponse}; + /// + /// let paged_prepared = session + /// .prepare( + /// Query::new("SELECT a, b FROM ks.tbl") + /// .with_page_size(100.try_into().unwrap()), + /// ) + /// .await?; + /// + /// // Manual paging in a loop, prepared statement. + /// let mut paging_state = PagingState::start(); + /// loop { + /// let (res, paging_state_response) = session + /// .execute_single_page(&paged_prepared, &[], paging_state) + /// .await?; + /// + /// // Do something with a single page of results. + /// for row in res.rows_typed::<(i32, String)>()? { + /// let (a, b) = row?; + /// } + /// + /// match paging_state_response.into_paging_control_flow() { + /// ControlFlow::Break(()) => { + /// // No more pages to be fetched. + /// break; + /// } + /// ControlFlow::Continue(new_paging_state) => { + /// // Update paging continuation from the paging state, so that query + /// // will be resumed from where it ended the last time. + /// paging_state = new_paging_state; + /// } + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn execute_single_page( + &self, + prepared: &PreparedStatement, + values: impl SerializeRow, + paging_state: PagingState, + ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + self.do_execute_single_page(prepared, values, paging_state) + .await + } + + /// Run a prepared query with paging.\ + /// This method will query all pages of the result.\ + /// + /// Returns an async iterator (stream) over all received rows.\ + /// Page size can be specified in the [PreparedStatement] passed to the function. + /// + /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information. + /// + /// # Arguments + /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare) + /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values + /// + /// # Example + /// + /// ```rust + /// # use scylla::LegacySession; + /// # use std::error::Error; + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// use scylla::prepared_statement::PreparedStatement; + /// use scylla::IntoTypedRows; + /// use futures::stream::StreamExt; + /// + /// // Prepare the query for later execution + /// let prepared: PreparedStatement = session + /// .prepare("SELECT a, b FROM ks.t") + /// .await?; + /// + /// // Execute the query and receive all pages + /// let mut rows_stream = session + /// .execute_iter(prepared, &[]) + /// .await? 
+ /// .into_typed::<(i32, i32)>(); + /// + /// while let Some(next_row_res) = rows_stream.next().await { + /// let (a, b): (i32, i32) = next_row_res?; + /// println!("a, b: {}, {}", a, b); + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn execute_iter( + &self, + prepared: impl Into, + values: impl SerializeRow, + ) -> Result { + self.do_execute_iter(prepared, values).await + } + + /// Perform a batch query\ + /// Batch contains many `simple` or `prepared` queries which are executed at once\ + /// Batch doesn't return any rows + /// + /// Batch values must contain values for each of the queries + /// + /// Avoid using non-empty values (`SerializeRow::is_empty()` return false) for simple queries + /// inside the batch. Such queries will first need to be prepared, so the driver will need to + /// send (numer_of_unprepared_queries_with_values + 1) requests instead of 1 request, severly + /// affecting performance. + /// + /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/batch.html) for more information + /// + /// # Arguments + /// * `batch` - [Batch] to be performed + /// * `values` - List of values for each query, it's the easiest to use a tuple of tuples + /// + /// # Example + /// ```rust + /// # use scylla::LegacySession; + /// # use std::error::Error; + /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// use scylla::batch::Batch; + /// + /// let mut batch: Batch = Default::default(); + /// + /// // A query with two bound values + /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(?, ?)"); + /// + /// // A query with one bound value + /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(3, ?)"); + /// + /// // A query with no bound values + /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(5, 6)"); + /// + /// // Batch values is a tuple of 3 tuples containing values for each query + /// let batch_values = ((1_i32, 2_i32), // Tuple with two values for the first query + /// (4_i32,), // Tuple with one value for the second query + /// ()); // Empty tuple/unit for the third query + /// + /// // Run the batch + /// session.batch(&batch, batch_values).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn batch( + &self, + batch: &Batch, + values: impl BatchValues, + ) -> Result { + self.do_batch(batch, values).await + } +} + +/// Represents a CQL session, which can be used to communicate +/// with the database +impl GenericSession +where + DeserApi: DeserializationApiKind, +{ + /// Estabilishes a CQL session with the database + /// + /// Usually it's easier to use [SessionBuilder](crate::transport::session_builder::SessionBuilder) + /// instead of calling `Session::connect` directly, because it's more convenient. + /// # Arguments + /// * `config` - Connection configuration - known nodes, Compression, etc. + /// Must contain at least one known node. 
+ /// + /// # Example + /// ```rust + /// # use std::error::Error; + /// # async fn check_only_compiles() -> Result<(), Box> { + /// use scylla::{LegacySession, SessionConfig}; + /// use scylla::transport::KnownNode; + /// + /// let mut config = SessionConfig::new(); + /// config.known_nodes.push(KnownNode::Hostname("127.0.0.1:9042".to_string())); + /// + /// let session: LegacySession = LegacySession::connect(config).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn connect(config: SessionConfig) -> Result { + let known_nodes = config.known_nodes; + + #[cfg(feature = "cloud")] + let cloud_known_nodes: Option> = + if let Some(ref cloud_config) = config.cloud_config { + let cloud_servers = cloud_config + .get_datacenters() + .iter() + .map(|(dc_name, dc_data)| { + InternalKnownNode::CloudEndpoint(CloudEndpoint { + hostname: dc_data.get_server().to_owned(), + datacenter: dc_name.clone(), + }) + }) + .collect(); + Some(cloud_servers) + } else { + None + }; + + #[cfg(not(feature = "cloud"))] + let cloud_known_nodes: Option> = None; + + let known_nodes = cloud_known_nodes + .unwrap_or_else(|| known_nodes.into_iter().map(|node| node.into()).collect()); + + // Ensure there is at least one known node + if known_nodes.is_empty() { + return Err(NewSessionError::EmptyKnownNodesList); + } + + let (tablet_sender, tablet_receiver) = tokio::sync::mpsc::channel(TABLET_CHANNEL_SIZE); + + let connection_config = ConnectionConfig { + compression: config.compression, + tcp_nodelay: config.tcp_nodelay, + tcp_keepalive_interval: config.tcp_keepalive_interval, + #[cfg(feature = "ssl")] + ssl_config: config.ssl_context.map(SslConfig::new_with_global_context), + authenticator: config.authenticator.clone(), + connect_timeout: config.connect_timeout, + event_sender: None, + default_consistency: Default::default(), + address_translator: config.address_translator, + #[cfg(feature = "cloud")] + cloud_config: config.cloud_config, + enable_write_coalescing: config.enable_write_coalescing, + keepalive_interval: config.keepalive_interval, + keepalive_timeout: config.keepalive_timeout, + tablet_sender: Some(tablet_sender), + identity: config.identity, + }; + + let pool_config = PoolConfig { + connection_config, + pool_size: config.connection_pool_size, + can_use_shard_aware_port: !config.disallow_shard_aware_port, + keepalive_interval: config.keepalive_interval, + }; + + let cluster = Cluster::new( + known_nodes, + pool_config, + config.keyspaces_to_fetch, + config.fetch_schema_metadata, + config.host_filter, + config.cluster_metadata_refresh_interval, + tablet_receiver, + ) + .await?; + + let default_execution_profile_handle = config.default_execution_profile_handle; + + let session = Self { + cluster, + default_execution_profile_handle, + schema_agreement_interval: config.schema_agreement_interval, + metrics: Arc::new(Metrics::new()), + schema_agreement_timeout: config.schema_agreement_timeout, + schema_agreement_automatic_waiting: config.schema_agreement_automatic_waiting, + refresh_metadata_on_auto_schema_agreement: config + .refresh_metadata_on_auto_schema_agreement, + keyspace_name: ArcSwapOption::default(), // will be set by use_keyspace + tracing_info_fetch_attempts: config.tracing_info_fetch_attempts, + tracing_info_fetch_interval: config.tracing_info_fetch_interval, + tracing_info_fetch_consistency: config.tracing_info_fetch_consistency, + _phantom_deser_api: PhantomData, + }; + + if let Some(keyspace_name) = config.used_keyspace { + session + .use_keyspace(keyspace_name, config.keyspace_case_sensitive) + 
.await?; + } + + Ok(session) + } + + async fn do_query_unpaged( + &self, + query: &Query, + values: impl SerializeRow, + ) -> Result { + let (result, paging_state_response) = self + .query(query, values, None, PagingState::start()) + .await?; + if !paging_state_response.finished() { + error!("Unpaged unprepared query returned a non-empty paging state! This is a driver-side or server-side bug."); + return Err(ProtocolError::NonfinishedPagingState.into()); + } + Ok(result) + } + + async fn do_query_single_page( + &self, + query: &Query, values: impl SerializeRow, paging_state: PagingState, ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { - let query = query.into(); self.query( - &query, + query, values, Some(query.get_validated_page_size()), paging_state, @@ -864,44 +1145,7 @@ impl LegacySession { Ok(()) } - /// Run an unprepared query with paging\ - /// This method will query all pages of the result\ - /// - /// Returns an async iterator (stream) over all received rows\ - /// Page size can be specified in the [Query] passed to the function - /// - /// It is discouraged to use this method with non-empty values argument (`is_empty()` method from `SerializeRow` - /// trait returns false). In such case, query first needs to be prepared (on a single connection), so - /// driver will initially perform 2 round trips instead of 1. Please use [`Session::execute_iter()`] instead. - /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information. - /// - /// # Arguments - /// * `query` - statement to be executed, can be just a `&str` or the [Query] struct. - /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values. - /// - /// # Example - /// - /// ```rust - /// # use scylla::LegacySession; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { - /// use scylla::IntoTypedRows; - /// use futures::stream::StreamExt; - /// - /// let mut rows_stream = session - /// .query_iter("SELECT a, b FROM ks.t", &[]) - /// .await? - /// .into_typed::<(i32, i32)>(); - /// - /// while let Some(next_row_res) = rows_stream.next().await { - /// let (a, b): (i32, i32) = next_row_res?; - /// println!("a, b: {}, {}", a, b); - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn query_iter( + async fn do_query_iter( &self, query: impl Into, values: impl SerializeRow, @@ -1032,50 +1276,7 @@ impl LegacySession { .as_deref() } - /// Execute a prepared statement. Requires a [PreparedStatement] - /// generated using [`Session::prepare`](Session::prepare).\ - /// Performs an unpaged query, i.e. all results are received in a single response. - /// - /// As all results come in one response (no paging is done!), the memory footprint and latency may be huge - /// for statements returning rows (i.e. SELECTs)! Prefer this method for non-SELECTs, and for SELECTs - /// it is best to use paged queries: - /// - to receive multiple pages and transparently iterate through them, use [execute_iter](Session::execute_iter). - /// - to manually receive multiple pages and iterate through them, use [execute_single_page](Session::execute_single_page). 
- /// - /// Prepared queries are much faster than simple queries: - /// * Database doesn't need to parse the query - /// * They are properly load balanced using token aware routing - /// - /// > ***Warning***\ - /// > For token/shard aware load balancing to work properly, all partition key values - /// > must be sent as bound values - /// > (see [performance section](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html#performance)). - /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html) for more information. - /// - /// # Arguments - /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare) - /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values - /// - /// # Example - /// ```rust - /// # use scylla::LegacySession; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { - /// use scylla::prepared_statement::PreparedStatement; - /// - /// // Prepare the query for later execution - /// let prepared: PreparedStatement = session - /// .prepare("INSERT INTO ks.tab (a) VALUES(?)") - /// .await?; - /// - /// // Run the prepared query with some values, just like a simple query. - /// let to_insert: i32 = 12345; - /// session.execute_unpaged(&prepared, (to_insert,)).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn execute_unpaged( + async fn do_execute_unpaged( &self, prepared: &PreparedStatement, values: impl SerializeRow, @@ -1091,60 +1292,7 @@ impl LegacySession { Ok(result) } - /// Executes a prepared statement, restricting results to single page. - /// Optionally continues fetching results from a saved point. - /// - /// # Arguments - /// - /// * `prepared` - a statement prepared with [prepare](crate::Session::prepare) - /// * `values` - values bound to the query - /// * `paging_state` - continuation based on a paging state received from a previous paged query or None - /// - /// # Example - /// - /// ```rust - /// # use scylla::LegacySession; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { - /// use std::ops::ControlFlow; - /// use scylla::query::Query; - /// use scylla::statement::{PagingState, PagingStateResponse}; - /// - /// let paged_prepared = session - /// .prepare( - /// Query::new("SELECT a, b FROM ks.tbl") - /// .with_page_size(100.try_into().unwrap()), - /// ) - /// .await?; - /// - /// // Manual paging in a loop, prepared statement. - /// let mut paging_state = PagingState::start(); - /// loop { - /// let (res, paging_state_response) = session - /// .execute_single_page(&paged_prepared, &[], paging_state) - /// .await?; - /// - /// // Do something with a single page of results. - /// for row in res.rows_typed::<(i32, String)>()? { - /// let (a, b) = row?; - /// } - /// - /// match paging_state_response.into_paging_control_flow() { - /// ControlFlow::Break(()) => { - /// // No more pages to be fetched. - /// break; - /// } - /// ControlFlow::Continue(new_paging_state) => { - /// // Update paging continuation from the paging state, so that query - /// // will be resumed from where it ended the last time. 
- /// paging_state = new_paging_state; - /// } - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn execute_single_page( + async fn do_execute_single_page( &self, prepared: &PreparedStatement, values: impl SerializeRow, @@ -1267,47 +1415,7 @@ impl LegacySession { Ok((result, paging_state)) } - /// Run a prepared query with paging.\ - /// This method will query all pages of the result.\ - /// - /// Returns an async iterator (stream) over all received rows.\ - /// Page size can be specified in the [PreparedStatement] passed to the function. - /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information. - /// - /// # Arguments - /// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare) - /// * `values` - values bound to the query, the easiest way is to use a tuple of bound values - /// - /// # Example - /// - /// ```rust - /// # use scylla::LegacySession; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { - /// use scylla::prepared_statement::PreparedStatement; - /// use scylla::IntoTypedRows; - /// use futures::stream::StreamExt; - /// - /// // Prepare the query for later execution - /// let prepared: PreparedStatement = session - /// .prepare("SELECT a, b FROM ks.t") - /// .await?; - /// - /// // Execute the query and receive all pages - /// let mut rows_stream = session - /// .execute_iter(prepared, &[]) - /// .await? - /// .into_typed::<(i32, i32)>(); - /// - /// while let Some(next_row_res) = rows_stream.next().await { - /// let (a, b): (i32, i32) = next_row_res?; - /// println!("a, b: {}, {}", a, b); - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn execute_iter( + async fn do_execute_iter( &self, prepared: impl Into, values: impl SerializeRow, @@ -1331,52 +1439,7 @@ impl LegacySession { .map(QueryPager::into_legacy) } - /// Perform a batch request.\ - /// Batch contains many `simple` or `prepared` queries which are executed at once.\ - /// Batch doesn't return any rows. - /// - /// Batch values must contain values for each of the queries. - /// - /// Avoid using non-empty values (`SerializeRow::is_empty()` return false) for unprepared statements - /// inside the batch. Such statements will first need to be prepared, so the driver will need to - /// send (numer_of_unprepared_statements_with_values + 1) requests instead of 1 request, severly - /// affecting performance. - /// - /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/batch.html) for more information. 
- /// - /// # Arguments - /// * `batch` - [Batch] to be performed - /// * `values` - List of values for each query, it's the easiest to use a tuple of tuples - /// - /// # Example - /// ```rust - /// # use scylla::LegacySession; - /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { - /// use scylla::batch::Batch; - /// - /// let mut batch: Batch = Default::default(); - /// - /// // A query with two bound values - /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(?, ?)"); - /// - /// // A query with one bound value - /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(3, ?)"); - /// - /// // A query with no bound values - /// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(5, 6)"); - /// - /// // Batch values is a tuple of 3 tuples containing values for each query - /// let batch_values = ((1_i32, 2_i32), // Tuple with two values for the first query - /// (4_i32,), // Tuple with one value for the second query - /// ()); // Empty tuple/unit for the third query - /// - /// // Run the batch - /// session.batch(&batch, batch_values).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn batch( + async fn do_batch( &self, batch: &Batch, values: impl BatchValues, @@ -1646,8 +1709,8 @@ impl LegacySession { traces_events_query.set_page_size(TRACING_QUERY_PAGE_SIZE); let (traces_session_res, traces_events_res) = tokio::try_join!( - self.query_unpaged(traces_session_query, (tracing_id,)), - self.query_unpaged(traces_events_query, (tracing_id,)) + self.do_query_unpaged(&traces_session_query, (tracing_id,)), + self.do_query_unpaged(&traces_events_query, (tracing_id,)) )?; // Get tracing info From 6daf83341f5f639d2b3cd750e7a1b7c8ff915bde Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Thu, 16 Mar 2023 18:20:29 +0100 Subject: [PATCH 06/25] session: re-introduce the Session type as an alias MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds Session as an alias over GenericSession. No methods (apart from the common ones) are added to it yet. 
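For illustration, below is a minimal, self-contained sketch of the marker-type pattern this alias relies on. It is a simplification: the real GenericSession in scylla/src/transport/session.rs holds the actual cluster/config fields and the driver's methods, and `shared_method`/`new_api_method` here are placeholder names, not driver APIs.

    use std::marker::PhantomData;

    mod sealed {
        pub trait Sealed {}
    }

    // A sealed marker trait plus two uninhabited marker types select which
    // deserialization API a given session type exposes.
    pub trait DeserializationApiKind: sealed::Sealed {}

    pub enum CurrentDeserializationApi {}
    impl sealed::Sealed for CurrentDeserializationApi {}
    impl DeserializationApiKind for CurrentDeserializationApi {}

    pub enum LegacyDeserializationApi {}
    impl sealed::Sealed for LegacyDeserializationApi {}
    impl DeserializationApiKind for LegacyDeserializationApi {}

    // One shared implementation, parameterized by the marker type.
    pub struct GenericSession<DeserApi: DeserializationApiKind> {
        // cluster handle, config, metrics, ... (elided)
        _phantom_deser_api: PhantomData<DeserApi>,
    }

    // Methods common to both APIs live on the generic type...
    impl<DeserApi: DeserializationApiKind> GenericSession<DeserApi> {
        pub fn shared_method(&self) {}
    }

    // ...while API-specific methods go on one concrete instantiation only.
    impl GenericSession<CurrentDeserializationApi> {
        pub fn new_api_method(&self) {}
    }

    pub type Session = GenericSession<CurrentDeserializationApi>;
    pub type LegacySession = GenericSession<LegacyDeserializationApi>;

This keeps a single session implementation while letting the two public aliases expose different (new vs. legacy) deserialization APIs, so the legacy surface can later be deprecated and removed without duplicating the shared plumbing.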
Co-authored-by: Wojciech Przytuła --- scylla/src/lib.rs | 2 +- scylla/src/transport/session.rs | 5 +++++ scylla/src/transport/session_builder.rs | 8 ++++---- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index 0ecdc09f0..52a4ffc74 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -260,7 +260,7 @@ pub use transport::caching_session::CachingSession; pub use transport::execution_profile::ExecutionProfile; pub use transport::legacy_query_result::LegacyQueryResult; pub use transport::query_result::{QueryResult, QueryRowsResult}; -pub use transport::session::{IntoTypedRows, LegacySession, SessionConfig}; +pub use transport::session::{IntoTypedRows, LegacySession, Session, SessionConfig}; pub use transport::session_builder::SessionBuilder; #[cfg(feature = "cloud")] diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index ef39cab26..ad9e6b21d 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -165,6 +165,10 @@ impl AddressTranslator for HashMap<&'static str, &'static str> { pub trait DeserializationApiKind: sealed::Sealed {} +pub enum CurrentDeserializationApi {} +impl sealed::Sealed for CurrentDeserializationApi {} +impl DeserializationApiKind for CurrentDeserializationApi {} + pub enum LegacyDeserializationApi {} impl sealed::Sealed for LegacyDeserializationApi {} impl DeserializationApiKind for LegacyDeserializationApi {} @@ -188,6 +192,7 @@ where _phantom_deser_api: PhantomData, } +pub type Session = GenericSession; pub type LegacySession = GenericSession; /// This implementation deliberately omits some details from Cluster in order diff --git a/scylla/src/transport/session_builder.rs b/scylla/src/transport/session_builder.rs index 15e10e098..da3de24fc 100644 --- a/scylla/src/transport/session_builder.rs +++ b/scylla/src/transport/session_builder.rs @@ -493,7 +493,7 @@ impl GenericSessionBuilder { /// Set keyspace to be used on all connections.\ /// Each connection will send `"USE "` before sending any requests.\ - /// This can be later changed with [`Session::use_keyspace`] + /// This can be later changed with [`crate::Session::use_keyspace`] /// /// # Example /// ``` @@ -815,7 +815,7 @@ impl GenericSessionBuilder { } /// Set the number of attempts to fetch [TracingInfo](crate::tracing::TracingInfo) - /// in [`LegacySession::get_tracing_info`]. + /// in [`Session::get_tracing_info`](crate::Session::get_tracing_info). /// The default is 5 attempts. /// /// Tracing info might not be available immediately on queried node - that's why @@ -844,7 +844,7 @@ impl GenericSessionBuilder { } /// Set the delay between attempts to fetch [TracingInfo](crate::tracing::TracingInfo) - /// in [`LegacySession::get_tracing_info`]. + /// in [`Session::get_tracing_info`](crate::Session::get_tracing_info). /// The default is 3 milliseconds. /// /// Tracing info might not be available immediately on queried node - that's why @@ -873,7 +873,7 @@ impl GenericSessionBuilder { } /// Set the consistency level of fetching [TracingInfo](crate::tracing::TracingInfo) - /// in [`LegacySession::get_tracing_info`]. + /// in [`Session::get_tracing_info`](crate::Session::get_tracing_info). /// The default is [`Consistency::One`]. 
/// /// # Example From c1416dd3bc7daf7f897f9741c3dda27989d207c7 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Thu, 16 Mar 2023 11:54:43 +0100 Subject: [PATCH 07/25] session_builder: rename build->build_legacy and then reintroduce MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit renames the SessionBuilder::build method to build_legacy, and then reintroduces the build method so that it returns the new Session (not LegacySession). All the examples, tests, documentation will gradually be migrated to use SessionBuilder::build again in following commits. Co-authored-by: Wojciech Przytuła --- examples/allocations.rs | 5 +- examples/auth.rs | 2 +- examples/basic.rs | 2 +- examples/cloud.rs | 2 +- examples/compare-tokens.rs | 2 +- examples/cql-time-types.rs | 2 +- examples/cqlsh-rs.rs | 2 +- examples/custom_deserialization.rs | 2 +- examples/custom_load_balancing_policy.rs | 2 +- examples/execution_profile.rs | 4 +- examples/get_by_name.rs | 2 +- examples/logging.rs | 2 +- examples/logging_log.rs | 2 +- examples/parallel-prepared.rs | 2 +- examples/parallel.rs | 2 +- examples/query_history.rs | 2 +- examples/schema_agreement.rs | 2 +- examples/select-paging.rs | 2 +- examples/speculative-execution.rs | 2 +- examples/tls.rs | 2 +- examples/tower.rs | 2 +- examples/tracing.rs | 4 +- examples/user-defined-type.rs | 2 +- examples/value_list.rs | 2 +- scylla/src/history.rs | 6 +- scylla/src/lib.rs | 2 +- scylla/src/transport/authenticate_test.rs | 4 +- scylla/src/transport/caching_session.rs | 2 +- scylla/src/transport/connection.rs | 4 +- scylla/src/transport/cql_collections_test.rs | 2 +- scylla/src/transport/cql_types_test.rs | 10 +- scylla/src/transport/cql_value_test.rs | 4 +- scylla/src/transport/execution_profile.rs | 4 +- .../transport/large_batch_statements_test.rs | 2 +- .../src/transport/load_balancing/default.rs | 2 +- scylla/src/transport/session.rs | 2 +- scylla/src/transport/session_builder.rs | 114 ++++++++++++------ scylla/src/transport/session_test.rs | 98 ++++++++------- .../transport/silent_prepare_batch_test.rs | 2 +- scylla/tests/integration/consistency.rs | 6 +- .../tests/integration/execution_profiles.rs | 2 +- scylla/tests/integration/lwt_optimisation.rs | 2 +- scylla/tests/integration/new_session.rs | 2 +- scylla/tests/integration/retries.rs | 6 +- scylla/tests/integration/self_identity.rs | 2 +- scylla/tests/integration/shards.rs | 2 +- .../tests/integration/silent_prepare_query.rs | 4 +- .../integration/skip_metadata_optimization.rs | 2 +- scylla/tests/integration/tablets.rs | 6 +- 49 files changed, 192 insertions(+), 155 deletions(-) diff --git a/examples/allocations.rs b/examples/allocations.rs index 039d21e01..f87f7641f 100644 --- a/examples/allocations.rs +++ b/examples/allocations.rs @@ -128,7 +128,10 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", args.node); - let session: LegacySession = SessionBuilder::new().known_node(args.node).build().await?; + let session: LegacySession = SessionBuilder::new() + .known_node(args.node) + .build_legacy() + .await?; let session = Arc::new(session); session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/auth.rs b/examples/auth.rs index 22fbee007..ded1115f3 100644 --- a/examples/auth.rs +++ b/examples/auth.rs @@ -10,7 +10,7 @@ async fn main() -> Result<()> { let session = SessionBuilder::new() .known_node(uri) 
.user("cassandra", "cassandra") - .build() + .build_legacy() .await .unwrap(); diff --git a/examples/basic.rs b/examples/basic.rs index 48d97b713..ad8570db5 100644 --- a/examples/basic.rs +++ b/examples/basic.rs @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/cloud.rs b/examples/cloud.rs index 63265e41f..5859ef12e 100644 --- a/examples/cloud.rs +++ b/examples/cloud.rs @@ -12,7 +12,7 @@ async fn main() -> Result<()> { .unwrap_or("examples/config_data.yaml".to_owned()); let session = CloudSessionBuilder::new(Path::new(&config_path)) .unwrap() - .build() + .build_legacy() .await .unwrap(); diff --git a/examples/compare-tokens.rs b/examples/compare-tokens.rs index e302b9f83..4863608ff 100644 --- a/examples/compare-tokens.rs +++ b/examples/compare-tokens.rs @@ -10,7 +10,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/cql-time-types.rs b/examples/cql-time-types.rs index 8a8cedb66..1b9e475d4 100644 --- a/examples/cql-time-types.rs +++ b/examples/cql-time-types.rs @@ -16,7 +16,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/cqlsh-rs.rs b/examples/cqlsh-rs.rs index 0b9cd5a18..a4371909a 100644 --- a/examples/cqlsh-rs.rs +++ b/examples/cqlsh-rs.rs @@ -202,7 +202,7 @@ async fn main() -> Result<()> { let session: LegacySession = SessionBuilder::new() .known_node(uri) .compression(Some(Compression::Lz4)) - .build() + .build_legacy() .await?; let config = Config::builder() diff --git a/examples/custom_deserialization.rs b/examples/custom_deserialization.rs index 976afe468..7bd694c81 100644 --- a/examples/custom_deserialization.rs +++ b/examples/custom_deserialization.rs @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session diff --git a/examples/custom_load_balancing_policy.rs b/examples/custom_load_balancing_policy.rs index 9fa505384..e70ed0213 100644 --- a/examples/custom_load_balancing_policy.rs +++ b/examples/custom_load_balancing_policy.rs @@ -71,7 +71,7 @@ async fn main() -> Result<()> { let _session: LegacySession = SessionBuilder::new() .known_node(uri) 
.default_execution_profile_handle(profile.into_handle()) - .build() + .build_legacy() .await?; Ok(()) diff --git a/examples/execution_profile.rs b/examples/execution_profile.rs index 944245660..46ae8e03f 100644 --- a/examples/execution_profile.rs +++ b/examples/execution_profile.rs @@ -45,13 +45,13 @@ async fn main() -> Result<()> { let session1: LegacySession = SessionBuilder::new() .known_node(&uri) .default_execution_profile_handle(handle1.clone()) - .build() + .build_legacy() .await?; let session2: LegacySession = SessionBuilder::new() .known_node(&uri) .default_execution_profile_handle(handle2.clone()) - .build() + .build_legacy() .await?; // As default execution profile is not provided explicitly, session 3 uses a predefined one. diff --git a/examples/get_by_name.rs b/examples/get_by_name.rs index 2f3996e5e..a0a21b855 100644 --- a/examples/get_by_name.rs +++ b/examples/get_by_name.rs @@ -10,7 +10,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/logging.rs b/examples/logging.rs index 00071c4cd..37e534b8c 100644 --- a/examples/logging.rs +++ b/examples/logging.rs @@ -16,7 +16,7 @@ async fn main() -> Result<()> { let uri = env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string()); info!("Connecting to {}", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session.query_unpaged("USE examples_ks", &[]).await?; diff --git a/examples/logging_log.rs b/examples/logging_log.rs index 9e8c81687..a1f962419 100644 --- a/examples/logging_log.rs +++ b/examples/logging_log.rs @@ -18,7 +18,7 @@ async fn main() -> Result<()> { let uri = env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string()); info!("Connecting to {}", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session.query_unpaged("USE examples_ks", &[]).await?; diff --git a/examples/parallel-prepared.rs b/examples/parallel-prepared.rs index e848b305c..531f6d7b4 100644 --- a/examples/parallel-prepared.rs +++ b/examples/parallel-prepared.rs @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; let session = Arc::new(session); session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/parallel.rs b/examples/parallel.rs index 3cf191661..5e3f119fb 100644 --- a/examples/parallel.rs +++ b/examples/parallel.rs @@ -11,7 +11,7 @@ 
async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; let session = Arc::new(session); session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/query_history.rs b/examples/query_history.rs index 61ea56723..710f9616d 100644 --- a/examples/query_history.rs +++ b/examples/query_history.rs @@ -15,7 +15,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/schema_agreement.rs b/examples/schema_agreement.rs index bfc5ed01b..9b9369ac9 100644 --- a/examples/schema_agreement.rs +++ b/examples/schema_agreement.rs @@ -16,7 +16,7 @@ async fn main() -> Result<()> { let session: LegacySession = SessionBuilder::new() .known_node(uri) .schema_agreement_interval(Duration::from_secs(1)) // check every second for schema agreement if not agreed first check - .build() + .build_legacy() .await?; let schema_version = session.await_schema_agreement().await?; diff --git a/examples/select-paging.rs b/examples/select-paging.rs index 36d40f62d..f9027675a 100644 --- a/examples/select-paging.rs +++ b/examples/select-paging.rs @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/speculative-execution.rs b/examples/speculative-execution.rs index 13513c1d9..c53285cac 100644 --- a/examples/speculative-execution.rs +++ b/examples/speculative-execution.rs @@ -23,7 +23,7 @@ async fn main() -> Result<()> { let session: LegacySession = SessionBuilder::new() .known_node(uri) .default_execution_profile_handle(speculative_profile.into_handle()) - .build() + .build_legacy() .await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/tls.rs b/examples/tls.rs index c41e5e7f9..1bb354e56 100644 --- a/examples/tls.rs +++ b/examples/tls.rs @@ -47,7 +47,7 @@ async fn main() -> Result<()> { let session: LegacySession = SessionBuilder::new() .known_node(uri) .ssl_context(Some(context_builder.build())) - .build() + .build_legacy() .await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/tower.rs b/examples/tower.rs index 5f89890fc..0b6085e00 100644 --- a/examples/tower.rs +++ b/examples/tower.rs @@ -35,7 +35,7 @@ async fn main() -> anyhow::Result<()> { session: Arc::new( scylla::SessionBuilder::new() .known_node(uri) - .build() + .build_legacy() .await?, ), }; diff --git a/examples/tracing.rs 
b/examples/tracing.rs index 2ce7b2e61..435e356c7 100644 --- a/examples/tracing.rs +++ b/examples/tracing.rs @@ -23,7 +23,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); let session: LegacySession = SessionBuilder::new() .known_node(uri.as_str()) - .build() + .build_legacy() .await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -117,7 +117,7 @@ async fn main() -> Result<()> { .tracing_info_fetch_attempts(NonZeroU32::new(8).unwrap()) .tracing_info_fetch_interval(Duration::from_millis(100)) .tracing_info_fetch_consistency(Consistency::One) - .build() + .build_legacy() .await?; let _custom_info: TracingInfo = session.get_tracing_info(&query_tracing_id).await?; diff --git a/examples/user-defined-type.rs b/examples/user-defined-type.rs index e8be4b2f9..9e01586a7 100644 --- a/examples/user-defined-type.rs +++ b/examples/user-defined-type.rs @@ -10,7 +10,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/value_list.rs b/examples/value_list.rs index e72d488b4..ce997b70e 100644 --- a/examples/value_list.rs +++ b/examples/value_list.rs @@ -8,7 +8,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build().await?; + let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/scylla/src/history.rs b/scylla/src/history.rs index d4693163e..62ee1ad4d 100644 --- a/scylla/src/history.rs +++ b/scylla/src/history.rs @@ -917,7 +917,7 @@ mod tests { #[tokio::test] async fn successful_query_history() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let mut query = Query::new("SELECT * FROM system.local"); let history_collector = Arc::new(HistoryCollector::new()); @@ -984,7 +984,7 @@ mod tests { #[tokio::test] async fn failed_query_history() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let mut query = Query::new("This isnt even CQL"); let history_collector = Arc::new(HistoryCollector::new()); @@ -1021,7 +1021,7 @@ mod tests { #[tokio::test] async fn iterator_query_history() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index 52a4ffc74..f8f5d4ea9 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -25,7 +25,7 @@ //! let session: LegacySession = SessionBuilder::new() //! .known_node("127.0.0.1:9042") //! 
.known_node("1.2.3.4:9876") -//! .build() +//! .build_legacy() //! .await?; //! //! Ok(()) diff --git a/scylla/src/transport/authenticate_test.rs b/scylla/src/transport/authenticate_test.rs index 78e72dea4..75d628ce4 100644 --- a/scylla/src/transport/authenticate_test.rs +++ b/scylla/src/transport/authenticate_test.rs @@ -16,7 +16,7 @@ async fn authenticate_superuser() { let session = crate::SessionBuilder::new() .known_node(uri) .user("cassandra", "cassandra") - .build() + .build_legacy() .await .unwrap(); let ks = unique_keyspace_name(); @@ -75,7 +75,7 @@ async fn custom_authentication() { let session = crate::SessionBuilder::new() .known_node(uri) .authenticator_provider(Arc::new(CustomAuthenticatorProvider)) - .build() + .build_legacy() .await .unwrap(); let ks = unique_keyspace_name(); diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index 2b0fcc05e..5b0f920f0 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -234,7 +234,7 @@ mod tests { async fn new_for_test(with_tablet_support: bool) -> LegacySession { let session = create_new_session_builder() - .build() + .build_legacy() .await .expect("Could not create session"); let ks = unique_keyspace_name(); diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 188984393..018f4e75b 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -2443,7 +2443,7 @@ mod tests { // Preparation phase let session = SessionBuilder::new() .known_node_addr(addr) - .build() + .build_legacy() .await .unwrap(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); @@ -2534,7 +2534,7 @@ mod tests { // Preparation phase let session = SessionBuilder::new() .known_node_addr(addr) - .build() + .build_legacy() .await .unwrap(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); diff --git a/scylla/src/transport/cql_collections_test.rs b/scylla/src/transport/cql_collections_test.rs index fe2a8a8d2..6322e6e92 100644 --- a/scylla/src/transport/cql_collections_test.rs +++ b/scylla/src/transport/cql_collections_test.rs @@ -6,7 +6,7 @@ use scylla_cql::types::serialize::value::SerializeValue; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; async fn connect() -> LegacySession { - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); diff --git a/scylla/src/transport/cql_types_test.rs b/scylla/src/transport/cql_types_test.rs index 32dd11638..1429b498b 100644 --- a/scylla/src/transport/cql_types_test.rs +++ b/scylla/src/transport/cql_types_test.rs @@ -23,7 +23,7 @@ async fn init_test_maybe_without_tablets( type_name: &str, supports_tablets: bool, ) -> LegacySession { - let session: LegacySession = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); let mut create_ks = 
format!( @@ -168,7 +168,7 @@ async fn test_cql_varint() { ]; let table_name = "cql_varint_tests"; - let session: LegacySession = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1234,7 +1234,7 @@ async fn test_timeuuid() { #[tokio::test] async fn test_timeuuid_ordering() { setup_tracing(); - let session: LegacySession = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1466,7 +1466,7 @@ async fn test_udt_after_schema_update() { let table_name = "udt_tests"; let type_name = "usertype1"; - let session: LegacySession = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1638,7 +1638,7 @@ async fn test_udt_with_missing_field() { let table_name = "udt_tests"; let type_name = "usertype1"; - let session: LegacySession = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session diff --git a/scylla/src/transport/cql_value_test.rs b/scylla/src/transport/cql_value_test.rs index be1047ede..d3d2d18e4 100644 --- a/scylla/src/transport/cql_value_test.rs +++ b/scylla/src/transport/cql_value_test.rs @@ -7,7 +7,7 @@ use crate::LegacySession; #[tokio::test] async fn test_cqlvalue_udt() { setup_tracing(); - let session: LegacySession = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session .query_unpaged( @@ -72,7 +72,7 @@ async fn test_cqlvalue_udt() { #[tokio::test] async fn test_cqlvalue_duration() { setup_tracing(); - let session: LegacySession = create_new_session_builder().build().await.unwrap(); + let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session diff --git a/scylla/src/transport/execution_profile.rs b/scylla/src/transport/execution_profile.rs index 2854611b4..421a7da7e 100644 --- a/scylla/src/transport/execution_profile.rs +++ b/scylla/src/transport/execution_profile.rs @@ -30,7 +30,7 @@ //! let session: LegacySession = SessionBuilder::new() //! .known_node("127.0.0.1:9042") //! .default_execution_profile_handle(handle) -//! .build() +//! .build_legacy() //! .await?; //! # Ok(()) //! # } @@ -128,7 +128,7 @@ //! let session: LegacySession = SessionBuilder::new() //! .known_node("127.0.0.1:9042") //! .default_execution_profile_handle(handle1.clone()) -//! .build() +//! .build_legacy() //! .await?; //! //! 
let mut query1 = Query::from("SELECT * FROM ks.table"); diff --git a/scylla/src/transport/large_batch_statements_test.rs b/scylla/src/transport/large_batch_statements_test.rs index 2b394ed32..0e250fc7d 100644 --- a/scylla/src/transport/large_batch_statements_test.rs +++ b/scylla/src/transport/large_batch_statements_test.rs @@ -13,7 +13,7 @@ use crate::{ #[tokio::test] async fn test_large_batch_statements() { setup_tracing(); - let mut session = create_new_session_builder().build().await.unwrap(); + let mut session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session = create_test_session(session, &ks).await; diff --git a/scylla/src/transport/load_balancing/default.rs b/scylla/src/transport/load_balancing/default.rs index 51db7f97f..beffebb1f 100644 --- a/scylla/src/transport/load_balancing/default.rs +++ b/scylla/src/transport/load_balancing/default.rs @@ -3860,7 +3860,7 @@ mod latency_awareness { let session = create_new_session_builder() .default_execution_profile_handle(handle) - .build() + .build_legacy() .await .unwrap(); diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index ad9e6b21d..f193949ca 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -1604,7 +1604,7 @@ where /// # use scylla::{LegacySession, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// # let session = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?; + /// # let session = SessionBuilder::new().known_node("127.0.0.1:9042").build_legacy().await?; /// session /// .query_unpaged("INSERT INTO my_keyspace.tab (a) VALUES ('test1')", &[]) /// .await?; diff --git a/scylla/src/transport/session_builder.rs b/scylla/src/transport/session_builder.rs index da3de24fc..73725d83e 100644 --- a/scylla/src/transport/session_builder.rs +++ b/scylla/src/transport/session_builder.rs @@ -2,7 +2,10 @@ use super::connection::SelfIdentity; use super::execution_profile::ExecutionProfileHandle; -use super::session::{AddressTranslator, LegacySession, SessionConfig}; +use super::session::{ + AddressTranslator, CurrentDeserializationApi, GenericSession, LegacyDeserializationApi, + SessionConfig, +}; use super::Compression; #[cfg(feature = "cloud")] @@ -65,7 +68,7 @@ pub type CloudSessionBuilder = GenericSessionBuilder; /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .compression(Some(Compression::Snappy)) -/// .build() +/// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -96,7 +99,10 @@ impl GenericSessionBuilder { /// ``` /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?; + /// let session: LegacySession = SessionBuilder::new() + /// .known_node("127.0.0.1:9042") + /// .build_legacy() + /// .await?; /// # Ok(()) /// # } /// ``` @@ -104,7 +110,10 @@ impl GenericSessionBuilder { /// ``` /// # use scylla::{LegacySession, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new().known_node("db1.example.com").build().await?; + /// let session: LegacySession = SessionBuilder::new() + /// .known_node("db1.example.com") + /// .build_legacy() + /// .await?; /// # Ok(()) /// # } /// ``` @@ -121,7 +130,7 @@ impl GenericSessionBuilder { /// # async fn example() -> Result<(), Box> { /// let session: LegacySession = 
SessionBuilder::new() /// .known_node_addr(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 9042)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -138,7 +147,7 @@ impl GenericSessionBuilder { /// # async fn example() -> Result<(), Box> { /// let session: LegacySession = SessionBuilder::new() /// .known_nodes(["127.0.0.1:9042", "db1.example.com"]) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -159,7 +168,7 @@ impl GenericSessionBuilder { /// /// let session: LegacySession = SessionBuilder::new() /// .known_nodes_addr([addr1, addr2]) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -184,7 +193,7 @@ impl GenericSessionBuilder { /// .known_node("127.0.0.1:9042") /// .use_keyspace("my_keyspace_name", false) /// .user("cassandra", "cassandra") - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -236,7 +245,7 @@ impl GenericSessionBuilder { /// .use_keyspace("my_keyspace_name", false) /// .user("cassandra", "cassandra") /// .authenticator_provider(Arc::new(CustomAuthenticatorProvider)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -276,7 +285,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .address_translator(Arc::new(IdentityTranslator)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -298,7 +307,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .address_translator(Arc::new(translation_rules)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -329,7 +338,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .ssl_context(Some(context_builder.build())) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -383,7 +392,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .compression(Some(Compression::Snappy)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -404,7 +413,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .schema_agreement_interval(Duration::from_secs(5)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -428,7 +437,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .default_execution_profile_handle(execution_profile.into_handle()) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -451,7 +460,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tcp_nodelay(true) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -474,7 +483,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tcp_keepalive_interval(std::time::Duration::from_secs(42)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -503,7 +512,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .use_keyspace("my_keyspace_name", false) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -514,7 +523,10 @@ impl 
GenericSessionBuilder { self } - /// Builds the Session after setting all the options + /// Builds the Session after setting all the options. + /// + /// The new session object uses the legacy deserialization API. If you wish + /// to use the new API, use [`SessionBuilder::build`]. /// /// # Example /// ``` @@ -524,13 +536,39 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .compression(Some(Compression::Snappy)) + /// .build_legacy() // Turns SessionBuilder into Session + /// .await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn build_legacy( + &self, + ) -> Result, NewSessionError> { + GenericSession::connect(self.config.clone()).await + } + + /// Builds the Session after setting all the options. + /// + /// The new session object uses the new deserialization API. If you wish + /// to use the old API, use [`SessionBuilder::build_legacy`]. + /// + /// # Example + /// ``` + /// # use scylla::{Session, SessionBuilder}; + /// # use scylla::transport::Compression; + /// # async fn example() -> Result<(), Box> { + /// let session: Session = SessionBuilder::new() + /// .known_node("127.0.0.1:9042") + /// .compression(Some(Compression::Snappy)) /// .build() // Turns SessionBuilder into Session /// .await?; /// # Ok(()) /// # } /// ``` - pub async fn build(&self) -> Result { - LegacySession::connect(self.config.clone()).await + pub async fn build( + &self, + ) -> Result, NewSessionError> { + GenericSession::connect(self.config.clone()).await } /// Changes connection timeout @@ -545,7 +583,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .connection_timeout(Duration::from_secs(30)) - /// .build() // Turns SessionBuilder into Session + /// .build_legacy() // Turns SessionBuilder into Session /// .await?; /// # Ok(()) /// # } @@ -570,7 +608,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .pool_size(PoolSize::PerHost(NonZeroUsize::new(4).unwrap())) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -609,7 +647,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .disallow_shard_aware_port(true) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -629,7 +667,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .keyspaces_to_fetch(["my_keyspace"]) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -652,7 +690,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .fetch_schema_metadata(true) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -675,7 +713,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .keepalive_interval(std::time::Duration::from_secs(42)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -705,7 +743,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .keepalive_timeout(std::time::Duration::from_secs(42)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -732,7 +770,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = 
SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .schema_agreement_timeout(std::time::Duration::from_secs(120)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -752,7 +790,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .auto_await_schema_agreement(false) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -784,7 +822,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .host_filter(Arc::new(DcHostFilter::new("my-local-dc".to_string()))) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -804,7 +842,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .refresh_metadata_on_auto_schema_agreement(true) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -833,7 +871,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tracing_info_fetch_attempts(NonZeroU32::new(10).unwrap()) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -862,7 +900,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tracing_info_fetch_interval(Duration::from_millis(50)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -883,7 +921,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tracing_info_fetch_consistency(Consistency::One) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -914,7 +952,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .write_coalescing(false) // Enabled by default - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -938,7 +976,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .cluster_metadata_refresh_interval(std::time::Duration::from_secs(20)) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } @@ -970,7 +1008,7 @@ impl GenericSessionBuilder { /// .with_application_name("my-app") /// .with_application_version(app_version) /// ) - /// .build() + /// .build_legacy() /// .await?; /// # Ok(()) /// # } diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index b596e05b2..c3eb0de7f 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -55,7 +55,10 @@ async fn test_connection_failure() { .remote_handle(); tokio::spawn(fut); - let res = SessionBuilder::new().known_node_addr(addr).build().await; + let res = SessionBuilder::new() + .known_node_addr(addr) + .build_legacy() + .await; match res { Ok(_) => panic!("Unexpected success"), Err(err) => println!("Connection error (it was expected): {:?}", err), @@ -65,7 +68,7 @@ async fn test_connection_failure() { #[tokio::test] async fn test_unprepared_statement() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 
'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -167,7 +170,7 @@ async fn test_unprepared_statement() { #[tokio::test] async fn test_prepared_statement() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -368,7 +371,7 @@ async fn test_counter_batch() { use scylla_cql::frame::request::batch::BatchType; setup_tracing(); - let session = Arc::new(create_new_session_builder().build().await.unwrap()); + let session = Arc::new(create_new_session_builder().build_legacy().await.unwrap()); let ks = unique_keyspace_name(); // Need to disable tablets in this test because they don't support counters yet. @@ -423,7 +426,7 @@ async fn test_counter_batch() { #[tokio::test] async fn test_batch() { setup_tracing(); - let session = Arc::new(create_new_session_builder().build().await.unwrap()); + let session = Arc::new(create_new_session_builder().build_legacy().await.unwrap()); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -525,7 +528,7 @@ async fn test_batch() { #[tokio::test] async fn test_token_calculation() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -584,7 +587,7 @@ async fn test_token_calculation() { #[tokio::test] async fn test_token_awareness() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); // Need to disable tablets in this test because they make token routing @@ -646,7 +649,7 @@ async fn test_token_awareness() { #[tokio::test] async fn test_use_keyspace() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -716,7 +719,7 @@ async fn test_use_keyspace() { // Make sure that use_keyspace on SessionBuiler works let session2: LegacySession = create_new_session_builder() .use_keyspace(ks.clone(), false) - .build() + .build_legacy() .await .unwrap(); @@ -737,7 +740,7 @@ async fn test_use_keyspace() { #[tokio::test] async fn test_use_keyspace_case_sensitivity() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks_lower = unique_keyspace_name().to_lowercase(); let ks_upper = ks_lower.to_uppercase(); @@ -810,7 +813,7 @@ async fn test_use_keyspace_case_sensitivity() { #[tokio::test] async fn test_raw_use_keyspace() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let 
session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -862,7 +865,7 @@ async fn test_raw_use_keyspace() { #[tokio::test] async fn test_fetch_system_keyspace() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let prepared_statement = session .prepare("SELECT * FROM system_schema.keyspaces") @@ -879,7 +882,7 @@ async fn test_fetch_system_keyspace() { #[tokio::test] async fn test_db_errors() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); // SyntaxError on bad query @@ -934,7 +937,7 @@ async fn test_db_errors() { #[tokio::test] async fn test_tracing() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -1176,21 +1179,14 @@ async fn assert_in_tracing_table(session: &LegacySession, tracing_uuid: Uuid) { #[tokio::test] async fn test_await_schema_agreement() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let _schema_version = session.await_schema_agreement().await.unwrap(); } -#[tokio::test] -async fn test_await_timed_schema_agreement() { - setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); - session.await_schema_agreement().await.unwrap(); -} - #[tokio::test] async fn test_timestamp() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -1309,7 +1305,7 @@ async fn test_request_timeout() { .into_handle(); { - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let mut query: Query = Query::new("SELECT * FROM system_schema.tables"); query.set_request_timeout(Some(Duration::from_millis(1))); @@ -1332,7 +1328,7 @@ async fn test_request_timeout() { { let timeouting_session = create_new_session_builder() .default_execution_profile_handle(fast_timeouting_profile_handle) - .build() + .build_legacy() .await .unwrap(); @@ -1368,7 +1364,7 @@ async fn test_request_timeout() { #[tokio::test] async fn test_prepared_config() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let mut query = Query::new("SELECT * FROM system_schema.tables"); query.set_is_idempotent(true); @@ -1455,7 +1451,7 @@ fn udt_type_c_def(ks: &str) -> Arc { #[tokio::test] async fn test_schema_types_in_metadata() { setup_tracing(); - let session = 
create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1614,7 +1610,7 @@ async fn test_schema_types_in_metadata() { #[tokio::test] async fn test_user_defined_types_in_metadata() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1678,7 +1674,7 @@ async fn test_user_defined_types_in_metadata() { #[tokio::test] async fn test_column_kinds_in_metadata() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1724,7 +1720,7 @@ async fn test_column_kinds_in_metadata() { #[tokio::test] async fn test_primary_key_ordering_in_metadata() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1773,7 +1769,7 @@ async fn test_table_partitioner_in_metadata() { return; } - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); // This test uses CDC which is not yet compatible with Scylla's tablets. @@ -1819,7 +1815,7 @@ async fn test_turning_off_schema_fetching() { setup_tracing(); let session = create_new_session_builder() .fetch_schema_metadata(false) - .build() + .build_legacy() .await .unwrap(); let ks = unique_keyspace_name(); @@ -1895,7 +1891,7 @@ async fn test_turning_off_schema_fetching() { #[tokio::test] async fn test_named_bind_markers() { - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1952,7 +1948,7 @@ async fn test_named_bind_markers() { #[tokio::test] async fn test_prepared_partitioner() { - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); // This test uses CDC which is not yet compatible with Scylla's tablets. 
@@ -2034,7 +2030,7 @@ async fn rename_caching(session: &CachingSession, rename_str: &str) { async fn test_unprepared_reprepare_in_execute() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2095,7 +2091,7 @@ async fn test_unprepared_reprepare_in_execute() { async fn test_unusual_valuelists() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2163,7 +2159,7 @@ async fn test_unusual_valuelists() { async fn test_unprepared_reprepare_in_batch() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2228,7 +2224,7 @@ async fn test_unprepared_reprepare_in_batch() { async fn test_unprepared_reprepare_in_caching_session_execute() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2288,7 +2284,7 @@ async fn test_unprepared_reprepare_in_caching_session_execute() { async fn test_views_in_schema_info() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2359,7 +2355,7 @@ async fn assert_test_batch_table_rows_contain(sess: &LegacySession, expected_row #[tokio::test] async fn test_prepare_batch() { - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2456,7 +2452,7 @@ async fn test_prepare_batch() { #[tokio::test] async fn test_refresh_metadata_after_schema_agreement() { - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 
'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2494,7 +2490,7 @@ async fn test_refresh_metadata_after_schema_agreement() { #[tokio::test] async fn test_rate_limit_exceeded_exception() { - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); // Typed errors in RPC were introduced along with per-partition rate limiting. // There is no dedicated feature for per-partition rate limiting, so we are @@ -2542,7 +2538,7 @@ async fn test_rate_limit_exceeded_exception() { // Batches containing LWT queries (IF col = som) return rows with information whether the queries were applied. #[tokio::test] async fn test_batch_lwts() { - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); let mut create_ks = format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class': 'NetworkTopologyStrategy', 'replication_factor': 1}}", ks); @@ -2669,7 +2665,7 @@ async fn test_keyspaces_to_fetch() { let ks1 = unique_keyspace_name(); let ks2 = unique_keyspace_name(); - let session_default = create_new_session_builder().build().await.unwrap(); + let session_default = create_new_session_builder().build_legacy().await.unwrap(); for ks in [&ks1, &ks2] { session_default .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) @@ -2688,7 +2684,7 @@ async fn test_keyspaces_to_fetch() { let session1 = create_new_session_builder() .keyspaces_to_fetch([&ks1]) - .build() + .build_legacy() .await .unwrap(); assert!(session1.get_cluster_data().keyspaces.contains_key(&ks1)); @@ -2696,7 +2692,7 @@ async fn test_keyspaces_to_fetch() { let session_all = create_new_session_builder() .keyspaces_to_fetch([] as [String; 0]) - .build() + .build_legacy() .await .unwrap(); assert!(session_all.get_cluster_data().keyspaces.contains_key(&ks1)); @@ -2738,7 +2734,7 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { let session = create_new_session_builder() .default_execution_profile_handle(handle) - .build() + .build_legacy() .await .unwrap(); @@ -2781,7 +2777,7 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { #[tokio::test] async fn test_iter_methods_with_modification_statements() { - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2822,7 +2818,7 @@ async fn test_get_keyspace_name() { // Create the keyspace // No keyspace is set in config, so get_keyspace() should return None. 
- let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); assert_eq!(session.get_keyspace(), None); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); assert_eq!(session.get_keyspace(), None); @@ -2847,7 +2843,7 @@ async fn test_get_keyspace_name() { #[tokio::test] async fn simple_strategy_test() { let ks = unique_keyspace_name(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); session .query_unpaged( @@ -2912,7 +2908,7 @@ async fn simple_strategy_test() { async fn test_manual_primary_key_computation() { // Setup session let ks = unique_keyspace_name(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(&ks, true).await.unwrap(); diff --git a/scylla/src/transport/silent_prepare_batch_test.rs b/scylla/src/transport/silent_prepare_batch_test.rs index f8c7fb328..c7ae8e83d 100644 --- a/scylla/src/transport/silent_prepare_batch_test.rs +++ b/scylla/src/transport/silent_prepare_batch_test.rs @@ -9,7 +9,7 @@ use std::collections::BTreeSet; #[tokio::test] async fn test_quietly_prepare_batch() { setup_tracing(); - let session = create_new_session_builder().build().await.unwrap(); + let session = create_new_session_builder().build_legacy().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); diff --git a/scylla/tests/integration/consistency.rs b/scylla/tests/integration/consistency.rs index 4a3b1306f..5531973ec 100644 --- a/scylla/tests/integration/consistency.rs +++ b/scylla/tests/integration/consistency.rs @@ -159,7 +159,7 @@ async fn check_for_all_consistencies_and_setting_options< let session = session_builder .clone() .default_execution_profile_handle(base_for_every_profile.clone().build().into_handle()) - .build() + .build_legacy() .await .unwrap(); create_schema(&session, ks).await; @@ -212,7 +212,7 @@ async fn check_for_all_consistencies_and_setting_options< let session_with_consistencies = session_builder .clone() .default_execution_profile_handle(handle) - .build() + .build_legacy() .await .unwrap(); session_with_consistencies @@ -473,7 +473,7 @@ async fn consistency_allows_for_paxos_selects() { let session = SessionBuilder::new() .known_node(uri.as_str()) - .build() + .build_legacy() .await .unwrap(); diff --git a/scylla/tests/integration/execution_profiles.rs b/scylla/tests/integration/execution_profiles.rs index 0a49bae78..46ca7c7a2 100644 --- a/scylla/tests/integration/execution_profiles.rs +++ b/scylla/tests/integration/execution_profiles.rs @@ -159,7 +159,7 @@ async fn test_execution_profiles() { .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .default_execution_profile_handle(profile1.into_handle()) - .build() + .build_legacy() .await .unwrap(); let ks = unique_keyspace_name(); diff --git a/scylla/tests/integration/lwt_optimisation.rs b/scylla/tests/integration/lwt_optimisation.rs index 
f0d59f1f0..dd1e855d8 100644 --- a/scylla/tests/integration/lwt_optimisation.rs +++ b/scylla/tests/integration/lwt_optimisation.rs @@ -56,7 +56,7 @@ async fn if_lwt_optimisation_mark_offered_then_negotiatied_and_lwt_routed_optima .known_node(proxy_uris[0].as_str()) .default_execution_profile_handle(handle) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); diff --git a/scylla/tests/integration/new_session.rs b/scylla/tests/integration/new_session.rs index 6f734f0da..d28fa1d7a 100644 --- a/scylla/tests/integration/new_session.rs +++ b/scylla/tests/integration/new_session.rs @@ -16,7 +16,7 @@ async fn proceed_if_only_some_hostnames_are_invalid() { let session = SessionBuilder::new() .known_nodes([uri1, uri2, uri3]) - .build() + .build_legacy() .await .unwrap(); session diff --git a/scylla/tests/integration/retries.rs b/scylla/tests/integration/retries.rs index f6e1711ac..e8bd5477c 100644 --- a/scylla/tests/integration/retries.rs +++ b/scylla/tests/integration/retries.rs @@ -31,7 +31,7 @@ async fn speculative_execution_is_fired() { .known_node(proxy_uris[0].as_str()) .default_execution_profile_handle(simple_speculative_no_retry_profile.into_handle()) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); @@ -107,7 +107,7 @@ async fn retries_occur() { let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); @@ -187,7 +187,7 @@ async fn speculative_execution_panic_regression_test() { .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .default_execution_profile_handle(profile.into_handle()) - .build() + .build_legacy() .await .unwrap(); diff --git a/scylla/tests/integration/self_identity.rs b/scylla/tests/integration/self_identity.rs index d68bb0add..e7378c6e2 100644 --- a/scylla/tests/integration/self_identity.rs +++ b/scylla/tests/integration/self_identity.rs @@ -54,7 +54,7 @@ async fn test_given_self_identity(self_identity: SelfIdentity<'static>) { .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .custom_identity(self_identity.clone()) - .build() + .build_legacy() .await .unwrap(); diff --git a/scylla/tests/integration/shards.rs b/scylla/tests/integration/shards.rs index b22cfc397..3a4e0ecb1 100644 --- a/scylla/tests/integration/shards.rs +++ b/scylla/tests/integration/shards.rs @@ -32,7 +32,7 @@ async fn test_consistent_shard_awareness() { let session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); let ks = unique_keyspace_name(); diff --git a/scylla/tests/integration/silent_prepare_query.rs b/scylla/tests/integration/silent_prepare_query.rs index ffb200c7c..64da40406 100644 --- a/scylla/tests/integration/silent_prepare_query.rs +++ b/scylla/tests/integration/silent_prepare_query.rs @@ -22,7 +22,7 @@ async fn test_prepare_query_with_values() { let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); @@ -73,7 +73,7 @@ async fn test_query_with_no_values() { let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); diff --git a/scylla/tests/integration/skip_metadata_optimization.rs 
b/scylla/tests/integration/skip_metadata_optimization.rs index eee25f908..a50e33b8b 100644 --- a/scylla/tests/integration/skip_metadata_optimization.rs +++ b/scylla/tests/integration/skip_metadata_optimization.rs @@ -23,7 +23,7 @@ async fn test_skip_result_metadata() { let session: LegacySession = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); diff --git a/scylla/tests/integration/tablets.rs b/scylla/tests/integration/tablets.rs index 67fb2fd88..a51d009c1 100644 --- a/scylla/tests/integration/tablets.rs +++ b/scylla/tests/integration/tablets.rs @@ -294,7 +294,7 @@ async fn test_default_policy_is_tablet_aware() { let session = scylla::SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); @@ -425,7 +425,7 @@ async fn test_tablet_feedback_not_sent_for_unprepared_queries() { let session = scylla::SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); @@ -497,7 +497,7 @@ async fn test_lwt_optimization_works_with_tablets() { let session = scylla::SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build() + .build_legacy() .await .unwrap(); From e9d4719f6eb9466b3728c3a22d27b60503aa939c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Thu, 8 Aug 2024 12:43:07 +0200 Subject: [PATCH 08/25] tests: scylla_supports_tablets[_legacy] suffix This is a temporary measure. The tests are going to be modernised in parts, which is why for some time we are going to need both functions: one for LegacySession and another for modern Session. 
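As an illustration (not part of the patch itself), here is a minimal sketch of how the two helpers are meant to coexist during the migration; the keyspace setup is a placeholder and the exact import paths are assumptions:

```rust
// Sketch only: a not-yet-migrated test keeps the legacy session and the `_legacy`
// helper, while a modernised test uses the new Session and the new helper.
// Import paths below are assumed; helper names match this patch series.
use scylla::test_utils::{
    create_new_session_builder, scylla_supports_tablets, scylla_supports_tablets_legacy,
    unique_keyspace_name,
};

#[tokio::test]
async fn legacy_style_test() {
    let session = create_new_session_builder().build_legacy().await.unwrap();
    let ks = unique_keyspace_name();
    let mut create_ks = format!(
        "CREATE KEYSPACE {ks} WITH REPLICATION = \
         {{'class': 'NetworkTopologyStrategy', 'replication_factor': 1}}"
    );
    // Legacy session, so the `_legacy` variant of the helper is used.
    if scylla_supports_tablets_legacy(&session).await {
        create_ks += " AND TABLETS = {'enabled': false}";
    }
    session.query_unpaged(create_ks, &[]).await.unwrap();
}

#[tokio::test]
async fn modernised_test() {
    let session = create_new_session_builder().build().await.unwrap();
    let ks = unique_keyspace_name();
    let mut create_ks = format!(
        "CREATE KEYSPACE {ks} WITH REPLICATION = \
         {{'class': 'NetworkTopologyStrategy', 'replication_factor': 1}}"
    );
    // Modern Session, so the new helper taking `&Session` is used.
    if scylla_supports_tablets(&session).await {
        create_ks += " AND TABLETS = {'enabled': false}";
    }
    session.query_unpaged(create_ks, &[]).await.unwrap();
}
```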
--- scylla/src/transport/caching_session.rs | 6 ++++-- scylla/src/transport/cql_types_test.rs | 6 ++++-- scylla/src/transport/session_test.rs | 14 +++++++------- scylla/src/utils/test_utils.rs | 2 +- scylla/tests/integration/lwt_optimisation.rs | 4 ++-- scylla/tests/integration/shards.rs | 4 ++-- scylla/tests/integration/tablets.rs | 6 +++--- 7 files changed, 23 insertions(+), 19 deletions(-) diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index 5b0f920f0..5e77c48df 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -221,7 +221,9 @@ where mod tests { use crate::query::Query; use crate::statement::PagingState; - use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing}; + use crate::test_utils::{ + create_new_session_builder, scylla_supports_tablets_legacy, setup_tracing, + }; use crate::transport::partitioner::PartitionerName; use crate::utils::test_utils::unique_keyspace_name; use crate::{ @@ -243,7 +245,7 @@ mod tests { "CREATE KEYSPACE IF NOT EXISTS {ks} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}" ); - if !with_tablet_support && scylla_supports_tablets(&session).await { + if !with_tablet_support && scylla_supports_tablets_legacy(&session).await { create_ks += " AND TABLETS = {'enabled': false}"; } diff --git a/scylla/src/transport/cql_types_test.rs b/scylla/src/transport/cql_types_test.rs index 1429b498b..4be1244eb 100644 --- a/scylla/src/transport/cql_types_test.rs +++ b/scylla/src/transport/cql_types_test.rs @@ -3,7 +3,9 @@ use crate::cql_to_rust::FromCqlVal; use crate::frame::response::result::CqlValue; use crate::frame::value::{Counter, CqlDate, CqlTime, CqlTimestamp}; use crate::macros::FromUserType; -use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing}; +use crate::test_utils::{ + create_new_session_builder, scylla_supports_tablets_legacy, setup_tracing, +}; use crate::transport::session::LegacySession; use crate::utils::test_utils::unique_keyspace_name; use itertools::Itertools; @@ -32,7 +34,7 @@ async fn init_test_maybe_without_tablets( ks ); - if !supports_tablets && scylla_supports_tablets(&session).await { + if !supports_tablets && scylla_supports_tablets_legacy(&session).await { create_ks += " AND TABLETS = {'enabled': false}" } diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index c3eb0de7f..8923c85fb 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -6,7 +6,7 @@ use crate::query::Query; use crate::retry_policy::{QueryInfo, RetryDecision, RetryPolicy, RetrySession}; use crate::routing::Token; use crate::statement::Consistency; -use crate::test_utils::{scylla_supports_tablets, setup_tracing}; +use crate::test_utils::{scylla_supports_tablets_legacy, setup_tracing}; use crate::tracing::TracingInfo; use crate::transport::errors::{BadKeyspaceName, BadQuery, DbError, QueryError}; use crate::transport::partitioner::{ @@ -377,7 +377,7 @@ async fn test_counter_batch() { // Need to disable tablets in this test because they don't support counters yet. // (https://github.com/scylladb/scylladb/commit/c70f321c6f581357afdf3fd8b4fe8e5c5bb9736e). 
let mut create_ks = format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks); - if scylla_supports_tablets(&session).await { + if scylla_supports_tablets_legacy(&session).await { create_ks += " AND TABLETS = {'enabled': false}" } @@ -596,7 +596,7 @@ async fn test_token_awareness() { let mut create_ks = format!( "CREATE KEYSPACE IF NOT EXISTS {ks} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}" ); - if scylla_supports_tablets(&session).await { + if scylla_supports_tablets_legacy(&session).await { create_ks += " AND TABLETS = {'enabled': false}" } @@ -1776,7 +1776,7 @@ async fn test_table_partitioner_in_metadata() { let mut create_ks = format!( "CREATE KEYSPACE {ks} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}" ); - if scylla_supports_tablets(&session).await { + if scylla_supports_tablets_legacy(&session).await { create_ks += " AND TABLETS = {'enabled': false}"; } @@ -1954,7 +1954,7 @@ async fn test_prepared_partitioner() { // This test uses CDC which is not yet compatible with Scylla's tablets. let mut create_ks = format!( "CREATE KEYSPACE IF NOT EXISTS {ks} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}"); - if scylla_supports_tablets(&session).await { + if scylla_supports_tablets_legacy(&session).await { create_ks += " AND TABLETS = {'enabled': false}" } @@ -2542,7 +2542,7 @@ async fn test_batch_lwts() { let ks = unique_keyspace_name(); let mut create_ks = format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class': 'NetworkTopologyStrategy', 'replication_factor': 1}}", ks); - if scylla_supports_tablets(&session).await { + if scylla_supports_tablets_legacy(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } session.query_unpaged(create_ks, &[]).await.unwrap(); @@ -2742,7 +2742,7 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { let cluster_size = session.get_cluster_data().get_nodes_info().len(); let ks = unique_keyspace_name(); let mut create_ks = format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class': 'NetworkTopologyStrategy', 'replication_factor': {}}}", ks, cluster_size + 1); - if scylla_supports_tablets(&session).await { + if scylla_supports_tablets_legacy(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } session.query_unpaged(create_ks, ()).await.unwrap(); diff --git a/scylla/src/utils/test_utils.rs b/scylla/src/utils/test_utils.rs index 6f9f2a9ec..a5c0dadab 100644 --- a/scylla/src/utils/test_utils.rs +++ b/scylla/src/utils/test_utils.rs @@ -92,7 +92,7 @@ pub fn create_new_session_builder() -> GenericSessionBuilder bool { +pub async fn scylla_supports_tablets_legacy(session: &LegacySession) -> bool { let result = session .query_unpaged( "select column_name from system_schema.columns where diff --git a/scylla/tests/integration/lwt_optimisation.rs b/scylla/tests/integration/lwt_optimisation.rs index dd1e855d8..508c953bd 100644 --- a/scylla/tests/integration/lwt_optimisation.rs +++ b/scylla/tests/integration/lwt_optimisation.rs @@ -1,6 +1,6 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; use scylla::retry_policy::FallthroughRetryPolicy; -use scylla::test_utils::scylla_supports_tablets; +use scylla::test_utils::scylla_supports_tablets_legacy; use scylla::test_utils::unique_keyspace_name; use scylla::transport::session::LegacySession; use scylla::{ExecutionProfile, SessionBuilder}; @@ -70,7 +70,7 @@ async fn 
if_lwt_optimisation_mark_offered_then_negotiatied_and_lwt_routed_optima // Create schema let ks = unique_keyspace_name(); let mut create_ks = format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks); - if scylla_supports_tablets(&session).await { + if scylla_supports_tablets_legacy(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } session.query_unpaged(create_ks, &[]).await.unwrap(); diff --git a/scylla/tests/integration/shards.rs b/scylla/tests/integration/shards.rs index 3a4e0ecb1..c3db91ca2 100644 --- a/scylla/tests/integration/shards.rs +++ b/scylla/tests/integration/shards.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::utils::{setup_tracing, test_with_3_node_cluster}; -use scylla::test_utils::scylla_supports_tablets; +use scylla::test_utils::scylla_supports_tablets_legacy; use scylla::{test_utils::unique_keyspace_name, SessionBuilder}; use tokio::sync::mpsc; @@ -39,7 +39,7 @@ async fn test_consistent_shard_awareness() { /* Prepare schema */ let mut create_ks = format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks); - if scylla_supports_tablets(&session).await { + if scylla_supports_tablets_legacy(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } session.query_unpaged(create_ks, &[]).await.unwrap(); diff --git a/scylla/tests/integration/tablets.rs b/scylla/tests/integration/tablets.rs index a51d009c1..b570f0548 100644 --- a/scylla/tests/integration/tablets.rs +++ b/scylla/tests/integration/tablets.rs @@ -298,7 +298,7 @@ async fn test_default_policy_is_tablet_aware() { .await .unwrap(); - if !scylla::test_utils::scylla_supports_tablets(&session).await { + if !scylla::test_utils::scylla_supports_tablets_legacy(&session).await { tracing::warn!("Skipping test because this Scylla version doesn't support tablets"); return running_proxy; } @@ -429,7 +429,7 @@ async fn test_tablet_feedback_not_sent_for_unprepared_queries() { .await .unwrap(); - if !scylla::test_utils::scylla_supports_tablets(&session).await { + if !scylla::test_utils::scylla_supports_tablets_legacy(&session).await { tracing::warn!("Skipping test because this Scylla version doesn't support tablets"); return running_proxy; } @@ -501,7 +501,7 @@ async fn test_lwt_optimization_works_with_tablets() { .await .unwrap(); - if !scylla::test_utils::scylla_supports_tablets(&session).await { + if !scylla::test_utils::scylla_supports_tablets_legacy(&session).await { tracing::warn!("Skipping test because this Scylla version doesn't support tablets"); return running_proxy; } From 37ff7c66fe9756215f5e4c7fc9c710d89cdf214b Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Mon, 13 Mar 2023 14:04:10 +0100 Subject: [PATCH 09/25] session: partly de-genericise internal query/exec functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The query/execute/batch statements are generic over the statement. They started by converting the statement to corresponding type (query/execute/batch) and then continued without the need for generics. However, those functions used to be non-trivial and would have to be monomorphised for every type of the arguments passed to the method, increasing compilation time more than necessary. Now that most of the implementation was moved to do_query etc. 
methods, we can partly restrict the generic part to the public query/execute/batch methods which convert the input statement to required type and then call the do_query etc. methods. This commit does just that - partly de-genericises do_query and friends, while leaving query and friends fully generic as they used to. Co-authored-by: Wojciech Przytuła --- scylla/src/transport/session.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index f193949ca..e45ae2077 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -630,7 +630,7 @@ impl GenericSession { query: impl Into, values: impl SerializeRow, ) -> Result { - self.do_query_iter(query, values).await + self.do_query_iter(query.into(), values).await } /// Execute a prepared statement. Requires a [PreparedStatement] @@ -792,7 +792,7 @@ impl GenericSession { prepared: impl Into, values: impl SerializeRow, ) -> Result { - self.do_execute_iter(prepared, values).await + self.do_execute_iter(prepared.into(), values).await } /// Perform a batch query\ @@ -1152,11 +1152,9 @@ where async fn do_query_iter( &self, - query: impl Into, + query: Query, values: impl SerializeRow, ) -> Result { - let query: Query = query.into(); - let execution_profile = query .get_execution_profile_handle() .unwrap_or_else(|| self.get_default_execution_profile_handle()) @@ -1422,10 +1420,9 @@ where async fn do_execute_iter( &self, - prepared: impl Into, + prepared: PreparedStatement, values: impl SerializeRow, ) -> Result { - let prepared = prepared.into(); let serialized_values = prepared.serialize_values(&values)?; let execution_profile = prepared From c229ae5fac83ee1f1f70aacd033aad8ab767550f Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Mon, 13 Mar 2023 14:09:43 +0100 Subject: [PATCH 10/25] session: return new QueryResult from internal methods MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit QueryResult can be converted to LegacyQueryResult, but not the other way around. In order to support both APIs, internal methods (do_query, do_execute, etc.) need to be changed so that they return the new QueryResult. Co-authored-by: Wojciech Przytuła --- scylla/src/transport/legacy_query_result.rs | 10 --- scylla/src/transport/session.rs | 79 ++++++++++++--------- 2 files changed, 47 insertions(+), 42 deletions(-) diff --git a/scylla/src/transport/legacy_query_result.rs b/scylla/src/transport/legacy_query_result.rs index 5b26f380c..46818a297 100644 --- a/scylla/src/transport/legacy_query_result.rs +++ b/scylla/src/transport/legacy_query_result.rs @@ -56,16 +56,6 @@ pub struct LegacyQueryResult { } impl LegacyQueryResult { - pub(crate) fn mock_empty() -> Self { - Self { - rows: None, - warnings: Vec::new(), - tracing_id: None, - metadata: None, - serialized_size: 0, - } - } - /// Returns the number of received rows.\ /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows). pub fn rows_num(&self) -> Result { diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index e45ae2077..42e482ebb 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -527,7 +527,10 @@ impl GenericSession { query: impl Into, values: impl SerializeRow, ) -> Result { - self.do_query_unpaged(&query.into(), values).await + Ok(self + .do_query_unpaged(&query.into(), values) + .await? + .into_legacy_result()?) 
} /// Queries a single page from the database, optionally continuing from a saved point. @@ -584,8 +587,10 @@ impl GenericSession { values: impl SerializeRow, paging_state: PagingState, ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { - self.do_query_single_page(&query.into(), values, paging_state) - .await + let (result, paging_state_response) = self + .do_query_single_page(&query.into(), values, paging_state) + .await?; + Ok((result.into_legacy_result()?, paging_state_response)) } /// Run an unprepared query with paging\ @@ -630,7 +635,9 @@ impl GenericSession { query: impl Into, values: impl SerializeRow, ) -> Result { - self.do_query_iter(query.into(), values).await + self.do_query_iter(query.into(), values) + .await + .map(QueryPager::into_legacy) } /// Execute a prepared statement. Requires a [PreparedStatement] @@ -681,7 +688,10 @@ impl GenericSession { prepared: &PreparedStatement, values: impl SerializeRow, ) -> Result { - self.do_execute_unpaged(prepared, values).await + Ok(self + .do_execute_unpaged(prepared, values) + .await? + .into_legacy_result()?) } /// Executes a prepared statement, restricting results to single page. @@ -743,8 +753,10 @@ impl GenericSession { values: impl SerializeRow, paging_state: PagingState, ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { - self.do_execute_single_page(prepared, values, paging_state) - .await + let (result, paging_state_response) = self + .do_execute_single_page(prepared, values, paging_state) + .await?; + Ok((result.into_legacy_result()?, paging_state_response)) } /// Run a prepared query with paging.\ @@ -792,7 +804,9 @@ impl GenericSession { prepared: impl Into, values: impl SerializeRow, ) -> Result { - self.do_execute_iter(prepared.into(), values).await + self.do_execute_iter(prepared.into(), values) + .await + .map(QueryPager::into_legacy) } /// Perform a batch query\ @@ -845,7 +859,7 @@ impl GenericSession { batch: &Batch, values: impl BatchValues, ) -> Result { - self.do_batch(batch, values).await + Ok(self.do_batch(batch, values).await?.into_legacy_result()?) 
} } @@ -980,7 +994,7 @@ where &self, query: &Query, values: impl SerializeRow, - ) -> Result { + ) -> Result { let (result, paging_state_response) = self .query(query, values, None, PagingState::start()) .await?; @@ -996,7 +1010,7 @@ where query: &Query, values: impl SerializeRow, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { self.query( query, values, @@ -1023,7 +1037,7 @@ where values: impl SerializeRow, page_size: Option, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let execution_profile = query .get_execution_profile_handle() .unwrap_or_else(|| self.get_default_execution_profile_handle()) @@ -1109,10 +1123,10 @@ where self.handle_set_keyspace_response(&response).await?; self.handle_auto_await_schema_agreement(&response).await?; - let (result, paging_state) = response.into_query_result_and_paging_state()?; + let (result, paging_state_response) = response.into_query_result_and_paging_state()?; span.record_result_fields(&result); - let result = result.into_legacy_result()?; - Ok((result, paging_state)) + + Ok((result, paging_state_response)) } async fn handle_set_keyspace_response( @@ -1154,7 +1168,7 @@ where &self, query: Query, values: impl SerializeRow, - ) -> Result { + ) -> Result { let execution_profile = query .get_execution_profile_handle() .unwrap_or_else(|| self.get_default_execution_profile_handle()) @@ -1168,7 +1182,6 @@ where self.metrics.clone(), ) .await - .map(QueryPager::into_legacy) } else { // Making QueryPager::new_for_query work with values is too hard (if even possible) // so instead of sending one prepare to a specific connection on each iterator query, @@ -1183,7 +1196,6 @@ where metrics: self.metrics.clone(), }) .await - .map(QueryPager::into_legacy) } } @@ -1283,7 +1295,7 @@ where &self, prepared: &PreparedStatement, values: impl SerializeRow, - ) -> Result { + ) -> Result { let serialized_values = prepared.serialize_values(&values)?; let (result, paging_state) = self .execute(prepared, &serialized_values, None, PagingState::start()) @@ -1300,7 +1312,7 @@ where prepared: &PreparedStatement, values: impl SerializeRow, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let serialized_values = prepared.serialize_values(&values)?; let page_size = prepared.get_validated_page_size(); self.execute(prepared, &serialized_values, Some(page_size), paging_state) @@ -1323,7 +1335,7 @@ where serialized_values: &SerializedValues, page_size: Option, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { let values_ref = &serialized_values; let paging_state_ref = &paging_state; @@ -1412,17 +1424,17 @@ where self.handle_set_keyspace_response(&response).await?; self.handle_auto_await_schema_agreement(&response).await?; - let (result, paging_state) = response.into_query_result_and_paging_state()?; + let (result, paging_state_response) = response.into_query_result_and_paging_state()?; span.record_result_fields(&result); - let result = result.into_legacy_result()?; - Ok((result, paging_state)) + + Ok((result, paging_state_response)) } async fn do_execute_iter( &self, prepared: PreparedStatement, values: impl SerializeRow, - ) -> Result { + ) -> 
Result { let serialized_values = prepared.serialize_values(&values)?; let execution_profile = prepared @@ -1438,14 +1450,13 @@ where metrics: self.metrics.clone(), }) .await - .map(QueryPager::into_legacy) } async fn do_batch( &self, batch: &Batch, values: impl BatchValues, - ) -> Result { + ) -> Result { // Shard-awareness behavior for batch will be to pick shard based on first batch statement's shard // If users batch statements by shard, they will be rewarded with full shard awareness @@ -1522,10 +1533,10 @@ where .await?; let result = match run_query_result { - RunQueryResult::IgnoredWriteError => LegacyQueryResult::mock_empty(), + RunQueryResult::IgnoredWriteError => QueryResult::mock_empty(), RunQueryResult::Completed(result) => { span.record_result_fields(&result); - result.into_legacy_result()? + result } }; @@ -1717,6 +1728,7 @@ where // Get tracing info let maybe_tracing_info: Option = traces_session_res + .into_legacy_result()? .maybe_first_row_typed() .map_err(|err| match err { MaybeFirstRowTypedError::RowsExpected(e) => { @@ -1733,9 +1745,12 @@ where }; // Get tracing events - let tracing_event_rows = traces_events_res.rows_typed().map_err(|err| { - ProtocolError::Tracing(TracingProtocolError::TracesEventsNotRows(err)) - })?; + let tracing_event_rows = traces_events_res + .into_legacy_result()? + .rows_typed() + .map_err(|err| { + ProtocolError::Tracing(TracingProtocolError::TracesEventsNotRows(err)) + })?; for event in tracing_event_rows { let tracing_event: TracingEvent = event.map_err(|err| { From 22f28cd645298ff674cbe8097dc6c5b093a173b6 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Mon, 13 Mar 2023 17:34:26 +0100 Subject: [PATCH 11/25] session: add interface methods for the new deser API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implements methods related to sending queries for the new Session. Co-authored-by: Wojciech Przytuła --- scylla/src/transport/session.rs | 120 ++++++++++++++++++++++++-------- 1 file changed, 91 insertions(+), 29 deletions(-) diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 42e482ebb..9e6cf17ad 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -462,7 +462,7 @@ pub(crate) enum RunQueryResult { Completed(ResT), } -impl GenericSession { +impl GenericSession { /// Sends a request to the database and receives a response.\ /// Performs an unpaged query, i.e. all results are received in a single response. /// @@ -526,11 +526,8 @@ impl GenericSession { &self, query: impl Into, values: impl SerializeRow, - ) -> Result { - Ok(self - .do_query_unpaged(&query.into(), values) - .await? - .into_legacy_result()?) + ) -> Result { + self.do_query_unpaged(&query.into(), values).await } /// Queries a single page from the database, optionally continuing from a saved point. 
@@ -586,11 +583,9 @@ impl GenericSession { query: impl Into, values: impl SerializeRow, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { - let (result, paging_state_response) = self - .do_query_single_page(&query.into(), values, paging_state) - .await?; - Ok((result.into_legacy_result()?, paging_state_response)) + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + self.do_query_single_page(&query.into(), values, paging_state) + .await } /// Run an unprepared query with paging\ @@ -634,10 +629,8 @@ impl GenericSession { &self, query: impl Into, values: impl SerializeRow, - ) -> Result { - self.do_query_iter(query.into(), values) - .await - .map(QueryPager::into_legacy) + ) -> Result { + self.do_query_iter(query.into(), values).await } /// Execute a prepared statement. Requires a [PreparedStatement] @@ -687,11 +680,8 @@ impl GenericSession { &self, prepared: &PreparedStatement, values: impl SerializeRow, - ) -> Result { - Ok(self - .do_execute_unpaged(prepared, values) - .await? - .into_legacy_result()?) + ) -> Result { + self.do_execute_unpaged(prepared, values).await } /// Executes a prepared statement, restricting results to single page. @@ -752,11 +742,9 @@ impl GenericSession { prepared: &PreparedStatement, values: impl SerializeRow, paging_state: PagingState, - ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { - let (result, paging_state_response) = self - .do_execute_single_page(prepared, values, paging_state) - .await?; - Ok((result.into_legacy_result()?, paging_state_response)) + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + self.do_execute_single_page(prepared, values, paging_state) + .await } /// Run a prepared query with paging.\ @@ -803,10 +791,8 @@ impl GenericSession { &self, prepared: impl Into, values: impl SerializeRow, - ) -> Result { - self.do_execute_iter(prepared.into(), values) - .await - .map(QueryPager::into_legacy) + ) -> Result { + self.do_execute_iter(prepared.into(), values).await } /// Perform a batch query\ @@ -854,6 +840,82 @@ impl GenericSession { /// # Ok(()) /// # } /// ``` + pub async fn batch( + &self, + batch: &Batch, + values: impl BatchValues, + ) -> Result { + self.do_batch(batch, values).await + } +} + +impl GenericSession { + pub async fn query_unpaged( + &self, + query: impl Into, + values: impl SerializeRow, + ) -> Result { + Ok(self + .do_query_unpaged(&query.into(), values) + .await? + .into_legacy_result()?) + } + + pub async fn query_single_page( + &self, + query: impl Into, + values: impl SerializeRow, + paging_state: PagingState, + ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + let (result, paging_state_response) = self + .do_query_single_page(&query.into(), values, paging_state) + .await?; + Ok((result.into_legacy_result()?, paging_state_response)) + } + + pub async fn query_iter( + &self, + query: impl Into, + values: impl SerializeRow, + ) -> Result { + self.do_query_iter(query.into(), values) + .await + .map(QueryPager::into_legacy) + } + + pub async fn execute_unpaged( + &self, + prepared: &PreparedStatement, + values: impl SerializeRow, + ) -> Result { + Ok(self + .do_execute_unpaged(prepared, values) + .await? + .into_legacy_result()?) 
+ } + + pub async fn execute_single_page( + &self, + prepared: &PreparedStatement, + values: impl SerializeRow, + paging_state: PagingState, + ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { + let (result, paging_state_response) = self + .do_execute_single_page(prepared, values, paging_state) + .await?; + Ok((result.into_legacy_result()?, paging_state_response)) + } + + pub async fn execute_iter( + &self, + prepared: impl Into, + values: impl SerializeRow, + ) -> Result { + self.do_execute_iter(prepared.into(), values) + .await + .map(QueryPager::into_legacy) + } + pub async fn batch( &self, batch: &Batch, From 2ec2885fb784be3b49fc387ffb69a5bd1094a304 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Tue, 14 Mar 2023 08:47:39 +0100 Subject: [PATCH 12/25] connection: switch to the new deserialization framework MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adjusts `Connection::fetch_schema_version` to use the new deserialization API. Connection is meant to be an internal API, so we don't introduce a LegacyConnection for this. `Connection::{query,execute}_iter` will be migrated in a further commit. Co-authored-by: Wojciech Przytuła --- scylla/src/transport/connection.rs | 14 ++++++++++---- scylla/src/transport/errors.rs | 14 ++++++++++++-- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 018f4e75b..6b8cdf7cc 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -46,7 +46,7 @@ use std::{ net::{Ipv4Addr, Ipv6Addr}, }; -use super::errors::{ProtocolError, UseKeyspaceProtocolError}; +use super::errors::{ProtocolError, SchemaVersionFetchError, UseKeyspaceProtocolError}; use super::iterator::{LegacyRowIterator, QueryPager}; use super::locator::tablets::{RawTablet, TabletParsingError}; use super::query_result::QueryResult; @@ -1436,9 +1436,15 @@ impl Connection { let (version_id,) = self .query_unpaged(LOCAL_VERSION) .await? - .into_legacy_result()? - .single_row_typed() - .map_err(ProtocolError::SchemaVersionFetch)?; + .into_rows_result()? + .ok_or(QueryError::ProtocolError( + ProtocolError::SchemaVersionFetch(SchemaVersionFetchError::ResultNotRows), + ))? + .single_row::<(Uuid,)>() + .map_err(|err| { + ProtocolError::SchemaVersionFetch(SchemaVersionFetchError::SingleRowError(err)) + })?; + Ok(version_id) } diff --git a/scylla/src/transport/errors.rs b/scylla/src/transport/errors.rs index d95383054..64f9989bb 100644 --- a/scylla/src/transport/errors.rs +++ b/scylla/src/transport/errors.rs @@ -32,7 +32,7 @@ use thiserror::Error; use crate::{authentication::AuthError, frame::response}; -use super::legacy_query_result::{RowsExpectedError, SingleRowTypedError}; +use super::{legacy_query_result::RowsExpectedError, query_result::SingleRowError}; /// Error that occurred during query execution #[derive(Error, Debug, Clone)] @@ -304,7 +304,7 @@ pub enum ProtocolError { /// A protocol error appeared during schema version fetch. #[error("Schema version fetch protocol error: {0}")] - SchemaVersionFetch(SingleRowTypedError), + SchemaVersionFetch(#[from] SchemaVersionFetchError), /// A result with nonfinished paging state received for unpaged query. #[error("Unpaged query returned a non-empty paging state! This is a driver-side or server-side bug.")] @@ -345,6 +345,16 @@ pub enum UseKeyspaceProtocolError { UnexpectedResponse(CqlResponseKind), } +/// A protocol error that occurred during schema version fetch. 
+#[derive(Error, Debug, Clone)] +#[non_exhaustive] +pub enum SchemaVersionFetchError { + #[error("Schema version query returned non-rows result")] + ResultNotRows, + #[error(transparent)] + SingleRowError(SingleRowError), +} + /// A protocol error that occurred during tracing info fetch. #[derive(Error, Debug, Clone)] #[non_exhaustive] From b3f4a0437eb9a6042b2448c116e51dc450082aa3 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Tue, 14 Mar 2023 14:52:58 +0100 Subject: [PATCH 13/25] caching_session: make generic over session APIs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In a similar fashion to Session, CachingSession was also made generic over the session kind. Co-authored-by: Wojciech Przytuła --- scylla/src/lib.rs | 2 +- scylla/src/transport/caching_session.rs | 128 ++++++++++++++++++++---- scylla/src/transport/session_test.rs | 6 +- 3 files changed, 111 insertions(+), 25 deletions(-) diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index f8f5d4ea9..d529e8141 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -256,7 +256,7 @@ pub use statement::query; pub use frame::response::cql_to_rust; pub use frame::response::cql_to_rust::FromRow; -pub use transport::caching_session::CachingSession; +pub use transport::caching_session::{CachingSession, GenericCachingSession, LegacyCachingSession}; pub use transport::execution_profile::ExecutionProfile; pub use transport::legacy_query_result::LegacyQueryResult; pub use transport::query_result::{QueryResult, QueryRowsResult}; diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index 5e77c48df..efb8737c2 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -5,7 +5,7 @@ use crate::statement::{PagingState, PagingStateResponse}; use crate::transport::errors::QueryError; use crate::transport::iterator::LegacyRowIterator; use crate::transport::partitioner::PartitionerName; -use crate::{LegacyQueryResult, LegacySession}; +use crate::{LegacyQueryResult, QueryResult}; use bytes::Bytes; use dashmap::DashMap; use futures::future::try_join_all; @@ -16,6 +16,11 @@ use std::collections::hash_map::RandomState; use std::hash::BuildHasher; use std::sync::Arc; +use super::iterator::QueryPager; +use super::session::{ + CurrentDeserializationApi, DeserializationApiKind, GenericSession, LegacyDeserializationApi, +}; + /// Contains just the parts of a prepared statement that were returned /// from the database. All remaining parts (query string, page size, /// consistency, etc.) 
are taken from the Query passed @@ -31,11 +36,12 @@ struct RawPreparedStatementData { /// Provides auto caching while executing queries #[derive(Debug)] -pub struct CachingSession +pub struct GenericCachingSession where S: Clone + BuildHasher, + DeserializationApi: DeserializationApiKind, { - session: LegacySession, + session: GenericSession, /// The prepared statement cache size /// If a prepared statement is added while the limit is reached, the oldest prepared statement /// is removed from the cache @@ -43,11 +49,15 @@ where cache: DashMap, } -impl CachingSession +pub type CachingSession = GenericCachingSession; +pub type LegacyCachingSession = GenericCachingSession; + +impl GenericCachingSession where S: Default + BuildHasher + Clone, + DeserApi: DeserializationApiKind, { - pub fn from(session: LegacySession, cache_size: usize) -> Self { + pub fn from(session: GenericSession, cache_size: usize) -> Self { Self { session, max_capacity: cache_size, @@ -56,20 +66,88 @@ where } } -impl CachingSession +impl GenericCachingSession where S: BuildHasher + Clone, + DeserApi: DeserializationApiKind, { /// Builds a [`CachingSession`] from a [`Session`], a cache size, and a [`BuildHasher`]., /// using a customer hasher. - pub fn with_hasher(session: LegacySession, cache_size: usize, hasher: S) -> Self { + pub fn with_hasher(session: GenericSession, cache_size: usize, hasher: S) -> Self { Self { session, max_capacity: cache_size, cache: DashMap::with_hasher(hasher), } } +} +impl GenericCachingSession +where + S: BuildHasher + Clone, +{ + /// Does the same thing as [`Session::execute_unpaged`] but uses the prepared statement cache + pub async fn execute_unpaged( + &self, + query: impl Into, + values: impl SerializeRow, + ) -> Result { + let query = query.into(); + let prepared = self.add_prepared_statement_owned(query).await?; + self.session.execute_unpaged(&prepared, values).await + } + + /// Does the same thing as [`Session::execute_iter`] but uses the prepared statement cache + pub async fn execute_iter( + &self, + query: impl Into, + values: impl SerializeRow, + ) -> Result { + let query = query.into(); + let prepared = self.add_prepared_statement_owned(query).await?; + self.session.execute_iter(prepared, values).await + } + + /// Does the same thing as [`Session::execute_single_page`] but uses the prepared statement cache + pub async fn execute_single_page( + &self, + query: impl Into, + values: impl SerializeRow, + paging_state: PagingState, + ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + let query = query.into(); + let prepared = self.add_prepared_statement_owned(query).await?; + self.session + .execute_single_page(&prepared, values, paging_state) + .await + } + + /// Does the same thing as [`Session::batch`] but uses the prepared statement cache\ + /// Prepares batch using CachingSession::prepare_batch if needed and then executes it + pub async fn batch( + &self, + batch: &Batch, + values: impl BatchValues, + ) -> Result { + let all_prepared: bool = batch + .statements + .iter() + .all(|stmt| matches!(stmt, BatchStatement::PreparedStatement(_))); + + if all_prepared { + self.session.batch(batch, &values).await + } else { + let prepared_batch: Batch = self.prepare_batch(batch).await?; + + self.session.batch(&prepared_batch, &values).await + } + } +} + +impl GenericCachingSession +where + S: BuildHasher + Clone, +{ /// Does the same thing as [`Session::execute_unpaged`] but uses the prepared statement cache pub async fn execute_unpaged( &self, @@ -126,7 +204,13 @@ where 
self.session.batch(&prepared_batch, &values).await } } +} +impl GenericCachingSession +where + S: BuildHasher + Clone, + DeserApi: DeserializationApiKind, +{ /// Prepares all statements within the batch and returns a new batch where every /// statement is prepared. /// Uses the prepared statements cache. @@ -212,7 +296,7 @@ where self.max_capacity } - pub fn get_session(&self) -> &LegacySession { + pub fn get_session(&self) -> &GenericSession { &self.session } } @@ -229,7 +313,7 @@ mod tests { use crate::{ batch::{Batch, BatchStatement}, prepared_statement::PreparedStatement, - CachingSession, LegacySession, + LegacyCachingSession, LegacySession, }; use futures::TryStreamExt; use std::collections::BTreeSet; @@ -273,8 +357,8 @@ mod tests { session } - async fn create_caching_session() -> CachingSession { - let session = CachingSession::from(new_for_test(true).await, 2); + async fn create_caching_session() -> LegacyCachingSession { + let session = LegacyCachingSession::from(new_for_test(true).await, 2); // Add a row, this makes it easier to check if the caching works combined with the regular execute fn on Session session @@ -385,7 +469,7 @@ mod tests { } async fn assert_test_batch_table_rows_contain( - sess: &CachingSession, + sess: &LegacyCachingSession, expected_rows: &[(i32, i32)], ) { let selected_rows: BTreeSet<(i32, i32)> = sess @@ -431,18 +515,18 @@ mod tests { } } - let _session: CachingSession = - CachingSession::from(new_for_test(true).await, 2); - let _session: CachingSession = - CachingSession::from(new_for_test(true).await, 2); - let _session: CachingSession = - CachingSession::with_hasher(new_for_test(true).await, 2, Default::default()); + let _session: LegacyCachingSession = + LegacyCachingSession::from(new_for_test(true).await, 2); + let _session: LegacyCachingSession = + LegacyCachingSession::from(new_for_test(true).await, 2); + let _session: LegacyCachingSession = + LegacyCachingSession::with_hasher(new_for_test(true).await, 2, Default::default()); } #[tokio::test] async fn test_batch() { setup_tracing(); - let session: CachingSession = create_caching_session().await; + let session: LegacyCachingSession = create_caching_session().await; session .execute_unpaged( @@ -565,7 +649,8 @@ mod tests { #[tokio::test] async fn test_parameters_caching() { setup_tracing(); - let session: CachingSession = CachingSession::from(new_for_test(true).await, 100); + let session: LegacyCachingSession = + LegacyCachingSession::from(new_for_test(true).await, 100); session .execute_unpaged("CREATE TABLE tbl (a int PRIMARY KEY, b int)", ()) @@ -618,7 +703,8 @@ mod tests { } // This test uses CDC which is not yet compatible with Scylla's tablets. 
- let session: CachingSession = CachingSession::from(new_for_test(false).await, 100); + let session: LegacyCachingSession = + LegacyCachingSession::from(new_for_test(false).await, 100); session .execute_unpaged( diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index 8923c85fb..1dddb7d40 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -19,8 +19,8 @@ use crate::transport::topology::{ use crate::utils::test_utils::{ create_new_session_builder, supports_feature, unique_keyspace_name, }; -use crate::CachingSession; use crate::ExecutionProfile; +use crate::LegacyCachingSession; use crate::LegacyQueryResult; use crate::{LegacySession, SessionBuilder}; use assert_matches::assert_matches; @@ -2012,7 +2012,7 @@ async fn rename(session: &LegacySession, rename_str: &str) { .unwrap(); } -async fn rename_caching(session: &CachingSession, rename_str: &str) { +async fn rename_caching(session: &LegacyCachingSession, rename_str: &str) { session .execute_unpaged(format!("ALTER TABLE tab RENAME {}", rename_str), &()) .await @@ -2230,7 +2230,7 @@ async fn test_unprepared_reprepare_in_caching_session_execute() { session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); - let caching_session: CachingSession = CachingSession::from(session, 64); + let caching_session: LegacyCachingSession = LegacyCachingSession::from(session, 64); caching_session .execute_unpaged( From 001b5bbcfccfd18666225ae1df27483de2d423da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Wed, 6 Nov 2024 18:18:07 +0100 Subject: [PATCH 14/25] caching_session: fix docstring references After Session was made generic wrt deserialization API, the references got broken. --- scylla/src/transport/caching_session.rs | 33 ++++++++++++++++--------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index efb8737c2..841c4e7b9 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -71,8 +71,8 @@ where S: BuildHasher + Clone, DeserApi: DeserializationApiKind, { - /// Builds a [`CachingSession`] from a [`Session`], a cache size, and a [`BuildHasher`]., - /// using a customer hasher. + /// Builds a [`CachingSession`] from a [`Session`](GenericSession), a cache size, + /// and a [`BuildHasher`], using a customer hasher. pub fn with_hasher(session: GenericSession, cache_size: usize, hasher: S) -> Self { Self { session, @@ -86,7 +86,8 @@ impl GenericCachingSession where S: BuildHasher + Clone, { - /// Does the same thing as [`Session::execute_unpaged`] but uses the prepared statement cache + /// Does the same thing as [`Session::execute_unpaged`](GenericSession::execute_unpaged) + /// but uses the prepared statement cache. pub async fn execute_unpaged( &self, query: impl Into, @@ -97,7 +98,8 @@ where self.session.execute_unpaged(&prepared, values).await } - /// Does the same thing as [`Session::execute_iter`] but uses the prepared statement cache + /// Does the same thing as [`Session::execute_iter`](GenericSession::execute_iter) + /// but uses the prepared statement cache. 
pub async fn execute_iter( &self, query: impl Into, @@ -108,7 +110,8 @@ where self.session.execute_iter(prepared, values).await } - /// Does the same thing as [`Session::execute_single_page`] but uses the prepared statement cache + /// Does the same thing as [`Session::execute_single_page`](GenericSession::execute_single_page) + /// but uses the prepared statement cache. pub async fn execute_single_page( &self, query: impl Into, @@ -122,8 +125,10 @@ where .await } - /// Does the same thing as [`Session::batch`] but uses the prepared statement cache\ - /// Prepares batch using CachingSession::prepare_batch if needed and then executes it + /// Does the same thing as [`Session::batch`](GenericSession::batch) but uses the + /// prepared statement cache.\ + /// Prepares batch using [`CachingSession::prepare_batch`](GenericCachingSession::prepare_batch) + /// if needed and then executes it. pub async fn batch( &self, batch: &Batch, @@ -148,7 +153,8 @@ impl GenericCachingSession where S: BuildHasher + Clone, { - /// Does the same thing as [`Session::execute_unpaged`] but uses the prepared statement cache + /// Does the same thing as [`Session::execute_unpaged`](GenericSession::execute_unpaged) + /// but uses the prepared statement cache. pub async fn execute_unpaged( &self, query: impl Into, @@ -159,7 +165,8 @@ where self.session.execute_unpaged(&prepared, values).await } - /// Does the same thing as [`Session::execute_iter`] but uses the prepared statement cache + /// Does the same thing as [`Session::execute_iter`](GenericSession::execute_iter) + /// but uses the prepared statement cache. pub async fn execute_iter( &self, query: impl Into, @@ -170,7 +177,8 @@ where self.session.execute_iter(prepared, values).await } - /// Does the same thing as [`Session::execute_single_page`] but uses the prepared statement cache + /// Does the same thing as [`Session::execute_single_page`](GenericSession::execute_single_page) + /// but uses the prepared statement cache. pub async fn execute_single_page( &self, query: impl Into, @@ -184,8 +192,9 @@ where .await } - /// Does the same thing as [`Session::batch`] but uses the prepared statement cache\ - /// Prepares batch using CachingSession::prepare_batch if needed and then executes it + /// Does the same thing as [`Session::batch`](GenericSession::batch) but uses + /// the prepared statement cache.\ + /// Prepares batch using CachingSession::prepare_batch if needed and then executes it. pub async fn batch( &self, batch: &Batch, From db6bee0ed21978f606aef0015928cdcea27f9442 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Tue, 14 Mar 2023 14:53:08 +0100 Subject: [PATCH 15/25] caching_session: modernize tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adjusts the CachingSession tests to use the new deserialization interface. 
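In practice the change boils down to the result-consumption side of each test; a rough before/after sketch (the `test_table` schema and column names are placeholders, unwraps kept as in the tests themselves):

```rust
// Sketch: reading typed rows through the new deserialization interface.
// Old API: call `.rows_typed::<(i32, i32)>()` directly on a LegacyQueryResult.
// New API: convert the QueryResult into a rows result first, then deserialize lazily.
async fn read_pairs(session: &scylla::CachingSession) -> Vec<(i32, i32)> {
    session
        .execute_unpaged("SELECT a, b FROM test_table", ())
        .await
        .unwrap()
        .into_rows_result() // Result<Option<QueryRowsResult>, _> at this point of the series
        .unwrap()
        .unwrap()
        .rows::<(i32, i32)>() // typed, lazily deserializing iterator over the rows
        .unwrap()
        .map(|row| row.unwrap())
        .collect()
}
```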
Co-authored-by: Wojciech Przytuła --- scylla/src/transport/caching_session.rs | 65 +++++++++++++++---------- scylla/src/utils/test_utils.rs | 21 +++++++- 2 files changed, 58 insertions(+), 28 deletions(-) diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index 841c4e7b9..d6e9db700 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -314,22 +314,22 @@ where mod tests { use crate::query::Query; use crate::statement::PagingState; - use crate::test_utils::{ - create_new_session_builder, scylla_supports_tablets_legacy, setup_tracing, - }; + use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing}; use crate::transport::partitioner::PartitionerName; + use crate::transport::session::Session; use crate::utils::test_utils::unique_keyspace_name; use crate::{ batch::{Batch, BatchStatement}, prepared_statement::PreparedStatement, - LegacyCachingSession, LegacySession, + CachingSession, }; use futures::TryStreamExt; + use scylla_cql::frame::response::result::Row; use std::collections::BTreeSet; - async fn new_for_test(with_tablet_support: bool) -> LegacySession { + async fn new_for_test(with_tablet_support: bool) -> Session { let session = create_new_session_builder() - .build_legacy() + .build() .await .expect("Could not create session"); let ks = unique_keyspace_name(); @@ -338,7 +338,7 @@ mod tests { "CREATE KEYSPACE IF NOT EXISTS {ks} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}" ); - if !with_tablet_support && scylla_supports_tablets_legacy(&session).await { + if !with_tablet_support && scylla_supports_tablets(&session).await { create_ks += " AND TABLETS = {'enabled': false}"; } @@ -366,8 +366,8 @@ mod tests { session } - async fn create_caching_session() -> LegacyCachingSession { - let session = LegacyCachingSession::from(new_for_test(true).await, 2); + async fn create_caching_session() -> CachingSession { + let session = CachingSession::from(new_for_test(true).await, 2); // Add a row, this makes it easier to check if the caching works combined with the regular execute fn on Session session @@ -428,17 +428,20 @@ mod tests { .execute_unpaged("select * from test_table", &[]) .await .unwrap(); + let result_rows = result.into_rows_result().unwrap().unwrap(); assert_eq!(1, session.cache.len()); - assert_eq!(1, result.rows_num().unwrap()); + assert_eq!(1, result_rows.rows_num()); let result = session .execute_unpaged("select * from test_table", &[]) .await .unwrap(); + let result_rows = result.into_rows_result().unwrap().unwrap(); + assert_eq!(1, session.cache.len()); - assert_eq!(1, result.rows_num().unwrap()); + assert_eq!(1, result_rows.rows_num()); } /// Checks that caching works with execute_iter @@ -452,7 +455,10 @@ mod tests { let iter = session .execute_iter("select * from test_table", &[]) .await - .unwrap(); + .unwrap() + .rows_stream::() + .unwrap() + .into_stream(); let rows = iter.try_collect::>().await.unwrap().len(); @@ -474,18 +480,21 @@ mod tests { .unwrap(); assert_eq!(1, session.cache.len()); - assert_eq!(1, result.rows_num().unwrap()); + assert_eq!(1, result.into_rows_result().unwrap().unwrap().rows_num()); } async fn assert_test_batch_table_rows_contain( - sess: &LegacyCachingSession, + sess: &CachingSession, expected_rows: &[(i32, i32)], ) { let selected_rows: BTreeSet<(i32, i32)> = sess .execute_unpaged("SELECT a, b FROM test_batch_table", ()) .await .unwrap() - .rows_typed::<(i32, i32)>() + .into_rows_result() + 
.unwrap() + .unwrap() + .rows::<(i32, i32)>() .unwrap() .map(|r| r.unwrap()) .collect(); @@ -524,18 +533,18 @@ mod tests { } } - let _session: LegacyCachingSession = - LegacyCachingSession::from(new_for_test(true).await, 2); - let _session: LegacyCachingSession = - LegacyCachingSession::from(new_for_test(true).await, 2); - let _session: LegacyCachingSession = - LegacyCachingSession::with_hasher(new_for_test(true).await, 2, Default::default()); + let _session: CachingSession = + CachingSession::from(new_for_test(true).await, 2); + let _session: CachingSession = + CachingSession::from(new_for_test(true).await, 2); + let _session: CachingSession = + CachingSession::with_hasher(new_for_test(true).await, 2, Default::default()); } #[tokio::test] async fn test_batch() { setup_tracing(); - let session: LegacyCachingSession = create_caching_session().await; + let session: CachingSession = create_caching_session().await; session .execute_unpaged( @@ -658,8 +667,7 @@ mod tests { #[tokio::test] async fn test_parameters_caching() { setup_tracing(); - let session: LegacyCachingSession = - LegacyCachingSession::from(new_for_test(true).await, 100); + let session: CachingSession = CachingSession::from(new_for_test(true).await, 100); session .execute_unpaged("CREATE TABLE tbl (a int PRIMARY KEY, b int)", ()) @@ -695,7 +703,11 @@ mod tests { .execute_unpaged("SELECT b, WRITETIME(b) FROM tbl", ()) .await .unwrap() - .rows_typed_or_empty::<(i32, i64)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i64)>() + .unwrap() .collect::, _>>() .unwrap(); @@ -712,8 +724,7 @@ mod tests { } // This test uses CDC which is not yet compatible with Scylla's tablets. - let session: LegacyCachingSession = - LegacyCachingSession::from(new_for_test(false).await, 100); + let session: CachingSession = CachingSession::from(new_for_test(false).await, 100); session .execute_unpaged( diff --git a/scylla/src/utils/test_utils.rs b/scylla/src/utils/test_utils.rs index a5c0dadab..e6286c607 100644 --- a/scylla/src/utils/test_utils.rs +++ b/scylla/src/utils/test_utils.rs @@ -1,6 +1,8 @@ +use scylla_cql::frame::response::result::Row; + #[cfg(test)] use crate::transport::session_builder::{GenericSessionBuilder, SessionBuilderKind}; -use crate::LegacySession; +use crate::{LegacySession, Session}; #[cfg(test)] use std::{num::NonZeroU32, time::Duration}; use std::{ @@ -106,6 +108,23 @@ pub async fn scylla_supports_tablets_legacy(session: &LegacySession) -> bool { result.single_row().is_ok() } +pub async fn scylla_supports_tablets(session: &Session) -> bool { + let result = session + .query_unpaged( + "select column_name from system_schema.columns where + keyspace_name = 'system_schema' + and table_name = 'scylla_keyspaces' + and column_name = 'initial_tablets'", + &[], + ) + .await + .unwrap() + .into_rows_result() + .unwrap(); + + result.map_or(false, |rows_result| rows_result.single_row::().is_ok()) +} + #[cfg(test)] pub(crate) fn setup_tracing() { let _ = tracing_subscriber::fmt::fmt() From 6d9d9712a3dfeae5bf303550d1c5e630b718d5bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 12 Mar 2024 15:53:56 +0100 Subject: [PATCH 16/25] connection: migrate query_iter to new deserialization framework The Connection::query_iter method is changed to use the new deserialization framework. All the internal uses of it in topology.rs are adjusted. 
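For illustration, a minimal sketch of the consumer side of this change, written against the session-level query_iter (the keyspace, table and column types below are illustrative assumptions, not part of this patch; the internal Connection::query_iter follows the same pager-then-rows_stream pattern):

    use futures::TryStreamExt as _;

    // query_iter now hands back a QueryPager; the caller chooses the row type
    // lazily with rows_stream::<T>(), which type-checks the column specs up
    // front and then deserializes each page on the fly.
    let pager = session.query_iter("SELECT pk, v FROM ks.t", &[]).await.unwrap();
    let rows: Vec<(i32, String)> = pager
        .rows_stream::<(i32, String)>()
        .unwrap()
        .try_collect()
        .await
        .unwrap();
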
Co-authored-by: Piotr Dulikowski --- scylla/src/transport/connection.rs | 15 ++-- scylla/src/transport/errors.rs | 12 +-- scylla/src/transport/topology.rs | 128 ++++++++++++++++------------- 3 files changed, 86 insertions(+), 69 deletions(-) diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 6b8cdf7cc..7f6eec8c3 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -47,7 +47,7 @@ use std::{ }; use super::errors::{ProtocolError, SchemaVersionFetchError, UseKeyspaceProtocolError}; -use super::iterator::{LegacyRowIterator, QueryPager}; +use super::iterator::QueryPager; use super::locator::tablets::{RawTablet, TabletParsingError}; use super::query_result::QueryResult; use super::session::AddressTranslator; @@ -1182,7 +1182,7 @@ impl Connection { pub(crate) async fn query_iter( self: Arc, query: Query, - ) -> Result { + ) -> Result { let consistency = query .config .determine_consistency(self.config.default_consistency); @@ -1190,7 +1190,6 @@ impl Connection { QueryPager::new_for_connection_query_iter(query, self, consistency, serial_consistency) .await - .map(QueryPager::into_legacy) } /// Executes a prepared statements and fetches its results over multiple pages, using @@ -1199,7 +1198,7 @@ impl Connection { self: Arc, prepared_statement: PreparedStatement, values: SerializedValues, - ) -> Result { + ) -> Result { let consistency = prepared_statement .config .determine_consistency(self.config.default_consistency); @@ -1213,7 +1212,6 @@ impl Connection { serial_consistency, ) .await - .map(QueryPager::into_legacy) } #[allow(dead_code)] @@ -2479,6 +2477,8 @@ mod tests { .query_iter(select_query.clone()) .await .unwrap() + .rows_stream::<(i32,)>() + .unwrap() .try_collect::>() .await .unwrap(); @@ -2503,7 +2503,8 @@ mod tests { .query_iter(select_query.clone()) .await .unwrap() - .into_typed::<(i32,)>() + .rows_stream::<(i32,)>() + .unwrap() .map(|ret| ret.unwrap().0) .collect::>() .await; @@ -2517,6 +2518,8 @@ mod tests { )) .await .unwrap() + .rows_stream::<()>() + .unwrap() .try_collect::>() .await .unwrap(); diff --git a/scylla/src/transport/errors.rs b/scylla/src/transport/errors.rs index 64f9989bb..478d03f09 100644 --- a/scylla/src/transport/errors.rs +++ b/scylla/src/transport/errors.rs @@ -25,7 +25,7 @@ use scylla_cql::{ response::CqlResponseKind, value::SerializeValuesError, }, - types::serialize::SerializationError, + types::{deserialize::TypeCheckError, serialize::SerializationError}, }; use thiserror::Error; @@ -436,7 +436,7 @@ pub enum PeersMetadataError { pub enum KeyspacesMetadataError { /// system_schema.keyspaces has invalid column type. #[error("system_schema.keyspaces has invalid column type: {0}")] - SchemaKeyspacesInvalidColumnType(FromRowError), + SchemaKeyspacesInvalidColumnType(TypeCheckError), /// Bad keyspace replication strategy. #[error("Bad keyspace <{keyspace}> replication strategy: {error}")] @@ -474,7 +474,7 @@ pub enum KeyspaceStrategyError { pub enum UdtMetadataError { /// system_schema.types has invalid column type. #[error("system_schema.types has invalid column type: {0}")] - SchemaTypesInvalidColumnType(FromRowError), + SchemaTypesInvalidColumnType(TypeCheckError), /// Circular UDT dependency detected. #[error("Detected circular dependency between user defined types - toposort is impossible!")] @@ -487,11 +487,11 @@ pub enum UdtMetadataError { pub enum TablesMetadataError { /// system_schema.tables has invalid column type. 
#[error("system_schema.tables has invalid column type: {0}")] - SchemaTablesInvalidColumnType(FromRowError), + SchemaTablesInvalidColumnType(TypeCheckError), /// system_schema.columns has invalid column type. #[error("system_schema.columns has invalid column type: {0}")] - SchemaColumnsInvalidColumnType(FromRowError), + SchemaColumnsInvalidColumnType(TypeCheckError), /// Unknown column kind. #[error("Unknown column kind '{column_kind}' for {keyspace_name}.{table_name}.{column_name}")] @@ -509,7 +509,7 @@ pub enum TablesMetadataError { pub enum ViewsMetadataError { /// system_schema.views has invalid column type. #[error("system_schema.views has invalid column type: {0}")] - SchemaViewsInvalidColumnType(FromRowError), + SchemaViewsInvalidColumnType(TypeCheckError), } /// Error caused by caller creating an invalid query diff --git a/scylla/src/transport/topology.rs b/scylla/src/transport/topology.rs index 93a80d2fa..0e0cc2caa 100644 --- a/scylla/src/transport/topology.rs +++ b/scylla/src/transport/topology.rs @@ -13,8 +13,10 @@ use futures::stream::{self, StreamExt, TryStreamExt}; use futures::Stream; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; -use scylla_cql::frame::response::result::Row; -use scylla_macros::FromRow; +use scylla_cql::frame::frame_errors::RowsParseError; +use scylla_cql::types::deserialize::row::DeserializeRow; +use scylla_cql::types::deserialize::TypeCheckError; +use scylla_macros::DeserializeRow; use std::borrow::BorrowMut; use std::cell::Cell; use std::collections::HashMap; @@ -765,11 +767,13 @@ async fn query_metadata( Ok(Metadata { peers, keyspaces }) } -#[derive(FromRow)] -#[scylla_crate = "scylla_cql"] +#[derive(DeserializeRow)] +#[scylla(crate = "scylla_cql")] struct NodeInfoRow { host_id: Option, + #[scylla(rename = "rpc_address")] untranslated_ip_addr: IpAddr, + #[scylla(rename = "data_center")] datacenter: Option, rack: Option, tokens: Option>, @@ -799,6 +803,13 @@ async fn query_peers(conn: &Arc, connect_port: u16) -> Result() + .map_err(RowsParseError::from)?; + Ok::<_, QueryError>(rows_stream) + }) .into_stream() .try_flatten() .and_then(|row_result| future::ok((NodeInfoSource::Peer, row_result))); @@ -809,6 +820,13 @@ async fn query_peers(conn: &Arc, connect_port: u16) -> Result() + .map_err(RowsParseError::from)?; + Ok::<_, QueryError>(rows_stream) + }) .into_stream() .try_flatten() .and_then(|row_result| future::ok((NodeInfoSource::Local, row_result))); @@ -819,9 +837,8 @@ async fn query_peers(conn: &Arc, connect_port: u16) -> Result create_peer_from_row(source, row, local_address).await, + match row_result { + Ok((source, row)) => create_peer_from_row(source, row, local_address).await, Err(err) => { warn!( "system.peers or system.local has an invalid row, skipping it: {}", @@ -905,15 +922,19 @@ async fn create_peer_from_row( })) } -fn query_filter_keyspace_name<'a>( +fn query_filter_keyspace_name<'a, R>( conn: &Arc, query_str: &'a str, keyspaces_to_fetch: &'a [String], -) -> impl Stream> + 'a { + convert_typecheck_error: impl FnOnce(TypeCheckError) -> MetadataError + 'a, +) -> impl Stream> + 'a +where + R: for<'r> DeserializeRow<'r, 'r> + 'static, +{ let conn = conn.clone(); let fut = async move { - if keyspaces_to_fetch.is_empty() { + let pager = if keyspaces_to_fetch.is_empty() { let mut query = Query::new(query_str); query.set_page_size(METADATA_QUERY_PAGE_SIZE); @@ -928,7 +949,11 @@ fn query_filter_keyspace_name<'a>( let prepared = conn.prepare(&query).await?; let serialized_values = prepared.serialize_values(&keyspaces)?; 
conn.execute_iter(prepared, serialized_values).await - } + }?; + + let stream: super::iterator::TypedRowStream = + pager.rows_stream::().map_err(convert_typecheck_error)?; + Ok::<_, QueryError>(stream) }; fut.into_stream().try_flatten() } @@ -938,10 +963,15 @@ async fn query_keyspaces( keyspaces_to_fetch: &[String], fetch_schema: bool, ) -> Result, QueryError> { - let rows = query_filter_keyspace_name( + let rows = query_filter_keyspace_name::<(String, HashMap)>( conn, "select keyspace_name, replication from system_schema.keyspaces", keyspaces_to_fetch, + |err| { + MetadataError::Keyspaces(KeyspacesMetadataError::SchemaKeyspacesInvalidColumnType( + err, + )) + }, ); let (mut all_tables, mut all_views, mut all_user_defined_types) = if fetch_schema { @@ -956,12 +986,7 @@ async fn query_keyspaces( }; rows.map(|row_result| { - let row = row_result?; - let (keyspace_name, strategy_map) = row.into_typed::<(String, _)>().map_err(|err| { - MetadataError::Keyspaces(KeyspacesMetadataError::SchemaKeyspacesInvalidColumnType( - err, - )) - })?; + let (keyspace_name, strategy_map) = row_result?; let strategy: Strategy = strategy_from_string_map(strategy_map).map_err(|error| { MetadataError::Keyspaces(KeyspacesMetadataError::Strategy { @@ -988,8 +1013,8 @@ async fn query_keyspaces( .await } -#[derive(FromRow, Debug)] -#[scylla_crate = "crate"] +#[derive(DeserializeRow, Debug)] +#[scylla(crate = "crate")] struct UdtRow { keyspace_name: String, type_name: String, @@ -1031,21 +1056,16 @@ async fn query_user_defined_types( conn: &Arc, keyspaces_to_fetch: &[String], ) -> Result>>, QueryError> { - let rows = query_filter_keyspace_name( + let rows = query_filter_keyspace_name::( conn, "select keyspace_name, type_name, field_names, field_types from system_schema.types", keyspaces_to_fetch, + |err| MetadataError::Udts(UdtMetadataError::SchemaTypesInvalidColumnType(err)), ); let mut udt_rows: Vec = rows .map(|row_result| { - let row = row_result?; - let udt_row = row - .into_typed::() - .map_err(|err| { - MetadataError::Udts(UdtMetadataError::SchemaTypesInvalidColumnType(err)) - })? 
- .try_into()?; + let udt_row = row_result?.try_into()?; Ok::<_, QueryError>(udt_row) }) @@ -1355,21 +1375,17 @@ async fn query_tables( keyspaces_to_fetch: &[String], udts: &HashMap>>, ) -> Result>, QueryError> { - let rows = query_filter_keyspace_name( + let rows = query_filter_keyspace_name::<(String, String)>( conn, "SELECT keyspace_name, table_name FROM system_schema.tables", keyspaces_to_fetch, + |err| MetadataError::Tables(TablesMetadataError::SchemaTablesInvalidColumnType(err)), ); let mut result = HashMap::new(); let mut tables = query_tables_schema(conn, keyspaces_to_fetch, udts).await?; rows.map(|row_result| { - let row = row_result?; - let (keyspace_name, table_name) = row.into_typed().map_err(|err| { - MetadataError::Tables(TablesMetadataError::SchemaTablesInvalidColumnType(err)) - })?; - - let keyspace_and_table_name = (keyspace_name, table_name); + let keyspace_and_table_name = row_result?; let table = tables.remove(&keyspace_and_table_name).unwrap_or(Table { columns: HashMap::new(), @@ -1396,20 +1412,18 @@ async fn query_views( keyspaces_to_fetch: &[String], udts: &HashMap>>, ) -> Result>, QueryError> { - let rows = query_filter_keyspace_name( + let rows = query_filter_keyspace_name::<(String, String, String)>( conn, "SELECT keyspace_name, view_name, base_table_name FROM system_schema.views", keyspaces_to_fetch, + |err| MetadataError::Views(ViewsMetadataError::SchemaViewsInvalidColumnType(err)), ); let mut result = HashMap::new(); let mut tables = query_tables_schema(conn, keyspaces_to_fetch, udts).await?; rows.map(|row_result| { - let row = row_result?; - let (keyspace_name, view_name, base_table_name) = row.into_typed().map_err(|err| { - MetadataError::Views(ViewsMetadataError::SchemaViewsInvalidColumnType(err)) - })?; + let (keyspace_name, view_name, base_table_name) = row_result?; let keyspace_and_view_name = (keyspace_name, view_name); @@ -1447,24 +1461,18 @@ async fn query_tables_schema( // This column shouldn't be exposed to the user but is currently exposed in system tables. 
const THRIFT_EMPTY_TYPE: &str = "empty"; - let rows = query_filter_keyspace_name(conn, - "select keyspace_name, table_name, column_name, kind, position, type from system_schema.columns", keyspaces_to_fetch + type RowType = (String, String, String, String, i32, String); + + let rows = query_filter_keyspace_name::(conn, + "select keyspace_name, table_name, column_name, kind, position, type from system_schema.columns", keyspaces_to_fetch, |err| { + MetadataError::Tables(TablesMetadataError::SchemaColumnsInvalidColumnType(err)) + } ); let mut tables_schema = HashMap::new(); rows.map(|row_result| { - let row = row_result?; - let (keyspace_name, table_name, column_name, kind, position, type_): ( - String, - String, - String, - String, - i32, - String, - ) = row.into_typed().map_err(|err| { - MetadataError::Tables(TablesMetadataError::SchemaColumnsInvalidColumnType(err)) - })?; + let (keyspace_name, table_name, column_name, kind, position, type_) = row_result?; if type_ == THRIFT_EMPTY_TYPE { return Ok::<_, QueryError>(()); @@ -1674,15 +1682,21 @@ async fn query_table_partitioners( let rows = conn .clone() .query_iter(partitioner_query) + .map(|pager_res| { + let pager = pager_res?; + let stream = pager + .rows_stream::<(String, String, Option)>() + .map_err(|err| { + MetadataError::Tables(TablesMetadataError::SchemaTablesInvalidColumnType(err)) + })?; + Ok::<_, QueryError>(stream) + }) .into_stream() .try_flatten(); let result = rows .map(|row_result| { - let (keyspace_name, table_name, partitioner) = - row_result?.into_typed().map_err(|err| { - MetadataError::Tables(TablesMetadataError::SchemaTablesInvalidColumnType(err)) - })?; + let (keyspace_name, table_name, partitioner) = row_result?; Ok::<_, QueryError>(((keyspace_name, table_name), partitioner)) }) .try_collect::>() From f3aae016719a336928c883d4fee6a457e6acad77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Thu, 7 Nov 2024 18:13:53 +0100 Subject: [PATCH 17/25] topology: reduce `query_filter_keyspace_name` monomorphisation penalty `query_filter_keyspace_name` is monomorphised into 5 different functions, with considerably large body each. To reduce code bloat (and potentially have better caching by calling the same code, not distinct), the large common part is degenericised and extracted. --- scylla/src/transport/topology.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/scylla/src/transport/topology.rs b/scylla/src/transport/topology.rs index 0e0cc2caa..7f3f6e41f 100644 --- a/scylla/src/transport/topology.rs +++ b/scylla/src/transport/topology.rs @@ -5,6 +5,7 @@ use crate::transport::connection::{Connection, ConnectionConfig}; use crate::transport::connection_pool::{NodeConnectionPool, PoolConfig, PoolSize}; use crate::transport::errors::{DbError, NewSessionError, QueryError}; use crate::transport::host_filter::HostFilter; +use crate::transport::iterator::QueryPager; use crate::transport::node::resolve_contact_points; use crate::utils::parse::{ParseErrorCause, ParseResult, ParserState}; @@ -933,8 +934,15 @@ where { let conn = conn.clone(); - let fut = async move { - let pager = if keyspaces_to_fetch.is_empty() { + // This function is extracted to reduce monomorphisation penalty: + // query_filter_keyspace_name() is going to be monomorphised into 5 distinct functions, + // so it's better to extract the common part. 
+ async fn make_keyspace_filtered_query_pager( + conn: Arc, + query_str: &str, + keyspaces_to_fetch: &[String], + ) -> Result { + if keyspaces_to_fetch.is_empty() { let mut query = Query::new(query_str); query.set_page_size(METADATA_QUERY_PAGE_SIZE); @@ -949,8 +957,11 @@ where let prepared = conn.prepare(&query).await?; let serialized_values = prepared.serialize_values(&keyspaces)?; conn.execute_iter(prepared, serialized_values).await - }?; + } + } + let fut = async move { + let pager = make_keyspace_filtered_query_pager(conn, query_str, keyspaces_to_fetch).await?; let stream: super::iterator::TypedRowStream = pager.rows_stream::().map_err(convert_typecheck_error)?; Ok::<_, QueryError>(stream) From 2b5f386d8f8fbff883673682ab2cff9f07467084 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Fri, 17 Mar 2023 13:41:49 +0100 Subject: [PATCH 18/25] {session,tracing}: switch to the new deser framework for tracing info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adjusts the Session::try_getting_tracing_info method to use the new deserialization framework. Co-authored-by: Wojciech Przytuła --- scylla/src/tracing.rs | 61 ++++----------------------------- scylla/src/transport/errors.rs | 28 ++++++++++----- scylla/src/transport/session.rs | 47 ++++++++++++++----------- 3 files changed, 53 insertions(+), 83 deletions(-) diff --git a/scylla/src/tracing.rs b/scylla/src/tracing.rs index 53019e786..459eb81e4 100644 --- a/scylla/src/tracing.rs +++ b/scylla/src/tracing.rs @@ -1,15 +1,14 @@ +use crate::frame::value::CqlTimestamp; use itertools::Itertools; use scylla_cql::frame::value::CqlTimeuuid; +use scylla_macros::DeserializeRow; use std::collections::HashMap; use std::net::IpAddr; -use crate::cql_to_rust::{FromRow, FromRowError}; -use crate::frame::response::result::Row; -use crate::frame::value::CqlTimestamp; - /// Tracing info retrieved from `system_traces.sessions` /// with all events from `system_traces.events` -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, DeserializeRow, Clone, PartialEq, Eq)] +#[scylla(crate = "crate")] pub struct TracingInfo { pub client: Option, pub command: Option, @@ -20,11 +19,13 @@ pub struct TracingInfo { /// started_at is a timestamp - time since unix epoch pub started_at: Option, + #[scylla(skip)] pub events: Vec, } /// A single event happening during a traced query -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, DeserializeRow, Clone, PartialEq, Eq)] +#[scylla(crate = "crate")] pub struct TracingEvent { pub event_id: CqlTimeuuid, pub activity: Option, @@ -53,51 +54,3 @@ pub(crate) const TRACES_SESSION_QUERY_STR: &str = pub(crate) const TRACES_EVENTS_QUERY_STR: &str = "SELECT event_id, activity, source, source_elapsed, thread \ FROM system_traces.events WHERE session_id = ?"; - -// Converts a row received by performing TRACES_SESSION_QUERY_STR to TracingInfo -impl FromRow for TracingInfo { - fn from_row(row: Row) -> Result { - let (client, command, coordinator, duration, parameters, request, started_at) = - <( - Option, - Option, - Option, - Option, - Option>, - Option, - Option, - )>::from_row(row)?; - - Ok(TracingInfo { - client, - command, - coordinator, - duration, - parameters, - request, - started_at, - events: Vec::new(), - }) - } -} - -// Converts a row received by performing TRACES_SESSION_QUERY_STR to TracingInfo -impl FromRow for TracingEvent { - fn from_row(row: Row) -> Result { - let (event_id, activity, source, source_elapsed, thread) = <( - CqlTimeuuid, - Option, - Option, - Option, - 
Option, - )>::from_row(row)?; - - Ok(TracingEvent { - event_id, - activity, - source, - source_elapsed, - thread, - }) - } -} diff --git a/scylla/src/transport/errors.rs b/scylla/src/transport/errors.rs index 478d03f09..349d968a4 100644 --- a/scylla/src/transport/errors.rs +++ b/scylla/src/transport/errors.rs @@ -13,7 +13,6 @@ use std::{ }; use scylla_cql::{ - cql_to_rust::FromRowError, frame::{ frame_errors::{ CqlAuthChallengeParseError, CqlAuthSuccessParseError, CqlAuthenticateParseError, @@ -25,14 +24,17 @@ use scylla_cql::{ response::CqlResponseKind, value::SerializeValuesError, }, - types::{deserialize::TypeCheckError, serialize::SerializationError}, + types::{ + deserialize::{DeserializationError, TypeCheckError}, + serialize::SerializationError, + }, }; use thiserror::Error; use crate::{authentication::AuthError, frame::response}; -use super::{legacy_query_result::RowsExpectedError, query_result::SingleRowError}; +use super::query_result::SingleRowError; /// Error that occurred during query execution #[derive(Error, Debug, Clone)] @@ -360,20 +362,28 @@ pub enum SchemaVersionFetchError { #[non_exhaustive] pub enum TracingProtocolError { /// Response to system_traces.session is not RESULT:Rows. - #[error("Response to system_traces.session is not RESULT:Rows: {0}")] - TracesSessionNotRows(RowsExpectedError), + #[error("Response to system_traces.session is not RESULT:Rows")] + TracesSessionNotRows, /// system_traces.session has invalid column type. #[error("system_traces.session has invalid column type: {0}")] - TracesSessionInvalidColumnType(FromRowError), + TracesSessionInvalidColumnType(TypeCheckError), + + /// Response to system_traces.session failed to deserialize. + #[error("Response to system_traces.session failed to deserialize: {0}")] + TracesSessionDeserializationFailed(DeserializationError), /// Response to system_traces.events is not RESULT:Rows. - #[error("Response to system_traces.events is not RESULT:Rows: {0}")] - TracesEventsNotRows(RowsExpectedError), + #[error("Response to system_traces.events is not RESULT:Rows")] + TracesEventsNotRows, /// system_traces.events has invalid column type. #[error("system_traces.events has invalid column type: {0}")] - TracesEventsInvalidColumnType(FromRowError), + TracesEventsInvalidColumnType(TypeCheckError), + + /// Response to system_traces.events failed to deserialize. + #[error("Response to system_traces.events failed to deserialize: {0}")] + TracesEventsDeserializationFailed(DeserializationError), /// All tracing queries returned an empty result. 
#[error( diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 9e6cf17ad..b53cc282b 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -46,11 +46,12 @@ use super::connection::SslConfig; use super::errors::TracingProtocolError; use super::execution_profile::{ExecutionProfile, ExecutionProfileHandle, ExecutionProfileInner}; use super::iterator::QueryPager; -use super::legacy_query_result::MaybeFirstRowTypedError; #[cfg(feature = "cloud")] use super::node::CloudEndpoint; use super::node::{InternalKnownNode, KnownNode}; use super::partitioner::PartitionerName; +use super::query_result::MaybeFirstRowError; +use super::query_result::RowsError; use super::topology::UntranslatedPeer; use super::{NodeRef, SelfIdentity}; use crate::frame::response::result; @@ -58,7 +59,7 @@ use crate::prepared_statement::PreparedStatement; use crate::query::Query; use crate::routing::{Shard, Token}; use crate::statement::{Consistency, PageSize, PagingState, PagingStateResponse}; -use crate::tracing::{TracingEvent, TracingInfo}; +use crate::tracing::TracingInfo; use crate::transport::cluster::{Cluster, ClusterData, ClusterNeatDebug}; use crate::transport::connection::{Connection, ConnectionConfig, VerifiedKeyspaceName}; use crate::transport::connection_pool::PoolConfig; @@ -1790,15 +1791,18 @@ where // Get tracing info let maybe_tracing_info: Option = traces_session_res - .into_legacy_result()? - .maybe_first_row_typed() + .into_rows_result()? + .ok_or(ProtocolError::Tracing( + TracingProtocolError::TracesSessionNotRows, + ))? + .maybe_first_row() .map_err(|err| match err { - MaybeFirstRowTypedError::RowsExpected(e) => { - ProtocolError::Tracing(TracingProtocolError::TracesSessionNotRows(e)) - } - MaybeFirstRowTypedError::FromRowError(e) => { + MaybeFirstRowError::TypeCheckFailed(e) => { ProtocolError::Tracing(TracingProtocolError::TracesSessionInvalidColumnType(e)) } + MaybeFirstRowError::DeserializationFailed(e) => ProtocolError::Tracing( + TracingProtocolError::TracesSessionDeserializationFailed(e), + ), })?; let mut tracing_info = match maybe_tracing_info { @@ -1807,20 +1811,23 @@ where }; // Get tracing events - let tracing_event_rows = traces_events_res - .into_legacy_result()? - .rows_typed() - .map_err(|err| { - ProtocolError::Tracing(TracingProtocolError::TracesEventsNotRows(err)) - })?; - - for event in tracing_event_rows { - let tracing_event: TracingEvent = event.map_err(|err| { + let tracing_event_rows_result = + traces_events_res + .into_rows_result()? + .ok_or(ProtocolError::Tracing( + TracingProtocolError::TracesEventsNotRows, + ))?; + let tracing_event_rows = tracing_event_rows_result.rows().map_err(|err| match err { + RowsError::TypeCheckFailed(err) => { ProtocolError::Tracing(TracingProtocolError::TracesEventsInvalidColumnType(err)) - })?; + } + })?; - tracing_info.events.push(tracing_event); - } + tracing_info.events = tracing_event_rows + .collect::>() + .map_err(|err| { + ProtocolError::Tracing(TracingProtocolError::TracesEventsDeserializationFailed(err)) + })?; if tracing_info.events.is_empty() { return Ok(None); From 5919cf9de6d9d580777bd6599855e2c2e1a6e6ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 12 Mar 2024 14:28:51 +0100 Subject: [PATCH 19/25] treewide: switch tests to use the new framework This is a large commit which goes over all existing tests that haven't been migrated in previous commits and adjusts them to use the new deserialization framework. 
There were lots of changes to be made, but they are mostly independent from each other and very simple. --- scylla/src/history.rs | 17 +- scylla/src/transport/authenticate_test.rs | 4 +- scylla/src/transport/caching_session.rs | 7 +- scylla/src/transport/connection.rs | 9 +- scylla/src/transport/cql_collections_test.rs | 29 +- scylla/src/transport/cql_types_test.rs | 252 ++++++--- scylla/src/transport/cql_value_test.rs | 51 +- .../transport/large_batch_statements_test.rs | 12 +- .../src/transport/load_balancing/default.rs | 2 +- scylla/src/transport/session_test.rs | 513 ++++++++++-------- .../transport/silent_prepare_batch_test.rs | 11 +- scylla/src/utils/test_utils.rs | 23 +- scylla/tests/integration/consistency.rs | 22 +- .../tests/integration/execution_profiles.rs | 2 +- scylla/tests/integration/lwt_optimisation.rs | 10 +- scylla/tests/integration/new_session.rs | 2 +- scylla/tests/integration/retries.rs | 14 +- scylla/tests/integration/self_identity.rs | 6 +- scylla/tests/integration/shards.rs | 6 +- .../tests/integration/silent_prepare_query.rs | 10 +- .../integration/skip_metadata_optimization.rs | 24 +- scylla/tests/integration/tablets.rs | 38 +- 22 files changed, 635 insertions(+), 429 deletions(-) diff --git a/scylla/src/history.rs b/scylla/src/history.rs index 62ee1ad4d..a055f91a3 100644 --- a/scylla/src/history.rs +++ b/scylla/src/history.rs @@ -469,8 +469,8 @@ mod tests { use crate::test_utils::create_new_session_builder; use assert_matches::assert_matches; use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc}; - use futures::StreamExt; - use scylla_cql::Consistency; + use futures::StreamExt as _; + use scylla_cql::{frame::response::result::Row, Consistency}; // Set a single time for all timestamps within StructuredHistory. // HistoryCollector sets the timestamp to current time which changes with each test. @@ -917,7 +917,7 @@ mod tests { #[tokio::test] async fn successful_query_history() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let mut query = Query::new("SELECT * FROM system.local"); let history_collector = Arc::new(HistoryCollector::new()); @@ -984,7 +984,7 @@ mod tests { #[tokio::test] async fn failed_query_history() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let mut query = Query::new("This isnt even CQL"); let history_collector = Arc::new(HistoryCollector::new()); @@ -1021,7 +1021,7 @@ mod tests { #[tokio::test] async fn iterator_query_history() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) @@ -1045,7 +1045,12 @@ mod tests { let history_collector = Arc::new(HistoryCollector::new()); iter_query.set_history_listener(history_collector.clone()); - let mut rows_iterator = session.query_iter(iter_query, ()).await.unwrap(); + let mut rows_iterator = session + .query_iter(iter_query, ()) + .await + .unwrap() + .rows_stream::() + .unwrap(); while let Some(_row) = rows_iterator.next().await { // Receive rows... 
} diff --git a/scylla/src/transport/authenticate_test.rs b/scylla/src/transport/authenticate_test.rs index 75d628ce4..78e72dea4 100644 --- a/scylla/src/transport/authenticate_test.rs +++ b/scylla/src/transport/authenticate_test.rs @@ -16,7 +16,7 @@ async fn authenticate_superuser() { let session = crate::SessionBuilder::new() .known_node(uri) .user("cassandra", "cassandra") - .build_legacy() + .build() .await .unwrap(); let ks = unique_keyspace_name(); @@ -75,7 +75,7 @@ async fn custom_authentication() { let session = crate::SessionBuilder::new() .known_node(uri) .authenticator_provider(Arc::new(CustomAuthenticatorProvider)) - .build_legacy() + .build() .await .unwrap(); let ks = unique_keyspace_name(); diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index d6e9db700..192ad6dd4 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -460,7 +460,12 @@ mod tests { .unwrap() .into_stream(); - let rows = iter.try_collect::>().await.unwrap().len(); + let rows = iter + .into_stream() + .try_collect::>() + .await + .unwrap() + .len(); assert_eq!(1, rows); assert_eq!(1, session.cache.len()); diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 7f6eec8c3..7610650c6 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -2447,7 +2447,7 @@ mod tests { // Preparation phase let session = SessionBuilder::new() .known_node_addr(addr) - .build_legacy() + .build() .await .unwrap(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); @@ -2543,7 +2543,7 @@ mod tests { // Preparation phase let session = SessionBuilder::new() .known_node_addr(addr) - .build_legacy() + .build() .await .unwrap(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks.clone()), &[]).await.unwrap(); @@ -2618,9 +2618,10 @@ mod tests { .query_unpaged("SELECT p, v FROM t") .await .unwrap() - .into_legacy_result() + .into_rows_result() .unwrap() - .rows_typed::<(i32, Vec)>() + .unwrap() + .rows::<(i32, Vec)>() .unwrap() .collect::, _>>() .unwrap(); diff --git a/scylla/src/transport/cql_collections_test.rs b/scylla/src/transport/cql_collections_test.rs index 6322e6e92..f37d28a8f 100644 --- a/scylla/src/transport/cql_collections_test.rs +++ b/scylla/src/transport/cql_collections_test.rs @@ -1,12 +1,14 @@ -use crate::cql_to_rust::FromCqlVal; +use crate::transport::session::Session; +use scylla_cql::types::deserialize::value::DeserializeValue; + +use crate::frame::response::result::CqlValue; use crate::test_utils::{create_new_session_builder, setup_tracing}; use crate::utils::test_utils::unique_keyspace_name; -use crate::{frame::response::result::CqlValue, LegacySession}; use scylla_cql::types::serialize::value::SerializeValue; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -async fn connect() -> LegacySession { - let session = create_new_session_builder().build_legacy().await.unwrap(); +async fn connect() -> Session { + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, 
false).await.unwrap(); @@ -14,7 +16,7 @@ async fn connect() -> LegacySession { session } -async fn create_table(session: &LegacySession, table_name: &str, value_type: &str) { +async fn create_table(session: &Session, table_name: &str, value_type: &str) { session .query_unpaged( format!( @@ -28,13 +30,13 @@ async fn create_table(session: &LegacySession, table_name: &str, value_type: &st } async fn insert_and_select( - session: &LegacySession, + session: &Session, table_name: &str, to_insert: &InsertT, expected: &SelectT, ) where InsertT: SerializeValue, - SelectT: FromCqlVal> + PartialEq + std::fmt::Debug, + SelectT: for<'r> DeserializeValue<'r, 'r> + PartialEq + std::fmt::Debug, { session .query_unpaged( @@ -48,7 +50,10 @@ async fn insert_and_select( .query_unpaged(format!("SELECT val FROM {} WHERE p = 0", table_name), ()) .await .unwrap() - .single_row_typed::<(SelectT,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(SelectT,)>() .unwrap() .0; @@ -58,7 +63,7 @@ async fn insert_and_select( #[tokio::test] async fn test_cql_list() { setup_tracing(); - let session: LegacySession = connect().await; + let session: Session = connect().await; let table_name: &str = "test_cql_list_tab"; create_table(&session, table_name, "list").await; @@ -91,7 +96,7 @@ async fn test_cql_list() { #[tokio::test] async fn test_cql_set() { setup_tracing(); - let session: LegacySession = connect().await; + let session: Session = connect().await; let table_name: &str = "test_cql_set_tab"; create_table(&session, table_name, "set").await; @@ -155,7 +160,7 @@ async fn test_cql_set() { #[tokio::test] async fn test_cql_map() { setup_tracing(); - let session: LegacySession = connect().await; + let session: Session = connect().await; let table_name: &str = "test_cql_map_tab"; create_table(&session, table_name, "map").await; @@ -206,7 +211,7 @@ async fn test_cql_map() { #[tokio::test] async fn test_cql_tuple() { setup_tracing(); - let session: LegacySession = connect().await; + let session: Session = connect().await; let table_name: &str = "test_cql_tuple_tab"; create_table(&session, table_name, "tuple").await; diff --git a/scylla/src/transport/cql_types_test.rs b/scylla/src/transport/cql_types_test.rs index 4be1244eb..0a1833fd7 100644 --- a/scylla/src/transport/cql_types_test.rs +++ b/scylla/src/transport/cql_types_test.rs @@ -1,17 +1,14 @@ use crate as scylla; -use crate::cql_to_rust::FromCqlVal; use crate::frame::response::result::CqlValue; use crate::frame::value::{Counter, CqlDate, CqlTime, CqlTimestamp}; -use crate::macros::FromUserType; -use crate::test_utils::{ - create_new_session_builder, scylla_supports_tablets_legacy, setup_tracing, -}; -use crate::transport::session::LegacySession; +use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing}; +use crate::transport::session::Session; use crate::utils::test_utils::unique_keyspace_name; use itertools::Itertools; use scylla_cql::frame::value::{CqlTimeuuid, CqlVarint}; +use scylla_cql::types::deserialize::value::DeserializeValue; use scylla_cql::types::serialize::value::SerializeValue; -use scylla_macros::SerializeValue; +use scylla_macros::{DeserializeValue, SerializeValue}; use std::cmp::PartialEq; use std::fmt::Debug; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; @@ -24,8 +21,8 @@ async fn init_test_maybe_without_tablets( table_name: &str, type_name: &str, supports_tablets: bool, -) -> LegacySession { - let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); +) -> Session { + let 
session: Session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); let mut create_ks = format!( @@ -34,7 +31,7 @@ async fn init_test_maybe_without_tablets( ks ); - if !supports_tablets && scylla_supports_tablets_legacy(&session).await { + if !supports_tablets && scylla_supports_tablets(&session).await { create_ks += " AND TABLETS = {'enabled': false}" } @@ -63,7 +60,7 @@ async fn init_test_maybe_without_tablets( // Used to prepare a table for test // Creates a new keyspace // Drops and creates table {table_name} (id int PRIMARY KEY, val {type_name}) -async fn init_test(table_name: &str, type_name: &str) -> LegacySession { +async fn init_test(table_name: &str, type_name: &str) -> Session { init_test_maybe_without_tablets(table_name, type_name, true).await } @@ -77,9 +74,9 @@ async fn init_test(table_name: &str, type_name: &str) -> LegacySession { // Expected values and bound values are computed using T::from_str async fn run_tests(tests: &[&str], type_name: &str) where - T: SerializeValue + FromCqlVal + FromStr + Debug + Clone + PartialEq, + T: SerializeValue + for<'r> DeserializeValue<'r, 'r> + FromStr + Debug + Clone + PartialEq, { - let session: LegacySession = init_test(type_name, type_name).await; + let session: Session = init_test(type_name, type_name).await; session.await_schema_agreement().await.unwrap(); for test in tests.iter() { @@ -102,7 +99,10 @@ where .query_unpaged(select_values, &[]) .await .unwrap() - .rows_typed::<(T,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(T,)>() .unwrap() .map(Result::unwrap) .map(|row| row.0) @@ -170,7 +170,7 @@ async fn test_cql_varint() { ]; let table_name = "cql_varint_tests"; - let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); + let session: Session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -220,7 +220,10 @@ async fn test_cql_varint() { .execute_unpaged(&prepared_select, &[]) .await .unwrap() - .rows_typed::<(CqlVarint,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(CqlVarint,)>() .unwrap() .map(Result::unwrap) .map(|row| row.0) @@ -280,7 +283,7 @@ async fn test_counter() { // Can't use run_tests, because counters are special and can't be inserted let type_name = "counter"; - let session: LegacySession = init_test_maybe_without_tablets(type_name, type_name, false).await; + let session: Session = init_test_maybe_without_tablets(type_name, type_name, false).await; for (i, test) in tests.iter().enumerate() { let update_bound_value = format!("UPDATE {} SET val = val + ? 
WHERE id = ?", type_name); @@ -295,7 +298,10 @@ async fn test_counter() { .query_unpaged(select_values, (i as i32,)) .await .unwrap() - .rows_typed::<(Counter,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(Counter,)>() .unwrap() .map(Result::unwrap) .map(|row| row.0) @@ -313,7 +319,7 @@ async fn test_naive_date_04() { use chrono::Datelike; use chrono::NaiveDate; - let session: LegacySession = init_test("chrono_naive_date_tests", "date").await; + let session: Session = init_test("chrono_naive_date_tests", "date").await; let min_naive_date: NaiveDate = NaiveDate::MIN; let min_naive_date_string = min_naive_date.format("%Y-%m-%d").to_string(); @@ -371,7 +377,10 @@ async fn test_naive_date_04() { .query_unpaged("SELECT val from chrono_naive_date_tests", &[]) .await .unwrap() - .rows_typed::<(NaiveDate,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(NaiveDate,)>() .unwrap() .next() .unwrap() @@ -394,7 +403,10 @@ async fn test_naive_date_04() { .query_unpaged("SELECT val from chrono_naive_date_tests", &[]) .await .unwrap() - .single_row_typed::<(NaiveDate,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(NaiveDate,)>() .unwrap(); assert_eq!(read_date, *naive_date); } @@ -406,7 +418,7 @@ async fn test_cql_date() { setup_tracing(); // Tests value::Date which allows to insert dates outside NaiveDate range - let session: LegacySession = init_test("cql_date_tests", "date").await; + let session: Session = init_test("cql_date_tests", "date").await; let tests = [ ("1970-01-01", CqlDate(2_u32.pow(31))), @@ -429,15 +441,14 @@ async fn test_cql_date() { .await .unwrap(); - let read_date: CqlDate = session + let (read_date,): (CqlDate,) = session .query_unpaged("SELECT val from cql_date_tests", &[]) .await .unwrap() - .rows - .unwrap()[0] - .columns[0] - .as_ref() - .map(|cql_val| cql_val.as_cql_date().unwrap()) + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(CqlDate,)>() .unwrap(); assert_eq!(read_date, *date); @@ -467,7 +478,7 @@ async fn test_date_03() { setup_tracing(); use time::{Date, Month::*}; - let session: LegacySession = init_test("time_date_tests", "date").await; + let session: Session = init_test("time_date_tests", "date").await; let tests = [ // Basic test values @@ -520,7 +531,10 @@ async fn test_date_03() { .query_unpaged("SELECT val from time_date_tests", &[]) .await .unwrap() - .first_row_typed::<(Date,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(Date,)>() .ok() .map(|val| val.0); @@ -540,7 +554,10 @@ async fn test_date_03() { .query_unpaged("SELECT val from time_date_tests", &[]) .await .unwrap() - .first_row_typed::<(Date,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(Date,)>() .unwrap(); assert_eq!(read_date, *date); } @@ -553,7 +570,7 @@ async fn test_cql_time() { // CqlTime is an i64 - nanoseconds since midnight // in range 0..=86399999999999 - let session: LegacySession = init_test("cql_time_tests", "time").await; + let session: Session = init_test("cql_time_tests", "time").await; let max_time: i64 = 24 * 60 * 60 * 1_000_000_000 - 1; assert_eq!(max_time, 86399999999999); @@ -583,7 +600,10 @@ async fn test_cql_time() { .query_unpaged("SELECT val from cql_time_tests", &[]) .await .unwrap() - .single_row_typed::<(CqlTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(CqlTime,)>() .unwrap(); assert_eq!(read_time, *time_duration); @@ -601,7 +621,10 @@ async fn test_cql_time() { .query_unpaged("SELECT val from cql_time_tests", &[]) .await .unwrap() - 
.single_row_typed::<(CqlTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(CqlTime,)>() .unwrap(); assert_eq!(read_time, *time_duration); @@ -679,7 +702,10 @@ async fn test_naive_time_04() { .query_unpaged("SELECT val from chrono_time_tests", &[]) .await .unwrap() - .first_row_typed::<(NaiveTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(NaiveTime,)>() .unwrap(); assert_eq!(read_time, *time); @@ -697,7 +723,10 @@ async fn test_naive_time_04() { .query_unpaged("SELECT val from chrono_time_tests", &[]) .await .unwrap() - .first_row_typed::<(NaiveTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(NaiveTime,)>() .unwrap(); assert_eq!(read_time, *time); } @@ -759,7 +788,10 @@ async fn test_time_03() { .query_unpaged("SELECT val from time_time_tests", &[]) .await .unwrap() - .first_row_typed::<(Time,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(Time,)>() .unwrap(); assert_eq!(read_time, *time); @@ -777,7 +809,10 @@ async fn test_time_03() { .query_unpaged("SELECT val from time_time_tests", &[]) .await .unwrap() - .first_row_typed::<(Time,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(Time,)>() .unwrap(); assert_eq!(read_time, *time); } @@ -786,7 +821,7 @@ async fn test_time_03() { #[tokio::test] async fn test_cql_timestamp() { setup_tracing(); - let session: LegacySession = init_test("cql_timestamp_tests", "timestamp").await; + let session: Session = init_test("cql_timestamp_tests", "timestamp").await; //let epoch_date = NaiveDate::from_ymd_opt(1970, 1, 1).unwrap(); @@ -830,7 +865,10 @@ async fn test_cql_timestamp() { .query_unpaged("SELECT val from cql_timestamp_tests", &[]) .await .unwrap() - .single_row_typed::<(CqlTimestamp,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(CqlTimestamp,)>() .unwrap(); assert_eq!(read_timestamp, *timestamp_duration); @@ -848,7 +886,10 @@ async fn test_cql_timestamp() { .query_unpaged("SELECT val from cql_timestamp_tests", &[]) .await .unwrap() - .single_row_typed::<(CqlTimestamp,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(CqlTimestamp,)>() .unwrap(); assert_eq!(read_timestamp, *timestamp_duration); @@ -925,7 +966,10 @@ async fn test_date_time_04() { .query_unpaged("SELECT val from chrono_datetime_tests", &[]) .await .unwrap() - .first_row_typed::<(DateTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(DateTime,)>() .unwrap(); assert_eq!(read_datetime, *datetime); @@ -943,7 +987,10 @@ async fn test_date_time_04() { .query_unpaged("SELECT val from chrono_datetime_tests", &[]) .await .unwrap() - .first_row_typed::<(DateTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(DateTime,)>() .unwrap(); assert_eq!(read_datetime, *datetime); } @@ -971,7 +1018,10 @@ async fn test_date_time_04() { .query_unpaged("SELECT val from chrono_datetime_tests", &[]) .await .unwrap() - .first_row_typed::<(DateTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(DateTime,)>() .unwrap(); assert_eq!(read_datetime, nanosecond_precision_1st_half_rounded); @@ -997,7 +1047,10 @@ async fn test_date_time_04() { .query_unpaged("SELECT val from chrono_datetime_tests", &[]) .await .unwrap() - .first_row_typed::<(DateTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(DateTime,)>() .unwrap(); assert_eq!(read_datetime, nanosecond_precision_2nd_half_rounded); @@ -1086,7 +1139,10 @@ async fn test_offset_date_time_03() { .query_unpaged("SELECT val from time_datetime_tests", 
&[]) .await .unwrap() - .first_row_typed::<(OffsetDateTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(OffsetDateTime,)>() .unwrap(); assert_eq!(read_datetime, *datetime); @@ -1104,7 +1160,10 @@ async fn test_offset_date_time_03() { .query_unpaged("SELECT val from time_datetime_tests", &[]) .await .unwrap() - .first_row_typed::<(OffsetDateTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(OffsetDateTime,)>() .unwrap(); assert_eq!(read_datetime, *datetime); } @@ -1132,7 +1191,10 @@ async fn test_offset_date_time_03() { .query_unpaged("SELECT val from time_datetime_tests", &[]) .await .unwrap() - .first_row_typed::<(OffsetDateTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(OffsetDateTime,)>() .unwrap(); assert_eq!(read_datetime, nanosecond_precision_1st_half_rounded); @@ -1158,7 +1220,10 @@ async fn test_offset_date_time_03() { .query_unpaged("SELECT val from time_datetime_tests", &[]) .await .unwrap() - .first_row_typed::<(OffsetDateTime,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(OffsetDateTime,)>() .unwrap(); assert_eq!(read_datetime, nanosecond_precision_2nd_half_rounded); } @@ -1166,7 +1231,7 @@ async fn test_offset_date_time_03() { #[tokio::test] async fn test_timeuuid() { setup_tracing(); - let session: LegacySession = init_test("timeuuid_tests", "timeuuid").await; + let session: Session = init_test("timeuuid_tests", "timeuuid").await; // A few random timeuuids generated manually let tests = [ @@ -1207,7 +1272,10 @@ async fn test_timeuuid() { .query_unpaged("SELECT val from timeuuid_tests", &[]) .await .unwrap() - .single_row_typed::<(CqlTimeuuid,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(CqlTimeuuid,)>() .unwrap(); assert_eq!(read_timeuuid.as_bytes(), timeuuid_bytes); @@ -1226,7 +1294,10 @@ async fn test_timeuuid() { .query_unpaged("SELECT val from timeuuid_tests", &[]) .await .unwrap() - .single_row_typed::<(CqlTimeuuid,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(CqlTimeuuid,)>() .unwrap(); assert_eq!(read_timeuuid.as_bytes(), timeuuid_bytes); @@ -1236,7 +1307,7 @@ async fn test_timeuuid() { #[tokio::test] async fn test_timeuuid_ordering() { setup_tracing(); - let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); + let session: Session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1295,7 +1366,10 @@ async fn test_timeuuid_ordering() { .query_unpaged("SELECT t FROM tab WHERE p = 0", ()) .await .unwrap() - .rows_typed::<(CqlTimeuuid,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(CqlTimeuuid,)>() .unwrap() .map(|r| r.unwrap().0) .collect(); @@ -1318,7 +1392,7 @@ async fn test_timeuuid_ordering() { #[tokio::test] async fn test_inet() { setup_tracing(); - let session: LegacySession = init_test("inet_tests", "inet").await; + let session: Session = init_test("inet_tests", "inet").await; let tests = [ ("0.0.0.0", IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), @@ -1374,7 +1448,10 @@ async fn test_inet() { .query_unpaged("SELECT val from inet_tests WHERE id = 0", &[]) .await .unwrap() - .single_row_typed::<(IpAddr,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(IpAddr,)>() .unwrap(); assert_eq!(read_inet, *inet); @@ -1389,7 +1466,10 @@ async fn test_inet() { .query_unpaged("SELECT val from inet_tests WHERE id = 0", &[]) .await .unwrap() - .single_row_typed::<(IpAddr,)>() + .into_rows_result() + .unwrap() + .unwrap() + 
.single_row::<(IpAddr,)>() .unwrap(); assert_eq!(read_inet, *inet); @@ -1399,7 +1479,7 @@ async fn test_inet() { #[tokio::test] async fn test_blob() { setup_tracing(); - let session: LegacySession = init_test("blob_tests", "blob").await; + let session: Session = init_test("blob_tests", "blob").await; let long_blob: Vec = vec![0x11; 1234]; let mut long_blob_str: String = "0x".to_string(); @@ -1440,7 +1520,10 @@ async fn test_blob() { .query_unpaged("SELECT val from blob_tests WHERE id = 0", &[]) .await .unwrap() - .single_row_typed::<(Vec,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(Vec,)>() .unwrap(); assert_eq!(read_blob, *blob); @@ -1455,7 +1538,10 @@ async fn test_blob() { .query_unpaged("SELECT val from blob_tests WHERE id = 0", &[]) .await .unwrap() - .single_row_typed::<(Vec,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(Vec,)>() .unwrap(); assert_eq!(read_blob, *blob); @@ -1468,7 +1554,7 @@ async fn test_udt_after_schema_update() { let table_name = "udt_tests"; let type_name = "usertype1"; - let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); + let session: Session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1516,7 +1602,7 @@ async fn test_udt_after_schema_update() { .await .unwrap(); - #[derive(SerializeValue, FromUserType, Debug, PartialEq)] + #[derive(SerializeValue, DeserializeValue, Debug, PartialEq)] #[scylla(crate = crate)] struct UdtV1 { first: i32, @@ -1543,7 +1629,10 @@ async fn test_udt_after_schema_update() { .query_unpaged(format!("SELECT val from {} WHERE id = 0", table_name), &[]) .await .unwrap() - .single_row_typed::<(UdtV1,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(UdtV1,)>() .unwrap(); assert_eq!(read_udt, v1); @@ -1560,7 +1649,10 @@ async fn test_udt_after_schema_update() { .query_unpaged(format!("SELECT val from {} WHERE id = 0", table_name), &[]) .await .unwrap() - .single_row_typed::<(UdtV1,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(UdtV1,)>() .unwrap(); assert_eq!(read_udt, v1); @@ -1570,7 +1662,7 @@ async fn test_udt_after_schema_update() { .await .unwrap(); - #[derive(FromUserType, Debug, PartialEq)] + #[derive(DeserializeValue, Debug, PartialEq)] struct UdtV2 { first: i32, second: bool, @@ -1581,7 +1673,10 @@ async fn test_udt_after_schema_update() { .query_unpaged(format!("SELECT val from {} WHERE id = 0", table_name), &[]) .await .unwrap() - .single_row_typed::<(UdtV2,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(UdtV2,)>() .unwrap(); assert_eq!( @@ -1597,7 +1692,7 @@ async fn test_udt_after_schema_update() { #[tokio::test] async fn test_empty() { setup_tracing(); - let session: LegacySession = init_test("empty_tests", "int").await; + let session: Session = init_test("empty_tests", "int").await; session .query_unpaged( @@ -1611,7 +1706,10 @@ async fn test_empty() { .query_unpaged("SELECT val FROM empty_tests WHERE id = 0", ()) .await .unwrap() - .first_row_typed::<(CqlValue,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(CqlValue,)>() .unwrap(); assert_eq!(empty, CqlValue::Empty); @@ -1628,7 +1726,10 @@ async fn test_empty() { .query_unpaged("SELECT val FROM empty_tests WHERE id = 1", ()) .await .unwrap() - .first_row_typed::<(CqlValue,)>() + .into_rows_result() + .unwrap() + .unwrap() + .first_row::<(CqlValue,)>() .unwrap(); assert_eq!(empty, CqlValue::Empty); @@ -1640,7 +1741,7 @@ async fn test_udt_with_missing_field() { let 
table_name = "udt_tests"; let type_name = "usertype1"; - let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); + let session: Session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1691,14 +1792,14 @@ async fn test_udt_with_missing_field() { let mut id = 0; async fn verify_insert_select_identity( - session: &LegacySession, + session: &Session, table_name: &str, id: i32, element: TQ, expected: TR, ) where TQ: SerializeValue, - TR: FromCqlVal + PartialEq + Debug, + TR: for<'r> DeserializeValue<'r, 'r> + PartialEq + Debug, { session .query_unpaged( @@ -1714,13 +1815,16 @@ async fn test_udt_with_missing_field() { ) .await .unwrap() - .single_row_typed::<(TR,)>() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(TR,)>() .unwrap() .0; assert_eq!(expected, result); } - #[derive(FromUserType, Debug, PartialEq)] + #[derive(DeserializeValue, Debug, PartialEq)] struct UdtFull { first: i32, second: bool, diff --git a/scylla/src/transport/cql_value_test.rs b/scylla/src/transport/cql_value_test.rs index d3d2d18e4..c5c2eedd5 100644 --- a/scylla/src/transport/cql_value_test.rs +++ b/scylla/src/transport/cql_value_test.rs @@ -1,13 +1,16 @@ -use crate::frame::{response::result::CqlValue, value::CqlDuration}; +use assert_matches::assert_matches; + +use crate::frame::response::result::CqlValue; +use crate::frame::value::CqlDuration; use crate::test_utils::{create_new_session_builder, setup_tracing}; use crate::utils::test_utils::unique_keyspace_name; -use crate::LegacySession; +use crate::Session; #[tokio::test] async fn test_cqlvalue_udt() { setup_tracing(); - let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); + let session: Session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session .query_unpaged( @@ -54,25 +57,23 @@ async fn test_cqlvalue_udt() { .await .unwrap(); - let rows = session + let rows_result = session .query_unpaged("SELECT my FROM cqlvalue_udt_test", &[]) .await .unwrap() - .rows + .into_rows_result() + .unwrap() .unwrap(); - assert_eq!(rows.len(), 1); - assert_eq!(rows[0].columns.len(), 1); - - let received_udt_cql_value = rows[0].columns[0].as_ref().unwrap(); + let (received_udt_cql_value,) = rows_result.single_row::<(CqlValue,)>().unwrap(); - assert_eq!(received_udt_cql_value, &udt_cql_value); + assert_eq!(received_udt_cql_value, udt_cql_value); } #[tokio::test] async fn test_cqlvalue_duration() { setup_tracing(); - let session: LegacySession = create_new_session_builder().build_legacy().await.unwrap(); + let session: Session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -106,45 +107,51 @@ async fn test_cqlvalue_duration() { session.query_unpaged(query.0, query.1).await.unwrap(); } - let rows = session + let rows_result = session .query_unpaged( "SELECT v FROM cqlvalue_duration_test WHERE pk = ?", (CqlValue::Int(0),), ) .await .unwrap() - .rows + .into_rows_result() + .unwrap() .unwrap(); - assert_eq!(rows.len(), 4); - assert_eq!(rows[0].columns.len(), 1); + let mut rows_iter = rows_result.rows::<(CqlValue,)>().unwrap(); - assert_eq!(rows[0].columns[0].as_ref().unwrap(), &duration_cql_value); + let (first_value,) = rows_iter.next().unwrap().unwrap(); + assert_eq!(first_value, duration_cql_value); + let (second_value,) = rows_iter.next().unwrap().unwrap(); assert_eq!( - rows[1].columns[0].as_ref().unwrap(), - &CqlValue::Duration(CqlDuration { + second_value, + 
CqlValue::Duration(CqlDuration { months: 0, days: 0, nanoseconds: 320_688_000_000_000, }) ); + let (third_value,) = rows_iter.next().unwrap().unwrap(); assert_eq!( - rows[2].columns[0].as_ref().unwrap(), - &CqlValue::Duration(CqlDuration { + third_value, + CqlValue::Duration(CqlDuration { months: 0, days: 0, nanoseconds: 320_933_000_000_000, }) ); + let (fourth_value,) = rows_iter.next().unwrap().unwrap(); assert_eq!( - rows[3].columns[0].as_ref().unwrap(), - &CqlValue::Duration(CqlDuration { + fourth_value, + CqlValue::Duration(CqlDuration { months: 0, days: 0, nanoseconds: 320_949_000_000_000, }) ); + + assert_matches!(rows_iter.next(), None); } diff --git a/scylla/src/transport/large_batch_statements_test.rs b/scylla/src/transport/large_batch_statements_test.rs index 0e250fc7d..7e8fc482c 100644 --- a/scylla/src/transport/large_batch_statements_test.rs +++ b/scylla/src/transport/large_batch_statements_test.rs @@ -7,13 +7,13 @@ use crate::transport::errors::{BadQuery, QueryError}; use crate::{ batch::Batch, test_utils::{create_new_session_builder, unique_keyspace_name}, - LegacyQueryResult, LegacySession, + QueryResult, Session, }; #[tokio::test] async fn test_large_batch_statements() { setup_tracing(); - let mut session = create_new_session_builder().build_legacy().await.unwrap(); + let mut session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session = create_test_session(session, &ks).await; @@ -31,7 +31,7 @@ async fn test_large_batch_statements() { ) } -async fn create_test_session(session: LegacySession, ks: &String) -> LegacySession { +async fn create_test_session(session: Session, ks: &String) -> Session { session .query_unpaged( format!("CREATE KEYSPACE {} WITH REPLICATION = {{ 'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1 }}",ks), @@ -51,11 +51,7 @@ async fn create_test_session(session: LegacySession, ks: &String) -> LegacySessi session } -async fn write_batch( - session: &LegacySession, - n: usize, - ks: &String, -) -> Result { +async fn write_batch(session: &Session, n: usize, ks: &String) -> Result { let mut batch_query = Batch::new(BatchType::Unlogged); let mut batch_values = Vec::new(); let query = format!("INSERT INTO {}.pairs (dummy, k, v) VALUES (0, ?, ?)", ks); diff --git a/scylla/src/transport/load_balancing/default.rs b/scylla/src/transport/load_balancing/default.rs index beffebb1f..51db7f97f 100644 --- a/scylla/src/transport/load_balancing/default.rs +++ b/scylla/src/transport/load_balancing/default.rs @@ -3860,7 +3860,7 @@ mod latency_awareness { let session = create_new_session_builder() .default_execution_profile_handle(handle) - .build_legacy() + .build() .await .unwrap(); diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index 1dddb7d40..1d036ff8f 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -1,17 +1,16 @@ -use crate as scylla; use crate::batch::{Batch, BatchStatement}; -use crate::frame::response::result::Row; use crate::prepared_statement::PreparedStatement; use crate::query::Query; use crate::retry_policy::{QueryInfo, RetryDecision, RetryPolicy, RetrySession}; use crate::routing::Token; use crate::statement::Consistency; -use crate::test_utils::{scylla_supports_tablets_legacy, setup_tracing}; +use crate::test_utils::{scylla_supports_tablets, setup_tracing}; use crate::tracing::TracingInfo; use crate::transport::errors::{BadKeyspaceName, BadQuery, DbError, QueryError}; use crate::transport::partitioner::{ 
calculate_token_for_partition_key, Murmur3Partitioner, Partitioner, PartitionerName, }; +use crate::transport::session::Session; use crate::transport::topology::Strategy::NetworkTopologyStrategy; use crate::transport::topology::{ CollectionType, ColumnKind, CqlType, NativeType, UserDefinedType, @@ -20,14 +19,14 @@ use crate::utils::test_utils::{ create_new_session_builder, supports_feature, unique_keyspace_name, }; use crate::ExecutionProfile; -use crate::LegacyCachingSession; -use crate::LegacyQueryResult; -use crate::{LegacySession, SessionBuilder}; +use crate::{self as scylla, QueryResult}; +use crate::{CachingSession, SessionBuilder}; use assert_matches::assert_matches; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use futures::{FutureExt, StreamExt as _, TryStreamExt}; use itertools::Itertools; use scylla_cql::frame::request::query::{PagingState, PagingStateResponse}; use scylla_cql::frame::response::result::ColumnType; +use scylla_cql::frame::response::result::Row; use scylla_cql::types::serialize::row::{SerializeRow, SerializedValues}; use scylla_cql::types::serialize::value::SerializeValue; use std::collections::BTreeSet; @@ -37,6 +36,8 @@ use std::sync::Arc; use tokio::net::TcpListener; use uuid::Uuid; +use super::query_result::QueryRowsResult; + #[tokio::test] async fn test_connection_failure() { setup_tracing(); @@ -55,10 +56,7 @@ async fn test_connection_failure() { .remote_handle(); tokio::spawn(fut); - let res = SessionBuilder::new() - .known_node_addr(addr) - .build_legacy() - .await; + let res = SessionBuilder::new().known_node_addr(addr).build().await; match res { Ok(_) => panic!("Unexpected success"), Err(err) => println!("Connection error (it was expected): {:?}", err), @@ -68,7 +66,7 @@ async fn test_connection_failure() { #[tokio::test] async fn test_unprepared_statement() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -110,42 +108,40 @@ async fn test_unprepared_statement() { .await .unwrap(); - let (a_idx, _) = query_result.get_column_spec("a").unwrap(); - let (b_idx, _) = query_result.get_column_spec("b").unwrap(); - let (c_idx, _) = query_result.get_column_spec("c").unwrap(); - assert!(query_result.get_column_spec("d").is_none()); + let rows = query_result.into_rows_result().unwrap().unwrap(); - let rs = query_result.rows.unwrap(); + let col_specs = rows.column_specs(); + assert_eq!(col_specs.get_by_name("a").unwrap().0, 0); + assert_eq!(col_specs.get_by_name("b").unwrap().0, 1); + assert_eq!(col_specs.get_by_name("c").unwrap().0, 2); + assert!(col_specs.get_by_name("d").is_none()); + + let mut results = rows + .rows::<(i32, i32, String)>() + .unwrap() + .collect::, _>>() + .unwrap(); - let mut results: Vec<(i32, i32, &String)> = rs - .iter() - .map(|r| { - let a = r.columns[a_idx].as_ref().unwrap().as_int().unwrap(); - let b = r.columns[b_idx].as_ref().unwrap().as_int().unwrap(); - let c = r.columns[c_idx].as_ref().unwrap().as_text().unwrap(); - (a, b, c) - }) - .collect(); results.sort(); assert_eq!( results, vec![ - (1, 2, &String::from("abc")), - (1, 4, &String::from("hello")), - (7, 11, &String::from("")) + (1, 2, String::from("abc")), + (1, 4, String::from("hello")), + (7, 11, String::from("")) ] ); let query_result = session 
.query_iter(format!("SELECT a, b, c FROM {}.t", ks), &[]) .await .unwrap(); - let specs = query_result.get_column_specs(); + let specs = query_result.column_specs(); assert_eq!(specs.len(), 3); for (spec, name) in specs.iter().zip(["a", "b", "c"]) { assert_eq!(spec.name(), name); // Check column name. assert_eq!(spec.table_spec().ks_name(), ks); } - let mut results_from_manual_paging: Vec = vec![]; + let mut results_from_manual_paging = vec![]; let query = Query::new(format!("SELECT a, b, c FROM {}.t", ks)).with_page_size(1); let mut paging_state = PagingState::start(); let mut watchdog = 0; @@ -154,7 +150,15 @@ async fn test_unprepared_statement() { .query_single_page(query.clone(), &[], paging_state) .await .unwrap(); - results_from_manual_paging.append(&mut rs_manual.rows.unwrap()); + let mut page_results = rs_manual + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, String)>() + .unwrap() + .collect::, _>>() + .unwrap(); + results_from_manual_paging.append(&mut page_results); match paging_state_response { PagingStateResponse::HasMorePages { state } => { paging_state = state; @@ -164,13 +168,13 @@ async fn test_unprepared_statement() { } watchdog += 1; } - assert_eq!(results_from_manual_paging, rs); + assert_eq!(results_from_manual_paging, results); } #[tokio::test] async fn test_prepared_statement() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -198,7 +202,7 @@ async fn test_prepared_statement() { .await .unwrap(); let query_result = session.execute_iter(prepared_statement, &[]).await.unwrap(); - let specs = query_result.get_column_specs(); + let specs = query_result.column_specs(); assert_eq!(specs.len(), 3); for (spec, name) in specs.iter().zip(["a", "b", "c"]) { assert_eq!(spec.name(), name); // Check column name. 
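The hunks above and below repeat the same migration pattern, so a condensed sketch may help while reading them: the untyped `rows`/`rows_typed` access is replaced by converting a `QueryResult` with `into_rows_result()` and then asking the resulting rows result for typed rows. This is only an illustrative sketch distilled from the hunks in this patch; `session` is assumed to be an already-built `Session`, and the keyspace, table and `(i32, String)` target type are placeholders, not part of the change.

    // Minimal sketch of the row-access path applied throughout this patch.
    // `ks.tab`, the selected columns and the (i32, String) row type are
    // illustrative placeholders.
    let rows_result = session
        .query_unpaged("SELECT a, b FROM ks.tab", &[])
        .await
        .unwrap()
        .into_rows_result() // two unwraps, matching the hunks in this patch
        .unwrap()
        .unwrap();
    let rows: Vec<(i32, String)> = rows_result
        .rows::<(i32, String)>() // typed, lazily deserialized iterator
        .unwrap()
        .collect::<Result<Vec<_>, _>>()
        .unwrap();
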
@@ -237,7 +241,10 @@ async fn test_prepared_statement() { .query_unpaged(format!("SELECT token(a) FROM {}.t2", ks), &[]) .await .unwrap() - .single_row_typed() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(i64,)>() .unwrap(); let token = Token::new(value); let prepared_token = Murmur3Partitioner @@ -256,7 +263,10 @@ async fn test_prepared_statement() { .query_unpaged(format!("SELECT token(a,b,c) FROM {}.complex_pk", ks), &[]) .await .unwrap() - .single_row_typed() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(i64,)>() .unwrap(); let token = Token::new(value); let prepared_token = Murmur3Partitioner.hash_one( @@ -278,15 +288,17 @@ async fn test_prepared_statement() { .query_unpaged(format!("SELECT a,b,c FROM {}.t2", ks), &[]) .await .unwrap() - .rows + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, String)>() + .unwrap() + .collect::, _>>() .unwrap(); - let r = rs.first().unwrap(); - let a = r.columns[0].as_ref().unwrap().as_int().unwrap(); - let b = r.columns[1].as_ref().unwrap().as_int().unwrap(); - let c = r.columns[2].as_ref().unwrap().as_text().unwrap(); - assert_eq!((a, b, c), (17, 16, &String::from("I'm prepared!!!"))); + let r = &rs[0]; + assert_eq!(r, &(17, 16, String::from("I'm prepared!!!"))); - let mut results_from_manual_paging: Vec = vec![]; + let mut results_from_manual_paging = vec![]; let query = Query::new(format!("SELECT a, b, c FROM {}.t2", ks)).with_page_size(1); let prepared_paged = session.prepare(query).await.unwrap(); let mut paging_state = PagingState::start(); @@ -296,7 +308,15 @@ async fn test_prepared_statement() { .execute_single_page(&prepared_paged, &[], paging_state) .await .unwrap(); - results_from_manual_paging.append(&mut rs_manual.rows.unwrap()); + let mut page_results = rs_manual + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, String)>() + .unwrap() + .collect::, _>>() + .unwrap(); + results_from_manual_paging.append(&mut page_results); match paging_state_response { PagingStateResponse::HasMorePages { state } => { paging_state = state; @@ -313,7 +333,10 @@ async fn test_prepared_statement() { .query_unpaged(format!("SELECT a,b,c,d,e FROM {}.complex_pk", ks), &[]) .await .unwrap() - .single_row_typed() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(i32, i32, String, i32, Option)>() .unwrap(); assert!(e.is_none()); assert_eq!( @@ -321,9 +344,9 @@ async fn test_prepared_statement() { (17, 16, "I'm prepared!!!", 7, None) ); } - // Check that SerializeRow macro works + // Check that SerializeRow and DeserializeRow macros work { - #[derive(scylla::SerializeRow, scylla::FromRow, PartialEq, Debug, Clone)] + #[derive(scylla::SerializeRow, scylla::DeserializeRow, PartialEq, Debug, Clone)] #[scylla(crate = crate)] struct ComplexPk { a: i32, @@ -359,7 +382,10 @@ async fn test_prepared_statement() { ) .await .unwrap() - .single_row_typed() + .into_rows_result() + .unwrap() + .unwrap() + .single_row() .unwrap(); assert_eq!(input, output) } @@ -371,13 +397,13 @@ async fn test_counter_batch() { use scylla_cql::frame::request::batch::BatchType; setup_tracing(); - let session = Arc::new(create_new_session_builder().build_legacy().await.unwrap()); + let session = Arc::new(create_new_session_builder().build().await.unwrap()); let ks = unique_keyspace_name(); // Need to disable tablets in this test because they don't support counters yet. // (https://github.com/scylladb/scylladb/commit/c70f321c6f581357afdf3fd8b4fe8e5c5bb9736e). 
let mut create_ks = format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks); - if scylla_supports_tablets_legacy(&session).await { + if scylla_supports_tablets(&session).await { create_ks += " AND TABLETS = {'enabled': false}" } @@ -426,7 +452,7 @@ async fn test_counter_batch() { #[tokio::test] async fn test_batch() { setup_tracing(); - let session = Arc::new(create_new_session_builder().build_legacy().await.unwrap()); + let session = Arc::new(create_new_session_builder().build().await.unwrap()); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -480,7 +506,10 @@ async fn test_batch() { .query_unpaged(format!("SELECT a, b, c FROM {}.t_batch", ks), &[]) .await .unwrap() - .rows_typed() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, String)>() .unwrap() .collect::>() .unwrap(); @@ -517,7 +546,10 @@ async fn test_batch() { ) .await .unwrap() - .rows_typed() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, String)>() .unwrap() .collect::>() .unwrap(); @@ -528,7 +560,7 @@ async fn test_batch() { #[tokio::test] async fn test_token_calculation() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -570,7 +602,10 @@ async fn test_token_calculation() { ) .await .unwrap() - .single_row_typed() + .into_rows_result() + .unwrap() + .unwrap() + .single_row::<(i64,)>() .unwrap(); let token = Token::new(value); let prepared_token = Murmur3Partitioner @@ -587,7 +622,7 @@ async fn test_token_calculation() { #[tokio::test] async fn test_token_awareness() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); // Need to disable tablets in this test because they make token routing @@ -596,7 +631,7 @@ async fn test_token_awareness() { let mut create_ks = format!( "CREATE KEYSPACE IF NOT EXISTS {ks} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}" ); - if scylla_supports_tablets_legacy(&session).await { + if scylla_supports_tablets(&session).await { create_ks += " AND TABLETS = {'enabled': false}" } @@ -626,7 +661,7 @@ async fn test_token_awareness() { .await .unwrap(); let tracing_info = session - .get_tracing_info(res.tracing_id.as_ref().unwrap()) + .get_tracing_info(res.tracing_id().as_ref().unwrap()) .await .unwrap(); @@ -638,7 +673,7 @@ async fn test_token_awareness() { .execute_iter(prepared_statement.clone(), values) .await .unwrap(); - let tracing_id = iter.get_tracing_ids()[0]; + let tracing_id = iter.tracing_ids()[0]; let tracing_info = session.get_tracing_info(&tracing_id).await.unwrap(); // Again, verify that only one node was involved @@ -649,7 +684,7 @@ async fn test_token_awareness() { #[tokio::test] async fn test_use_keyspace() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); 
session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -678,7 +713,10 @@ async fn test_use_keyspace() { .query_unpaged("SELECT * FROM tab", &[]) .await .unwrap() - .rows_typed::<(String,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(String,)>() .unwrap() .map(|res| res.unwrap().0) .collect(); @@ -717,9 +755,9 @@ async fn test_use_keyspace() { )); // Make sure that use_keyspace on SessionBuiler works - let session2: LegacySession = create_new_session_builder() + let session2: Session = create_new_session_builder() .use_keyspace(ks.clone(), false) - .build_legacy() + .build() .await .unwrap(); @@ -727,7 +765,10 @@ async fn test_use_keyspace() { .query_unpaged("SELECT * FROM tab", &[]) .await .unwrap() - .rows_typed::<(String,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(String,)>() .unwrap() .map(|res| res.unwrap().0) .collect(); @@ -740,7 +781,7 @@ async fn test_use_keyspace() { #[tokio::test] async fn test_use_keyspace_case_sensitivity() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks_lower = unique_keyspace_name().to_lowercase(); let ks_upper = ks_lower.to_uppercase(); @@ -787,7 +828,10 @@ async fn test_use_keyspace_case_sensitivity() { .query_unpaged("SELECT * from tab", &[]) .await .unwrap() - .rows_typed::<(String,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(String,)>() .unwrap() .map(|row| row.unwrap().0) .collect(); @@ -802,7 +846,10 @@ async fn test_use_keyspace_case_sensitivity() { .query_unpaged("SELECT * from tab", &[]) .await .unwrap() - .rows_typed::<(String,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(String,)>() .unwrap() .map(|row| row.unwrap().0) .collect(); @@ -813,7 +860,7 @@ async fn test_use_keyspace_case_sensitivity() { #[tokio::test] async fn test_raw_use_keyspace() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -843,7 +890,10 @@ async fn test_raw_use_keyspace() { .query_unpaged("SELECT * FROM tab", &[]) .await .unwrap() - .rows_typed::<(String,)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(String,)>() .unwrap() .map(|res| res.unwrap().0) .collect(); @@ -865,7 +915,7 @@ async fn test_raw_use_keyspace() { #[tokio::test] async fn test_fetch_system_keyspace() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let prepared_statement = session .prepare("SELECT * FROM system_schema.keyspaces") @@ -882,7 +932,7 @@ async fn test_fetch_system_keyspace() { #[tokio::test] async fn test_db_errors() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); // SyntaxError on bad query @@ -937,7 +987,7 @@ async fn test_db_errors() { #[tokio::test] async fn test_tracing() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = 
create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -959,39 +1009,38 @@ async fn test_tracing() { test_tracing_batch(&session, ks.clone()).await; } -async fn test_tracing_query(session: &LegacySession, ks: String) { +async fn test_tracing_query(session: &Session, ks: String) { // A query without tracing enabled has no tracing uuid in result let untraced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); - let untraced_query_result: LegacyQueryResult = + let untraced_query_result: QueryResult = session.query_unpaged(untraced_query, &[]).await.unwrap(); - assert!(untraced_query_result.tracing_id.is_none()); + assert!(untraced_query_result.tracing_id().is_none()); // A query with tracing enabled has a tracing uuid in result let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); traced_query.config.tracing = true; - let traced_query_result: LegacyQueryResult = - session.query_unpaged(traced_query, &[]).await.unwrap(); - assert!(traced_query_result.tracing_id.is_some()); + let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap(); + assert!(traced_query_result.tracing_id().is_some()); // Querying this uuid from tracing table gives some results - assert_in_tracing_table(session, traced_query_result.tracing_id.unwrap()).await; + assert_in_tracing_table(session, traced_query_result.tracing_id().unwrap()).await; } -async fn test_tracing_execute(session: &LegacySession, ks: String) { +async fn test_tracing_execute(session: &Session, ks: String) { // Executing a prepared statement without tracing enabled has no tracing uuid in result let untraced_prepared = session .prepare(format!("SELECT * FROM {}.tab", ks)) .await .unwrap(); - let untraced_prepared_result: LegacyQueryResult = session + let untraced_prepared_result: QueryResult = session .execute_unpaged(&untraced_prepared, &[]) .await .unwrap(); - assert!(untraced_prepared_result.tracing_id.is_none()); + assert!(untraced_prepared_result.tracing_id().is_none()); // Executing a prepared statement with tracing enabled has a tracing uuid in result let mut traced_prepared = session @@ -1001,17 +1050,17 @@ async fn test_tracing_execute(session: &LegacySession, ks: String) { traced_prepared.config.tracing = true; - let traced_prepared_result: LegacyQueryResult = session + let traced_prepared_result: QueryResult = session .execute_unpaged(&traced_prepared, &[]) .await .unwrap(); - assert!(traced_prepared_result.tracing_id.is_some()); + assert!(traced_prepared_result.tracing_id().is_some()); // Querying this uuid from tracing table gives some results - assert_in_tracing_table(session, traced_prepared_result.tracing_id.unwrap()).await; + assert_in_tracing_table(session, traced_prepared_result.tracing_id().unwrap()).await; } -async fn test_tracing_prepare(session: &LegacySession, ks: String) { +async fn test_tracing_prepare(session: &Session, ks: String) { // Preparing a statement without tracing enabled has no tracing uuids in result let untraced_prepared = session .prepare(format!("SELECT * FROM {}.tab", ks)) @@ -1033,14 +1082,13 @@ async fn test_tracing_prepare(session: &LegacySession, ks: String) { } } -async fn test_get_tracing_info(session: &LegacySession, ks: String) { +async fn test_get_tracing_info(session: &Session, ks: String) { // A query with tracing enabled has 
a tracing uuid in result let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); traced_query.config.tracing = true; - let traced_query_result: LegacyQueryResult = - session.query_unpaged(traced_query, &[]).await.unwrap(); - let tracing_id: Uuid = traced_query_result.tracing_id.unwrap(); + let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap(); + let tracing_id: Uuid = traced_query_result.tracing_id().unwrap(); // Getting tracing info from session using this uuid works let tracing_info: TracingInfo = session.get_tracing_info(&tracing_id).await.unwrap(); @@ -1048,58 +1096,42 @@ async fn test_get_tracing_info(session: &LegacySession, ks: String) { assert!(!tracing_info.nodes().is_empty()); } -async fn test_tracing_query_iter(session: &LegacySession, ks: String) { +async fn test_tracing_query_iter(session: &Session, ks: String) { // A query without tracing enabled has no tracing ids let untraced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); - let mut untraced_row_iter = session.query_iter(untraced_query, &[]).await.unwrap(); - while let Some(_row) = untraced_row_iter.next().await { - // Receive rows - } - - assert!(untraced_row_iter.get_tracing_ids().is_empty()); + let untraced_query_pager = session.query_iter(untraced_query, &[]).await.unwrap(); + assert!(untraced_query_pager.tracing_ids().is_empty()); - // The same is true for TypedRowIter - let untraced_typed_row_iter = untraced_row_iter.into_typed::<(String,)>(); - assert!(untraced_typed_row_iter.get_tracing_ids().is_empty()); + let untraced_typed_row_iter = untraced_query_pager.rows_stream::<(String,)>().unwrap(); + assert!(untraced_typed_row_iter.tracing_ids().is_empty()); // A query with tracing enabled has a tracing ids in result let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks)); traced_query.config.tracing = true; - let mut traced_row_iter = session.query_iter(traced_query, &[]).await.unwrap(); - while let Some(_row) = traced_row_iter.next().await { - // Receive rows - } - - assert!(!traced_row_iter.get_tracing_ids().is_empty()); + let traced_query_pager = session.query_iter(traced_query, &[]).await.unwrap(); - // The same is true for TypedRowIter - let traced_typed_row_iter = traced_row_iter.into_typed::<(String,)>(); - assert!(!traced_typed_row_iter.get_tracing_ids().is_empty()); + let traced_typed_row_stream = traced_query_pager.rows_stream::<(String,)>().unwrap(); + assert!(!traced_typed_row_stream.tracing_ids().is_empty()); - for tracing_id in traced_typed_row_iter.get_tracing_ids() { + for tracing_id in traced_typed_row_stream.tracing_ids() { assert_in_tracing_table(session, *tracing_id).await; } } -async fn test_tracing_execute_iter(session: &LegacySession, ks: String) { +async fn test_tracing_execute_iter(session: &Session, ks: String) { // A prepared statement without tracing enabled has no tracing ids let untraced_prepared = session .prepare(format!("SELECT * FROM {}.tab", ks)) .await .unwrap(); - let mut untraced_row_iter = session.execute_iter(untraced_prepared, &[]).await.unwrap(); - while let Some(_row) = untraced_row_iter.next().await { - // Receive rows - } + let untraced_query_pager = session.execute_iter(untraced_prepared, &[]).await.unwrap(); + assert!(untraced_query_pager.tracing_ids().is_empty()); - assert!(untraced_row_iter.get_tracing_ids().is_empty()); - - // The same is true for TypedRowIter - let untraced_typed_row_iter = untraced_row_iter.into_typed::<(String,)>(); - 
assert!(untraced_typed_row_iter.get_tracing_ids().is_empty()); + let untraced_typed_row_stream = untraced_query_pager.rows_stream::<(String,)>().unwrap(); + assert!(untraced_typed_row_stream.tracing_ids().is_empty()); // A prepared statement with tracing enabled has a tracing ids in result let mut traced_prepared = session @@ -1108,43 +1140,36 @@ async fn test_tracing_execute_iter(session: &LegacySession, ks: String) { .unwrap(); traced_prepared.config.tracing = true; - let mut traced_row_iter = session.execute_iter(traced_prepared, &[]).await.unwrap(); - while let Some(_row) = traced_row_iter.next().await { - // Receive rows - } - - assert!(!traced_row_iter.get_tracing_ids().is_empty()); + let traced_query_pager = session.execute_iter(traced_prepared, &[]).await.unwrap(); - // The same is true for TypedRowIter - let traced_typed_row_iter = traced_row_iter.into_typed::<(String,)>(); - assert!(!traced_typed_row_iter.get_tracing_ids().is_empty()); + let traced_typed_row_stream = traced_query_pager.rows_stream::<(String,)>().unwrap(); + assert!(!traced_typed_row_stream.tracing_ids().is_empty()); - for tracing_id in traced_typed_row_iter.get_tracing_ids() { + for tracing_id in traced_typed_row_stream.tracing_ids() { assert_in_tracing_table(session, *tracing_id).await; } } -async fn test_tracing_batch(session: &LegacySession, ks: String) { +async fn test_tracing_batch(session: &Session, ks: String) { // A batch without tracing enabled has no tracing id let mut untraced_batch: Batch = Default::default(); untraced_batch.append_statement(&format!("INSERT INTO {}.tab (a) VALUES('a')", ks)[..]); - let untraced_batch_result: LegacyQueryResult = - session.batch(&untraced_batch, ((),)).await.unwrap(); - assert!(untraced_batch_result.tracing_id.is_none()); + let untraced_batch_result: QueryResult = session.batch(&untraced_batch, ((),)).await.unwrap(); + assert!(untraced_batch_result.tracing_id().is_none()); // Batch with tracing enabled has a tracing uuid in result let mut traced_batch: Batch = Default::default(); traced_batch.append_statement(&format!("INSERT INTO {}.tab (a) VALUES('a')", ks)[..]); traced_batch.config.tracing = true; - let traced_batch_result: LegacyQueryResult = session.batch(&traced_batch, ((),)).await.unwrap(); - assert!(traced_batch_result.tracing_id.is_some()); + let traced_batch_result: QueryResult = session.batch(&traced_batch, ((),)).await.unwrap(); + assert!(traced_batch_result.tracing_id().is_some()); - assert_in_tracing_table(session, traced_batch_result.tracing_id.unwrap()).await; + assert_in_tracing_table(session, traced_batch_result.tracing_id().unwrap()).await; } -async fn assert_in_tracing_table(session: &LegacySession, tracing_uuid: Uuid) { +async fn assert_in_tracing_table(session: &Session, tracing_uuid: Uuid) { let mut traces_query = Query::new("SELECT * FROM system_traces.sessions WHERE session_id = ?"); traces_query.config.consistency = Some(Consistency::One); @@ -1160,9 +1185,10 @@ async fn assert_in_tracing_table(session: &LegacySession, tracing_uuid: Uuid) { .query_unpaged(traces_query.clone(), (tracing_uuid,)) .await .unwrap() - .rows_num() - .unwrap(); - + .into_rows_result() + .unwrap() + .unwrap() + .rows_num(); if rows_num > 0 { // Ok there was some row for this tracing_uuid return; @@ -1179,14 +1205,14 @@ async fn assert_in_tracing_table(session: &LegacySession, tracing_uuid: Uuid) { #[tokio::test] async fn test_await_schema_agreement() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = 
create_new_session_builder().build().await.unwrap(); let _schema_version = session.await_schema_agreement().await.unwrap(); } #[tokio::test] async fn test_timestamp() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -1267,14 +1293,19 @@ async fn test_timestamp() { .await .unwrap(); - let mut results = session + let query_rows_result = session .query_unpaged( format!("SELECT a, b, WRITETIME(b) FROM {}.t_timestamp", ks), &[], ) .await .unwrap() - .rows_typed::<(String, String, i64)>() + .into_rows_result() + .unwrap() + .unwrap(); + + let mut results = query_rows_result + .rows::<(&str, &str, i64)>() .unwrap() .map(Result::unwrap) .collect::>(); @@ -1286,8 +1317,7 @@ async fn test_timestamp() { ("regular query", "higher timestamp", 420), ("second query in batch", "higher timestamp", 420), ] - .iter() - .map(|(x, y, t)| (x.to_string(), y.to_string(), *t)) + .into_iter() .collect::>(); assert_eq!(results, expected_results); @@ -1305,7 +1335,7 @@ async fn test_request_timeout() { .into_handle(); { - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let mut query: Query = Query::new("SELECT * FROM system_schema.tables"); query.set_request_timeout(Some(Duration::from_millis(1))); @@ -1328,7 +1358,7 @@ async fn test_request_timeout() { { let timeouting_session = create_new_session_builder() .default_execution_profile_handle(fast_timeouting_profile_handle) - .build_legacy() + .build() .await .unwrap(); @@ -1364,7 +1394,7 @@ async fn test_request_timeout() { #[tokio::test] async fn test_prepared_config() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let mut query = Query::new("SELECT * FROM system_schema.tables"); query.set_is_idempotent(true); @@ -1451,7 +1481,7 @@ fn udt_type_c_def(ks: &str) -> Arc { #[tokio::test] async fn test_schema_types_in_metadata() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1610,7 +1640,7 @@ async fn test_schema_types_in_metadata() { #[tokio::test] async fn test_user_defined_types_in_metadata() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1674,7 +1704,7 @@ async fn test_user_defined_types_in_metadata() { #[tokio::test] async fn test_column_kinds_in_metadata() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1720,7 +1750,7 @@ async fn test_column_kinds_in_metadata() { #[tokio::test] async fn test_primary_key_ordering_in_metadata() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1769,14 +1799,14 @@ async fn 
test_table_partitioner_in_metadata() { return; } - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); // This test uses CDC which is not yet compatible with Scylla's tablets. let mut create_ks = format!( "CREATE KEYSPACE {ks} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}" ); - if scylla_supports_tablets_legacy(&session).await { + if scylla_supports_tablets(&session).await { create_ks += " AND TABLETS = {'enabled': false}"; } @@ -1815,7 +1845,7 @@ async fn test_turning_off_schema_fetching() { setup_tracing(); let session = create_new_session_builder() .fetch_schema_metadata(false) - .build_legacy() + .build() .await .unwrap(); let ks = unique_keyspace_name(); @@ -1891,7 +1921,7 @@ async fn test_turning_off_schema_fetching() { #[tokio::test] async fn test_named_bind_markers() { - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session @@ -1928,7 +1958,10 @@ async fn test_named_bind_markers() { .query_unpaged("SELECT pk, ck, v FROM t", &[]) .await .unwrap() - .rows_typed::<(i32, i32, i32)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, i32)>() .unwrap() .map(|res| res.unwrap()) .collect(); @@ -1948,13 +1981,13 @@ async fn test_named_bind_markers() { #[tokio::test] async fn test_prepared_partitioner() { - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); // This test uses CDC which is not yet compatible with Scylla's tablets. 
let mut create_ks = format!( "CREATE KEYSPACE IF NOT EXISTS {ks} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}"); - if scylla_supports_tablets_legacy(&session).await { + if scylla_supports_tablets(&session).await { create_ks += " AND TABLETS = {'enabled': false}" } @@ -2005,14 +2038,14 @@ async fn test_prepared_partitioner() { ); } -async fn rename(session: &LegacySession, rename_str: &str) { +async fn rename(session: &Session, rename_str: &str) { session .query_unpaged(format!("ALTER TABLE tab RENAME {}", rename_str), ()) .await .unwrap(); } -async fn rename_caching(session: &LegacyCachingSession, rename_str: &str) { +async fn rename_caching(session: &CachingSession, rename_str: &str) { session .execute_unpaged(format!("ALTER TABLE tab RENAME {}", rename_str), &()) .await @@ -2030,7 +2063,7 @@ async fn rename_caching(session: &LegacyCachingSession, rename_str: &str) { async fn test_unprepared_reprepare_in_execute() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2079,7 +2112,10 @@ async fn test_unprepared_reprepare_in_execute() { .query_unpaged("SELECT a, b, c FROM tab", ()) .await .unwrap() - .rows_typed::<(i32, i32, i32)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, i32)>() .unwrap() .map(|r| r.unwrap()) .collect(); @@ -2091,7 +2127,7 @@ async fn test_unprepared_reprepare_in_execute() { async fn test_unusual_valuelists() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2134,7 +2170,10 @@ async fn test_unusual_valuelists() { .query_unpaged("SELECT a, b, c FROM tab", ()) .await .unwrap() - .rows_typed::<(i32, i32, String)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, String)>() .unwrap() .map(|r| r.unwrap()) .collect(); @@ -2159,7 +2198,7 @@ async fn test_unusual_valuelists() { async fn test_unprepared_reprepare_in_batch() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2205,7 +2244,10 @@ async fn test_unprepared_reprepare_in_batch() { .query_unpaged("SELECT a, b, c FROM tab", ()) .await .unwrap() - .rows_typed::<(i32, i32, i32)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, i32)>() .unwrap() .map(|r| r.unwrap()) .collect(); @@ -2224,13 +2266,13 @@ async fn test_unprepared_reprepare_in_batch() { async fn test_unprepared_reprepare_in_caching_session_execute() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = 
create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(ks, false).await.unwrap(); - let caching_session: LegacyCachingSession = LegacyCachingSession::from(session, 64); + let caching_session: CachingSession = CachingSession::from(session, 64); caching_session .execute_unpaged( @@ -2272,7 +2314,10 @@ async fn test_unprepared_reprepare_in_caching_session_execute() { .execute_unpaged("SELECT a, b, c FROM tab", &()) .await .unwrap() - .rows_typed::<(i32, i32, i32)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, i32)>() .unwrap() .map(|r| r.unwrap()) .collect(); @@ -2284,7 +2329,7 @@ async fn test_unprepared_reprepare_in_caching_session_execute() { async fn test_views_in_schema_info() { let _ = tracing_subscriber::fmt::try_init(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2334,12 +2379,15 @@ async fn test_views_in_schema_info() { ) } -async fn assert_test_batch_table_rows_contain(sess: &LegacySession, expected_rows: &[(i32, i32)]) { +async fn assert_test_batch_table_rows_contain(sess: &Session, expected_rows: &[(i32, i32)]) { let selected_rows: BTreeSet<(i32, i32)> = sess .query_unpaged("SELECT a, b FROM test_batch_table", ()) .await .unwrap() - .rows_typed::<(i32, i32)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32)>() .unwrap() .map(|r| r.unwrap()) .collect(); @@ -2355,7 +2403,7 @@ async fn assert_test_batch_table_rows_contain(sess: &LegacySession, expected_row #[tokio::test] async fn test_prepare_batch() { - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2452,7 +2500,7 @@ async fn test_prepare_batch() { #[tokio::test] async fn test_refresh_metadata_after_schema_agreement() { - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2490,7 +2538,7 @@ async fn test_refresh_metadata_after_schema_agreement() { #[tokio::test] async fn test_rate_limit_exceeded_exception() { - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); // Typed errors in RPC were introduced along with per-partition rate limiting. // There is no dedicated feature for per-partition rate limiting, so we are @@ -2538,11 +2586,11 @@ async fn test_rate_limit_exceeded_exception() { // Batches containing LWT queries (IF col = som) return rows with information whether the queries were applied. 
#[tokio::test] async fn test_batch_lwts() { - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); let mut create_ks = format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class': 'NetworkTopologyStrategy', 'replication_factor': 1}}", ks); - if scylla_supports_tablets_legacy(&session).await { + if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } session.query_unpaged(create_ks, &[]).await.unwrap(); @@ -2566,33 +2614,34 @@ async fn test_batch_lwts() { batch.append_statement("INSERT INTO tab (p1, c1, r1, r2) VALUES (0, 123, 321, 312)"); batch.append_statement("UPDATE tab SET r1 = 1 WHERE p1 = 0 AND c1 = 0 IF r2 = 0"); - let batch_res: LegacyQueryResult = session.batch(&batch, ((), (), ())).await.unwrap(); + let batch_res: QueryResult = session.batch(&batch, ((), (), ())).await.unwrap(); + let batch_deserializer = batch_res.into_rows_result().unwrap().unwrap(); // Scylla returns 5 columns, but Cassandra returns only 1 - let is_scylla: bool = batch_res.col_specs().len() == 5; + let is_scylla: bool = batch_deserializer.column_specs().len() == 5; if is_scylla { - test_batch_lwts_for_scylla(&session, &batch, batch_res).await; + test_batch_lwts_for_scylla(&session, &batch, &batch_deserializer).await; } else { - test_batch_lwts_for_cassandra(&session, &batch, batch_res).await; + test_batch_lwts_for_cassandra(&session, &batch, &batch_deserializer).await; } } async fn test_batch_lwts_for_scylla( - session: &LegacySession, + session: &Session, batch: &Batch, - batch_res: LegacyQueryResult, + query_rows_result: &QueryRowsResult, ) { // Alias required by clippy type IntOrNull = Option; // Returned columns are: // [applied], p1, c1, r1, r2 - let batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> = batch_res - .rows_typed() + let batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> = query_rows_result + .rows() .unwrap() - .map(|r| r.unwrap()) - .collect(); + .collect::>() + .unwrap(); let expected_batch_res_rows = vec![ (true, Some(0), Some(0), Some(0), Some(0)), @@ -2603,12 +2652,15 @@ async fn test_batch_lwts_for_scylla( assert_eq!(batch_res_rows, expected_batch_res_rows); let prepared_batch: Batch = session.prepare_batch(batch).await.unwrap(); - let prepared_batch_res: LegacyQueryResult = + let prepared_batch_res: QueryResult = session.batch(&prepared_batch, ((), (), ())).await.unwrap(); let prepared_batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> = prepared_batch_res - .rows_typed() + .into_rows_result() + .unwrap() + .unwrap() + .rows() .unwrap() .map(|r| r.unwrap()) .collect(); @@ -2623,17 +2675,17 @@ async fn test_batch_lwts_for_scylla( } async fn test_batch_lwts_for_cassandra( - session: &LegacySession, + session: &Session, batch: &Batch, - batch_res: LegacyQueryResult, + query_rows_result: &QueryRowsResult, ) { // Alias required by clippy type IntOrNull = Option; // Returned columns are: // [applied] - let batch_res_rows: Vec<(bool,)> = batch_res - .rows_typed() + let batch_res_rows: Vec<(bool,)> = query_rows_result + .rows() .unwrap() .map(|r| r.unwrap()) .collect(); @@ -2643,14 +2695,17 @@ async fn test_batch_lwts_for_cassandra( assert_eq!(batch_res_rows, expected_batch_res_rows); let prepared_batch: Batch = session.prepare_batch(batch).await.unwrap(); - let prepared_batch_res: LegacyQueryResult = + let prepared_batch_res: QueryResult = session.batch(&prepared_batch, 
((), (), ())).await.unwrap(); // Returned columns are: // [applied], p1, c1, r1, r2 let prepared_batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> = prepared_batch_res - .rows_typed() + .into_rows_result() + .unwrap() + .unwrap() + .rows() .unwrap() .map(|r| r.unwrap()) .collect(); @@ -2665,7 +2720,7 @@ async fn test_keyspaces_to_fetch() { let ks1 = unique_keyspace_name(); let ks2 = unique_keyspace_name(); - let session_default = create_new_session_builder().build_legacy().await.unwrap(); + let session_default = create_new_session_builder().build().await.unwrap(); for ks in [&ks1, &ks2] { session_default .query_unpaged(format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]) @@ -2684,7 +2739,7 @@ async fn test_keyspaces_to_fetch() { let session1 = create_new_session_builder() .keyspaces_to_fetch([&ks1]) - .build_legacy() + .build() .await .unwrap(); assert!(session1.get_cluster_data().keyspaces.contains_key(&ks1)); @@ -2692,7 +2747,7 @@ async fn test_keyspaces_to_fetch() { let session_all = create_new_session_builder() .keyspaces_to_fetch([] as [String; 0]) - .build_legacy() + .build() .await .unwrap(); assert!(session_all.get_cluster_data().keyspaces.contains_key(&ks1)); @@ -2734,7 +2789,7 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { let session = create_new_session_builder() .default_execution_profile_handle(handle) - .build_legacy() + .build() .await .unwrap(); @@ -2742,7 +2797,7 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { let cluster_size = session.get_cluster_data().get_nodes_info().len(); let ks = unique_keyspace_name(); let mut create_ks = format!("CREATE KEYSPACE {} WITH REPLICATION = {{'class': 'NetworkTopologyStrategy', 'replication_factor': {}}}", ks, cluster_size + 1); - if scylla_supports_tablets_legacy(&session).await { + if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } session.query_unpaged(create_ks, ()).await.unwrap(); @@ -2755,13 +2810,15 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { assert!(!retried_flag.load(Ordering::Relaxed)); // Try to write something to the new table - it should fail and the policy // will tell us to ignore the error - let mut iter = session + let mut stream = session .query_iter("INSERT INTO t (pk v) VALUES (1, 2)", ()) .await + .unwrap() + .rows_stream::() .unwrap(); assert!(retried_flag.load(Ordering::Relaxed)); - while iter.try_next().await.unwrap().is_some() {} + while stream.try_next().await.unwrap().is_some() {} retried_flag.store(false, Ordering::Relaxed); // Try the same with execute_iter() @@ -2769,7 +2826,13 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { .prepare("INSERT INTO t (pk, v) VALUES (?, ?)") .await .unwrap(); - let mut iter = session.execute_iter(p, (1, 2)).await.unwrap(); + let mut iter = session + .execute_iter(p, (1, 2)) + .await + .unwrap() + .rows_stream::() + .unwrap() + .into_stream(); assert!(retried_flag.load(Ordering::Relaxed)); while iter.try_next().await.unwrap().is_some() {} @@ -2777,7 +2840,7 @@ async fn test_iter_works_when_retry_policy_returns_ignore_write_error() { #[tokio::test] async fn test_iter_methods_with_modification_statements() { - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE 
KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -2797,19 +2860,30 @@ async fn test_iter_methods_with_modification_statements() { ks )); query.set_tracing(true); - let mut row_iterator = session.query_iter(query, &[]).await.unwrap(); - row_iterator.next().await.ok_or(()).unwrap_err(); // assert empty - assert!(!row_iterator.get_tracing_ids().is_empty()); + let mut rows_stream = session + .query_iter(query, &[]) + .await + .unwrap() + .rows_stream::() + .unwrap(); + rows_stream.next().await.ok_or(()).unwrap_err(); // assert empty + assert!(!rows_stream.tracing_ids().is_empty()); let prepared_statement = session .prepare(format!("INSERT INTO {}.t (a, b, c) VALUES (?, ?, ?)", ks)) .await .unwrap(); - let mut row_iterator = session + let query_pager = session .execute_iter(prepared_statement, (2, 3, "cba")) .await .unwrap(); - row_iterator.next().await.ok_or(()).unwrap_err(); // assert empty + query_pager + .rows_stream::<()>() + .unwrap() + .next() + .await + .ok_or(()) + .unwrap_err(); // assert empty } #[tokio::test] @@ -2818,7 +2892,7 @@ async fn test_get_keyspace_name() { // Create the keyspace // No keyspace is set in config, so get_keyspace() should return None. - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); assert_eq!(session.get_keyspace(), None); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); assert_eq!(session.get_keyspace(), None); @@ -2843,7 +2917,7 @@ async fn test_get_keyspace_name() { #[tokio::test] async fn simple_strategy_test() { let ks = unique_keyspace_name(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); session .query_unpaged( @@ -2895,7 +2969,10 @@ async fn simple_strategy_test() { .query_unpaged(format!("SELECT p, c, r FROM {}.tab", ks), ()) .await .unwrap() - .rows_typed::<(i32, i32, i32)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32, i32)>() .unwrap() .map(|r| r.unwrap()) .collect::>(); @@ -2908,12 +2985,12 @@ async fn simple_strategy_test() { async fn test_manual_primary_key_computation() { // Setup session let ks = unique_keyspace_name(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); session.use_keyspace(&ks, true).await.unwrap(); async fn assert_tokens_equal( - session: &LegacySession, + session: &Session, prepared: &PreparedStatement, serialized_pk_values_in_pk_order: &SerializedValues, all_values_in_query_order: impl SerializeRow, diff --git a/scylla/src/transport/silent_prepare_batch_test.rs b/scylla/src/transport/silent_prepare_batch_test.rs index c7ae8e83d..48c0dc1f1 100644 --- a/scylla/src/transport/silent_prepare_batch_test.rs +++ b/scylla/src/transport/silent_prepare_batch_test.rs @@ -2,14 +2,14 @@ use crate::{ batch::Batch, prepared_statement::PreparedStatement, test_utils::{create_new_session_builder, setup_tracing, unique_keyspace_name}, - LegacySession, + Session, }; use std::collections::BTreeSet; #[tokio::test] async fn 
test_quietly_prepare_batch() { setup_tracing(); - let session = create_new_session_builder().build_legacy().await.unwrap(); + let session = create_new_session_builder().build().await.unwrap(); let ks = unique_keyspace_name(); session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); @@ -91,12 +91,15 @@ async fn test_quietly_prepare_batch() { } } -async fn assert_test_batch_table_rows_contain(sess: &LegacySession, expected_rows: &[(i32, i32)]) { +async fn assert_test_batch_table_rows_contain(sess: &Session, expected_rows: &[(i32, i32)]) { let selected_rows: BTreeSet<(i32, i32)> = sess .query_unpaged("SELECT a, b FROM test_batch_table", ()) .await .unwrap() - .rows_typed::<(i32, i32)>() + .into_rows_result() + .unwrap() + .unwrap() + .rows::<(i32, i32)>() .unwrap() .map(|r| r.unwrap()) .collect(); diff --git a/scylla/src/utils/test_utils.rs b/scylla/src/utils/test_utils.rs index e6286c607..2a7a21f69 100644 --- a/scylla/src/utils/test_utils.rs +++ b/scylla/src/utils/test_utils.rs @@ -2,7 +2,7 @@ use scylla_cql::frame::response::result::Row; #[cfg(test)] use crate::transport::session_builder::{GenericSessionBuilder, SessionBuilderKind}; -use crate::{LegacySession, Session}; +use crate::Session; #[cfg(test)] use std::{num::NonZeroU32, time::Duration}; use std::{ @@ -27,7 +27,7 @@ pub fn unique_keyspace_name() -> String { } #[cfg(test)] -pub(crate) async fn supports_feature(session: &LegacySession, feature: &str) -> bool { +pub(crate) async fn supports_feature(session: &Session, feature: &str) -> bool { // Cassandra doesn't have a concept of features, so first detect // if there is the `supported_features` column in system.local @@ -48,7 +48,10 @@ pub(crate) async fn supports_feature(session: &LegacySession, feature: &str) -> .query_unpaged("SELECT supported_features FROM system.local", ()) .await .unwrap() - .single_row_typed() + .into_rows_result() + .unwrap() + .unwrap() + .single_row() .unwrap(); features @@ -94,20 +97,6 @@ pub fn create_new_session_builder() -> GenericSessionBuilder bool { - let result = session - .query_unpaged( - "select column_name from system_schema.columns where - keyspace_name = 'system_schema' - and table_name = 'scylla_keyspaces' - and column_name = 'initial_tablets'", - &[], - ) - .await - .unwrap(); - result.single_row().is_ok() -} - pub async fn scylla_supports_tablets(session: &Session) -> bool { let result = session .query_unpaged( diff --git a/scylla/tests/integration/consistency.rs b/scylla/tests/integration/consistency.rs index 5531973ec..09780066a 100644 --- a/scylla/tests/integration/consistency.rs +++ b/scylla/tests/integration/consistency.rs @@ -6,8 +6,8 @@ use scylla::prepared_statement::PreparedStatement; use scylla::retry_policy::FallthroughRetryPolicy; use scylla::routing::{Shard, Token}; use scylla::test_utils::unique_keyspace_name; -use scylla::transport::session::LegacySession; use scylla::transport::NodeRef; +use scylla::Session; use scylla_cql::frame::response::result::TableSpec; use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; @@ -59,7 +59,7 @@ fn pairs_of_all_consistencies() -> impl Iterator, @@ -81,7 +81,7 @@ async fn query_consistency_set_directly( } async fn execute_consistency_set_directly( - session: &LegacySession, + session: &Session, prepared: &PreparedStatement, c: Consistency, sc: Option, @@ -94,7 +94,7 @@ async fn execute_consistency_set_directly( } async fn batch_consistency_set_directly( - 
session: &LegacySession, + session: &Session, batch: &Batch, c: Consistency, sc: Option, @@ -107,7 +107,7 @@ async fn batch_consistency_set_directly( // The following functions perform a request with consistencies set on a per-statement execution profile. async fn query_consistency_set_on_exec_profile( - session: &LegacySession, + session: &Session, query: &Query, profile: ExecutionProfileHandle, ) { @@ -118,7 +118,7 @@ async fn query_consistency_set_on_exec_profile( } async fn execute_consistency_set_on_exec_profile( - session: &LegacySession, + session: &Session, prepared: &PreparedStatement, profile: ExecutionProfileHandle, ) { @@ -129,7 +129,7 @@ async fn execute_consistency_set_on_exec_profile( } async fn batch_consistency_set_on_exec_profile( - session: &LegacySession, + session: &Session, batch: &Batch, profile: ExecutionProfileHandle, ) { @@ -159,7 +159,7 @@ async fn check_for_all_consistencies_and_setting_options< let session = session_builder .clone() .default_execution_profile_handle(base_for_every_profile.clone().build().into_handle()) - .build_legacy() + .build() .await .unwrap(); create_schema(&session, ks).await; @@ -212,7 +212,7 @@ async fn check_for_all_consistencies_and_setting_options< let session_with_consistencies = session_builder .clone() .default_execution_profile_handle(handle) - .build_legacy() + .build() .await .unwrap(); session_with_consistencies @@ -473,7 +473,7 @@ async fn consistency_allows_for_paxos_selects() { let session = SessionBuilder::new() .known_node(uri.as_str()) - .build_legacy() + .build() .await .unwrap(); diff --git a/scylla/tests/integration/execution_profiles.rs b/scylla/tests/integration/execution_profiles.rs index 46ca7c7a2..0a49bae78 100644 --- a/scylla/tests/integration/execution_profiles.rs +++ b/scylla/tests/integration/execution_profiles.rs @@ -159,7 +159,7 @@ async fn test_execution_profiles() { .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .default_execution_profile_handle(profile1.into_handle()) - .build_legacy() + .build() .await .unwrap(); let ks = unique_keyspace_name(); diff --git a/scylla/tests/integration/lwt_optimisation.rs b/scylla/tests/integration/lwt_optimisation.rs index 508c953bd..ca56cff93 100644 --- a/scylla/tests/integration/lwt_optimisation.rs +++ b/scylla/tests/integration/lwt_optimisation.rs @@ -1,8 +1,8 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; use scylla::retry_policy::FallthroughRetryPolicy; -use scylla::test_utils::scylla_supports_tablets_legacy; +use scylla::test_utils::scylla_supports_tablets; use scylla::test_utils::unique_keyspace_name; -use scylla::transport::session::LegacySession; +use scylla::transport::session::Session; use scylla::{ExecutionProfile, SessionBuilder}; use scylla_cql::frame::protocol_features::ProtocolFeatures; use scylla_cql::frame::types; @@ -52,11 +52,11 @@ async fn if_lwt_optimisation_mark_offered_then_negotiatied_and_lwt_routed_optima .into_handle(); // DB preparation phase - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .default_execution_profile_handle(handle) .address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); @@ -70,7 +70,7 @@ async fn if_lwt_optimisation_mark_offered_then_negotiatied_and_lwt_routed_optima // Create schema let ks = unique_keyspace_name(); let mut create_ks = format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 
'replication_factor' : 3}}", ks); - if scylla_supports_tablets_legacy(&session).await { + if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } session.query_unpaged(create_ks, &[]).await.unwrap(); diff --git a/scylla/tests/integration/new_session.rs b/scylla/tests/integration/new_session.rs index d28fa1d7a..6f734f0da 100644 --- a/scylla/tests/integration/new_session.rs +++ b/scylla/tests/integration/new_session.rs @@ -16,7 +16,7 @@ async fn proceed_if_only_some_hostnames_are_invalid() { let session = SessionBuilder::new() .known_nodes([uri1, uri2, uri3]) - .build_legacy() + .build() .await .unwrap(); session diff --git a/scylla/tests/integration/retries.rs b/scylla/tests/integration/retries.rs index e8bd5477c..43cbf5807 100644 --- a/scylla/tests/integration/retries.rs +++ b/scylla/tests/integration/retries.rs @@ -1,7 +1,7 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; use scylla::retry_policy::FallthroughRetryPolicy; use scylla::speculative_execution::SimpleSpeculativeExecutionPolicy; -use scylla::transport::session::LegacySession; +use scylla::transport::session::Session; use scylla::ExecutionProfile; use scylla::SessionBuilder; use scylla::{query::Query, test_utils::unique_keyspace_name}; @@ -27,11 +27,11 @@ async fn speculative_execution_is_fired() { max_retry_count: 2, retry_interval: Duration::from_millis(10), }))).retry_policy(Arc::new(FallthroughRetryPolicy)).build(); - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .default_execution_profile_handle(simple_speculative_no_retry_profile.into_handle()) .address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); @@ -104,10 +104,10 @@ async fn retries_occur() { let res = test_with_3_node_cluster(ShardAwareness::QueryNode, |proxy_uris, translation_map, mut running_proxy| async move { // DB preparation phase - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); @@ -183,11 +183,11 @@ async fn speculative_execution_panic_regression_test() { .retry_policy(Arc::new(FallthroughRetryPolicy)) .build(); // DB preparation phase - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .default_execution_profile_handle(profile.into_handle()) - .build_legacy() + .build() .await .unwrap(); diff --git a/scylla/tests/integration/self_identity.rs b/scylla/tests/integration/self_identity.rs index e7378c6e2..cba46f717 100644 --- a/scylla/tests/integration/self_identity.rs +++ b/scylla/tests/integration/self_identity.rs @@ -1,5 +1,5 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; -use scylla::{LegacySession, SessionBuilder}; +use scylla::{Session, SessionBuilder}; use scylla_cql::frame::request::options; use scylla_cql::frame::types; use std::sync::Arc; @@ -50,11 +50,11 @@ async fn test_given_self_identity(self_identity: SelfIdentity<'static>) { )])); // DB preparation phase - let _session: LegacySession = SessionBuilder::new() + let _session: Session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) .custom_identity(self_identity.clone()) - .build_legacy() + .build() .await .unwrap(); diff --git 
a/scylla/tests/integration/shards.rs b/scylla/tests/integration/shards.rs index c3db91ca2..b22cfc397 100644 --- a/scylla/tests/integration/shards.rs +++ b/scylla/tests/integration/shards.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::utils::{setup_tracing, test_with_3_node_cluster}; -use scylla::test_utils::scylla_supports_tablets_legacy; +use scylla::test_utils::scylla_supports_tablets; use scylla::{test_utils::unique_keyspace_name, SessionBuilder}; use tokio::sync::mpsc; @@ -32,14 +32,14 @@ async fn test_consistent_shard_awareness() { let session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); let ks = unique_keyspace_name(); /* Prepare schema */ let mut create_ks = format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3}}", ks); - if scylla_supports_tablets_legacy(&session).await { + if scylla_supports_tablets(&session).await { create_ks += " and TABLETS = { 'enabled': false}"; } session.query_unpaged(create_ks, &[]).await.unwrap(); diff --git a/scylla/tests/integration/silent_prepare_query.rs b/scylla/tests/integration/silent_prepare_query.rs index 64da40406..93950206a 100644 --- a/scylla/tests/integration/silent_prepare_query.rs +++ b/scylla/tests/integration/silent_prepare_query.rs @@ -1,5 +1,5 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; -use scylla::transport::session::LegacySession; +use scylla::Session; use scylla::SessionBuilder; use scylla::{query::Query, test_utils::unique_keyspace_name}; use scylla_proxy::{ @@ -19,10 +19,10 @@ async fn test_prepare_query_with_values() { let res = test_with_3_node_cluster(ShardAwareness::QueryNode, |proxy_uris, translation_map, mut running_proxy| async move { // DB preparation phase - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); @@ -70,10 +70,10 @@ async fn test_query_with_no_values() { let res = test_with_3_node_cluster(ShardAwareness::QueryNode, |proxy_uris, translation_map, mut running_proxy| async move { // DB preparation phase - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); diff --git a/scylla/tests/integration/skip_metadata_optimization.rs b/scylla/tests/integration/skip_metadata_optimization.rs index a50e33b8b..17f595400 100644 --- a/scylla/tests/integration/skip_metadata_optimization.rs +++ b/scylla/tests/integration/skip_metadata_optimization.rs @@ -1,7 +1,6 @@ use crate::utils::{setup_tracing, test_with_3_node_cluster}; -use scylla::transport::session::LegacySession; -use scylla::SessionBuilder; use scylla::{prepared_statement::PreparedStatement, test_utils::unique_keyspace_name}; +use scylla::{Session, SessionBuilder}; use scylla_cql::frame::request::query::{PagingState, PagingStateResponse}; use scylla_cql::frame::types; use scylla_proxy::{ @@ -20,10 +19,10 @@ async fn test_skip_result_metadata() { let res = test_with_3_node_cluster(ShardAwareness::QueryNode, |proxy_uris, translation_map, mut running_proxy| async move { // DB preparation phase - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(proxy_uris[0].as_str()) 
.address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); @@ -51,7 +50,7 @@ async fn test_skip_result_metadata() { } async fn test_with_flags_predicate( - session: &LegacySession, + session: &Session, prepared: &PreparedStatement, rx: &mut tokio::sync::mpsc::UnboundedReceiver<(ResponseFrame, Option)>, predicate: impl FnOnce(i32) -> bool @@ -114,7 +113,10 @@ async fn test_skip_result_metadata() { .query_unpaged(select_query, ()) .await .unwrap() - .rows_typed::() + .into_rows_result() + .unwrap() + .unwrap() + .rows::() .unwrap() .collect::, _>>() .unwrap(); @@ -130,8 +132,14 @@ async fn test_skip_result_metadata() { .execute_single_page(&prepared_paged, &[], paging_state) .await .unwrap(); - results_from_manual_paging - .extend(rs_manual.rows_typed::().unwrap().map(Result::unwrap)); + results_from_manual_paging.extend( + rs_manual.into_rows_result() + .unwrap() + .unwrap() + .rows::() + .unwrap() + .map(Result::unwrap) + ); match paging_state_response { PagingStateResponse::HasMorePages { state } => { diff --git a/scylla/tests/integration/tablets.rs b/scylla/tests/integration/tablets.rs index b570f0548..9dbb5d31a 100644 --- a/scylla/tests/integration/tablets.rs +++ b/scylla/tests/integration/tablets.rs @@ -16,7 +16,7 @@ use scylla::test_utils::unique_keyspace_name; use scylla::transport::ClusterData; use scylla::transport::Node; use scylla::transport::NodeRef; -use scylla::{ExecutionProfile, LegacyQueryResult, LegacySession}; +use scylla::{ExecutionProfile, QueryResult, Session}; use scylla::transport::errors::QueryError; use scylla_proxy::{ @@ -28,7 +28,7 @@ use tokio::sync::mpsc; use tracing::info; use uuid::Uuid; -#[derive(scylla::FromRow)] +#[derive(scylla::DeserializeRow)] struct SelectedTablet { last_token: i64, replicas: Vec<(Uuid, i32)>, @@ -40,7 +40,7 @@ struct Tablet { replicas: Vec<(Arc, i32)>, } -async fn get_tablets(session: &LegacySession, ks: &str, table: &str) -> Vec { +async fn get_tablets(session: &Session, ks: &str, table: &str) -> Vec { let cluster_data = session.get_cluster_data(); let endpoints = cluster_data.get_nodes_info(); for endpoint in endpoints.iter() { @@ -55,8 +55,10 @@ async fn get_tablets(session: &LegacySession, ks: &str, table: &str) -> Vec() + let mut selected_tablets: Vec = selected_tablets_response + .rows_stream::() + .unwrap() + .into_stream() .try_collect::>() .await .unwrap(); @@ -179,11 +181,11 @@ impl LoadBalancingPolicy for SingleTargetLBP { } async fn send_statement_everywhere( - session: &LegacySession, + session: &Session, cluster: &ClusterData, statement: &PreparedStatement, values: &dyn SerializeRow, -) -> Result, QueryError> { +) -> Result, QueryError> { let tasks = cluster.get_nodes_info().iter().flat_map(|node| { let shard_count: u16 = node.sharder().unwrap().nr_shards.into(); (0..shard_count).map(|shard| { @@ -205,10 +207,10 @@ async fn send_statement_everywhere( } async fn send_unprepared_query_everywhere( - session: &LegacySession, + session: &Session, cluster: &ClusterData, query: &Query, -) -> Result, QueryError> { +) -> Result, QueryError> { let tasks = cluster.get_nodes_info().iter().flat_map(|node| { let shard_count: u16 = node.sharder().unwrap().nr_shards.into(); (0..shard_count).map(|shard| { @@ -247,7 +249,7 @@ fn count_tablet_feedbacks( .count() } -async fn prepare_schema(session: &LegacySession, ks: &str, table: &str, tablet_count: usize) { +async fn prepare_schema(session: &Session, ks: &str, table: &str, tablet_count: usize) { session .query_unpaged( format!( @@ -294,11 +296,11 @@ async 
fn test_default_policy_is_tablet_aware() { let session = scylla::SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); - if !scylla::test_utils::scylla_supports_tablets_legacy(&session).await { + if !scylla::test_utils::scylla_supports_tablets(&session).await { tracing::warn!("Skipping test because this Scylla version doesn't support tablets"); return running_proxy; } @@ -416,6 +418,8 @@ async fn test_default_policy_is_tablet_aware() { #[tokio::test] #[ntest::timeout(30000)] async fn test_tablet_feedback_not_sent_for_unprepared_queries() { + use scylla::test_utils::scylla_supports_tablets; + setup_tracing(); const TABLET_COUNT: usize = 16; @@ -425,11 +429,11 @@ async fn test_tablet_feedback_not_sent_for_unprepared_queries() { let session = scylla::SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); - if !scylla::test_utils::scylla_supports_tablets_legacy(&session).await { + if !scylla_supports_tablets(&session).await { tracing::warn!("Skipping test because this Scylla version doesn't support tablets"); return running_proxy; } @@ -488,6 +492,8 @@ async fn test_tablet_feedback_not_sent_for_unprepared_queries() { #[ntest::timeout(30000)] #[ignore] async fn test_lwt_optimization_works_with_tablets() { + use scylla::test_utils::scylla_supports_tablets; + setup_tracing(); const TABLET_COUNT: usize = 16; @@ -497,11 +503,11 @@ async fn test_lwt_optimization_works_with_tablets() { let session = scylla::SessionBuilder::new() .known_node(proxy_uris[0].as_str()) .address_translator(Arc::new(translation_map)) - .build_legacy() + .build() .await .unwrap(); - if !scylla::test_utils::scylla_supports_tablets_legacy(&session).await { + if !scylla_supports_tablets(&session).await { tracing::warn!("Skipping test because this Scylla version doesn't support tablets"); return running_proxy; } From 9a092f91485660f8022e5a164edd6f32e7ad77a0 Mon Sep 17 00:00:00 2001 From: Piotr Dulikowski Date: Tue, 14 Mar 2023 12:04:02 +0100 Subject: [PATCH 20/25] examples: adjust to use the new interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit goes over all unadjusted examples and changes them to use the new deserialization framework. Again, it contains a lot of changes, but they are quite simple. 
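For reference, most of the edits in this commit follow the same before/after
pattern. The sketch below is illustrative only: the statement, keyspace and
column types are placeholders, not taken from any particular example, and the
calls shown (into_rows_result(), rows(), rows_stream()) mirror how they are
used elsewhere in this patch series.

    // Before: legacy result / iterator API
    let rows = session
        .query_unpaged("SELECT a, b FROM ks.t", &[])
        .await?
        .rows_typed::<(i32, String)>()?;

    let mut iter = session
        .query_iter("SELECT a, b FROM ks.t", &[])
        .await?
        .into_typed::<(i32, String)>();

    // After: new deserialization API
    // into_rows_result() yields an Option - None for non-Rows responses.
    let rows_result = session
        .query_unpaged("SELECT a, b FROM ks.t", &[])
        .await?
        .into_rows_result()?;
    if let Some(rows) = rows_result {
        for row in rows.rows::<(i32, String)>()? {
            let (a, b): (i32, String) = row?;
        }
    }

    // Paged queries now expose a typed stream instead of into_typed().
    let mut stream = session
        .query_iter("SELECT a, b FROM ks.t", &[])
        .await?
        .rows_stream::<(i32, String)>()?;
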
Co-authored-by: Wojciech Przytuła --- examples/allocations.rs | 10 ++-- examples/auth.rs | 2 +- examples/basic.rs | 33 ++++++------ examples/cloud.rs | 2 +- examples/compare-tokens.rs | 8 +-- examples/cql-time-types.rs | 24 ++++----- examples/cqlsh-rs.rs | 47 +++++++++-------- examples/custom_deserialization.rs | 65 +++++++++--------------- examples/custom_load_balancing_policy.rs | 6 +-- examples/execution_profile.rs | 12 ++--- examples/get_by_name.rs | 29 +++++++---- examples/logging.rs | 4 +- examples/logging_log.rs | 5 +- examples/parallel-prepared.rs | 4 +- examples/parallel.rs | 4 +- examples/query_history.rs | 12 +++-- examples/schema_agreement.rs | 10 ++-- examples/select-paging.rs | 20 +++++--- examples/speculative-execution.rs | 6 +-- examples/tls.rs | 10 ++-- examples/tower.rs | 25 +++++---- examples/tracing.rs | 40 +++++++-------- examples/user-defined-type.rs | 12 ++--- examples/value_list.rs | 15 +++--- 24 files changed, 213 insertions(+), 192 deletions(-) diff --git a/examples/allocations.rs b/examples/allocations.rs index f87f7641f..d10ad9771 100644 --- a/examples/allocations.rs +++ b/examples/allocations.rs @@ -1,5 +1,6 @@ use anyhow::Result; -use scylla::{statement::prepared_statement::PreparedStatement, LegacySession, SessionBuilder}; +use scylla::transport::session::Session; +use scylla::{statement::prepared_statement::PreparedStatement, SessionBuilder}; use std::io::Write; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -65,7 +66,7 @@ fn print_stats(stats: &stats_alloc::Stats, reqs: f64) { } async fn measure( - session: Arc, + session: Arc, prepared: Arc, reqs: usize, parallelism: usize, @@ -128,10 +129,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", args.node); - let session: LegacySession = SessionBuilder::new() - .known_node(args.node) - .build_legacy() - .await?; + let session: Session = SessionBuilder::new().known_node(args.node).build().await?; let session = Arc::new(session); session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/auth.rs b/examples/auth.rs index ded1115f3..22fbee007 100644 --- a/examples/auth.rs +++ b/examples/auth.rs @@ -10,7 +10,7 @@ async fn main() -> Result<()> { let session = SessionBuilder::new() .known_node(uri) .user("cassandra", "cassandra") - .build_legacy() + .build() .await .unwrap(); diff --git a/examples/basic.rs b/examples/basic.rs index ad8570db5..c4fe10b8b 100644 --- a/examples/basic.rs +++ b/examples/basic.rs @@ -1,7 +1,9 @@ use anyhow::Result; -use futures::TryStreamExt; -use scylla::macros::FromRow; -use scylla::transport::session::LegacySession; +use futures::StreamExt as _; +use futures::TryStreamExt as _; +use scylla::frame::response::result::Row; +use scylla::transport::session::Session; +use scylla::DeserializeRow; use scylla::SessionBuilder; use std::env; @@ -11,7 +13,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -53,23 +55,24 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT a, b, c FROM examples_ks.basic", &[]) .await? 
- .into_typed::<(i32, i32, String)>(); + .rows_stream::<(i32, i32, String)>()?; while let Some((a, b, c)) = iter.try_next().await? { println!("a, b, c: {}, {}, {}", a, b, c); } - // Or as custom structs that derive FromRow - #[derive(Debug, FromRow)] + // Or as custom structs that derive DeserializeRow + #[allow(unused)] + #[derive(Debug, DeserializeRow)] struct RowData { - _a: i32, - _b: Option, - _c: String, + a: i32, + b: Option, + c: String, } let mut iter = session .query_iter("SELECT a, b, c FROM examples_ks.basic", &[]) .await? - .into_typed::(); + .rows_stream::()?; while let Some(row_data) = iter.try_next().await? { println!("row_data: {:?}", row_data); } @@ -77,15 +80,13 @@ async fn main() -> Result<()> { // Or simply as untyped rows let mut iter = session .query_iter("SELECT a, b, c FROM examples_ks.basic", &[]) - .await?; - while let Some(row) = iter.try_next().await? { + .await? + .rows_stream::()?; + while let Some(row) = iter.next().await.transpose()? { let a = row.columns[0].as_ref().unwrap().as_int().unwrap(); let b = row.columns[1].as_ref().unwrap().as_int().unwrap(); let c = row.columns[2].as_ref().unwrap().as_text().unwrap(); println!("a, b, c: {}, {}, {}", a, b, c); - - // Alternatively each row can be parsed individually - // let (a2, b2, c2) = row.into_typed::<(i32, i32, String)>() ?; } let metrics = session.get_metrics(); diff --git a/examples/cloud.rs b/examples/cloud.rs index 5859ef12e..63265e41f 100644 --- a/examples/cloud.rs +++ b/examples/cloud.rs @@ -12,7 +12,7 @@ async fn main() -> Result<()> { .unwrap_or("examples/config_data.yaml".to_owned()); let session = CloudSessionBuilder::new(Path::new(&config_path)) .unwrap() - .build_legacy() + .build() .await .unwrap(); diff --git a/examples/compare-tokens.rs b/examples/compare-tokens.rs index 4863608ff..5350006b9 100644 --- a/examples/compare-tokens.rs +++ b/examples/compare-tokens.rs @@ -1,7 +1,7 @@ use anyhow::Result; use scylla::routing::Token; use scylla::transport::NodeAddr; -use scylla::{LegacySession, SessionBuilder}; +use scylla::{Session, SessionBuilder}; use std::env; #[tokio::main] @@ -10,7 +10,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -51,7 +51,9 @@ async fn main() -> Result<()> { (pk,), ) .await? - .single_row_typed::<(i64,)>()?; + .into_rows_result()? 
+ .expect("Got not Rows result") + .single_row()?; assert_eq!(t, qt); println!("token for {}: {}", pk, t); } diff --git a/examples/cql-time-types.rs b/examples/cql-time-types.rs index 1b9e475d4..29a66349e 100644 --- a/examples/cql-time-types.rs +++ b/examples/cql-time-types.rs @@ -3,10 +3,10 @@ use anyhow::Result; use chrono::{DateTime, NaiveDate, NaiveTime, Utc}; -use futures::{StreamExt, TryStreamExt}; +use futures::{StreamExt as _, TryStreamExt as _}; use scylla::frame::response::result::CqlValue; use scylla::frame::value::{CqlDate, CqlTime, CqlTimestamp}; -use scylla::transport::session::LegacySession; +use scylla::transport::session::Session; use scylla::SessionBuilder; use std::env; @@ -16,7 +16,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -44,7 +44,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT d from examples_ks.dates", &[]) .await? - .into_typed::<(NaiveDate,)>(); + .rows_stream::<(NaiveDate,)>()?; while let Some(row_result) = iter.next().await { let (read_date,): (NaiveDate,) = match row_result { Ok(read_date) => read_date, @@ -66,7 +66,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT d from examples_ks.dates", &[]) .await? - .into_typed::<(time::Date,)>(); + .rows_stream::<(time::Date,)>()?; while let Some(row_result) = iter.next().await { let (read_date,): (time::Date,) = match row_result { Ok(read_date) => read_date, @@ -88,7 +88,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT d from examples_ks.dates", &[]) .await? - .into_typed::<(CqlValue,)>(); + .rows_stream::<(CqlValue,)>()?; while let Some(row_result) = iter.next().await { let read_days: u32 = match row_result { Ok((CqlValue::Date(CqlDate(days)),)) => days, @@ -124,7 +124,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT d from examples_ks.times", &[]) .await? - .into_typed::<(NaiveTime,)>(); + .rows_stream::<(NaiveTime,)>()?; while let Some((read_time,)) = iter.try_next().await? { println!("Parsed a time into chrono::NaiveTime: {:?}", read_time); } @@ -139,7 +139,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT d from examples_ks.times", &[]) .await? - .into_typed::<(time::Time,)>(); + .rows_stream::<(time::Time,)>()?; while let Some((read_time,)) = iter.try_next().await? { println!("Parsed a time into time::Time: {:?}", read_time); } @@ -154,7 +154,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT d from examples_ks.times", &[]) .await? - .into_typed::<(CqlTime,)>(); + .rows_stream::<(CqlTime,)>()?; while let Some((read_time,)) = iter.try_next().await? { println!("Read a time as raw nanos: {:?}", read_time); } @@ -185,7 +185,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT d from examples_ks.timestamps", &[]) .await? - .into_typed::<(DateTime,)>(); + .rows_stream::<(DateTime,)>()?; while let Some((read_time,)) = iter.try_next().await? { println!( "Parsed a timestamp into chrono::DateTime: {:?}", @@ -206,7 +206,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT d from examples_ks.timestamps", &[]) .await? 
- .into_typed::<(time::OffsetDateTime,)>(); + .rows_stream::<(time::OffsetDateTime,)>()?; while let Some((read_time,)) = iter.try_next().await? { println!( "Parsed a timestamp into time::OffsetDateTime: {:?}", @@ -227,7 +227,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT d from examples_ks.timestamps", &[]) .await? - .into_typed::<(CqlTimestamp,)>(); + .rows_stream::<(CqlTimestamp,)>()?; while let Some((read_time,)) = iter.try_next().await? { println!("Read a timestamp as raw millis: {:?}", read_time); } diff --git a/examples/cqlsh-rs.rs b/examples/cqlsh-rs.rs index a4371909a..ba4651963 100644 --- a/examples/cqlsh-rs.rs +++ b/examples/cqlsh-rs.rs @@ -3,8 +3,11 @@ use rustyline::completion::{Completer, Pair}; use rustyline::error::ReadlineError; use rustyline::{CompletionType, Config, Context, Editor}; use rustyline_derive::{Helper, Highlighter, Hinter, Validator}; +use scylla::frame::response::result::Row; +use scylla::transport::session::Session; use scylla::transport::Compression; -use scylla::{LegacyQueryResult, LegacySession, SessionBuilder}; +use scylla::QueryRowsResult; +use scylla::SessionBuilder; use std::env; #[derive(Helper, Highlighter, Validator, Hinter)] @@ -173,23 +176,24 @@ impl Completer for CqlHelper { } } -fn print_result(result: &LegacyQueryResult) { - if result.rows.is_none() { - println!("OK"); - return; - } - for row in result.rows.as_ref().unwrap() { - for column in &row.columns { - print!("|"); - print!( - " {:16}", - match column { - None => "null".to_owned(), - Some(value) => format!("{:?}", value), - } - ); +fn print_result(result: Option<&QueryRowsResult>) { + if let Some(rows_result) = result { + for row in rows_result.rows::().unwrap() { + let row = row.unwrap(); + for column in &row.columns { + print!("|"); + print!( + " {:16}", + match column { + None => "null".to_owned(), + Some(value) => format!("{:?}", value), + } + ); + } + println!("|") } - println!("|") + } else { + println!("OK"); } } @@ -199,10 +203,10 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(uri) .compression(Some(Compression::Lz4)) - .build_legacy() + .build() .await?; let config = Config::builder() @@ -222,7 +226,10 @@ async fn main() -> Result<()> { let maybe_res = session.query_unpaged(line, &[]).await; match maybe_res { Err(err) => println!("Error: {}", err), - Ok(res) => print_result(&res), + Ok(res) => { + let rows_res = res.into_rows_result()?; + print_result(rows_res.as_ref()) + } } } Err(ReadlineError::Interrupted) => continue, diff --git a/examples/custom_deserialization.rs b/examples/custom_deserialization.rs index 7bd694c81..0306ebe87 100644 --- a/examples/custom_deserialization.rs +++ b/examples/custom_deserialization.rs @@ -1,8 +1,8 @@ -use anyhow::Result; -use scylla::cql_to_rust::{FromCqlVal, FromCqlValError}; -use scylla::frame::response::result::CqlValue; -use scylla::macros::impl_from_cql_value_from_method; -use scylla::{LegacySession, SessionBuilder}; +use anyhow::{Context, Result}; +use scylla::deserialize::DeserializeValue; +use scylla::frame::response::result::ColumnType; +use scylla::transport::session::Session; +use scylla::SessionBuilder; use std::env; #[tokio::main] @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = 
SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session @@ -28,53 +28,38 @@ async fn main() -> Result<()> { ) .await?; - // You can implement FromCqlVal for your own types + // You can implement DeserializeValue for your own types #[derive(PartialEq, Eq, Debug)] - struct MyType(String); + struct MyType<'a>(&'a str); - impl FromCqlVal for MyType { - fn from_cql(cql_val: CqlValue) -> Result { - Ok(Self( - cql_val.into_string().ok_or(FromCqlValError::BadCqlType)?, - )) + impl<'frame, 'metadata> DeserializeValue<'frame, 'metadata> for MyType<'frame> { + fn type_check( + typ: &scylla::frame::response::result::ColumnType, + ) -> std::result::Result<(), scylla::deserialize::TypeCheckError> { + <&str as DeserializeValue<'frame, 'metadata>>::type_check(typ) } - } - - let (v,) = session - .query_unpaged( - "SELECT v FROM examples_ks.custom_deserialization WHERE pk = 1", - (), - ) - .await? - .single_row_typed::<(MyType,)>()?; - assert_eq!(v, MyType("asdf".to_owned())); - - // If you defined an extension trait for CqlValue then you can use - // the `impl_from_cql_value_from_method` macro to turn it into - // a FromCqlValue impl - #[derive(PartialEq, Eq, Debug)] - struct MyOtherType(String); - trait CqlValueExt { - fn into_my_other_type(self) -> Option; - } + fn deserialize( + typ: &'metadata ColumnType<'metadata>, + v: Option>, + ) -> std::result::Result { + let s = <&str as DeserializeValue<'frame, 'metadata>>::deserialize(typ, v)?; - impl CqlValueExt for CqlValue { - fn into_my_other_type(self) -> Option { - Some(MyOtherType(self.into_string()?)) + Ok(Self(s)) } } - impl_from_cql_value_from_method!(MyOtherType, into_my_other_type); - - let (v,) = session + let rows_result = session .query_unpaged( "SELECT v FROM examples_ks.custom_deserialization WHERE pk = 1", (), ) .await? - .single_row_typed::<(MyOtherType,)>()?; - assert_eq!(v, MyOtherType("asdf".to_owned())); + .into_rows_result()? 
+ .context("Expected Result:Rows response, got a different Result response.")?; + + let (v,) = rows_result.single_row::<(MyType,)>()?; + assert_eq!(v, MyType("asdf")); println!("Ok."); diff --git a/examples/custom_load_balancing_policy.rs b/examples/custom_load_balancing_policy.rs index e70ed0213..5c279f233 100644 --- a/examples/custom_load_balancing_policy.rs +++ b/examples/custom_load_balancing_policy.rs @@ -6,7 +6,7 @@ use scylla::{ load_balancing::{LoadBalancingPolicy, RoutingInfo}, routing::Shard, transport::{ClusterData, ExecutionProfile}, - LegacySession, SessionBuilder, + Session, SessionBuilder, }; use std::{env, sync::Arc}; @@ -68,10 +68,10 @@ async fn main() -> Result<()> { .load_balancing_policy(Arc::new(custom_load_balancing)) .build(); - let _session: LegacySession = SessionBuilder::new() + let _session: Session = SessionBuilder::new() .known_node(uri) .default_execution_profile_handle(profile.into_handle()) - .build_legacy() + .build() .await?; Ok(()) diff --git a/examples/execution_profile.rs b/examples/execution_profile.rs index 46ae8e03f..3562966ac 100644 --- a/examples/execution_profile.rs +++ b/examples/execution_profile.rs @@ -4,7 +4,7 @@ use scylla::query::Query; use scylla::retry_policy::{DefaultRetryPolicy, FallthroughRetryPolicy}; use scylla::speculative_execution::PercentileSpeculativeExecutionPolicy; use scylla::statement::{Consistency, SerialConsistency}; -use scylla::transport::session::LegacySession; +use scylla::transport::session::Session; use scylla::transport::ExecutionProfile; use scylla::{SessionBuilder, SessionConfig}; use std::env; @@ -42,22 +42,22 @@ async fn main() -> Result<()> { let mut handle2 = profile2.into_handle(); // It is even possible to use multiple sessions interleaved, having them configured with different profiles. - let session1: LegacySession = SessionBuilder::new() + let session1: Session = SessionBuilder::new() .known_node(&uri) .default_execution_profile_handle(handle1.clone()) - .build_legacy() + .build() .await?; - let session2: LegacySession = SessionBuilder::new() + let session2: Session = SessionBuilder::new() .known_node(&uri) .default_execution_profile_handle(handle2.clone()) - .build_legacy() + .build() .await?; // As default execution profile is not provided explicitly, session 3 uses a predefined one. 
let mut session_3_config = SessionConfig::new(); session_3_config.add_known_node(uri); - let session3: LegacySession = LegacySession::connect(session_3_config).await?; + let session3: Session = Session::connect(session_3_config).await?; session1.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/get_by_name.rs b/examples/get_by_name.rs index a0a21b855..1caca3e3d 100644 --- a/examples/get_by_name.rs +++ b/examples/get_by_name.rs @@ -1,5 +1,6 @@ -use anyhow::{anyhow, Result}; -use scylla::transport::session::LegacySession; +use anyhow::{anyhow, Context as _, Result}; +use scylla::frame::response::result::Row; +use scylla::transport::session::Session; use scylla::SessionBuilder; use std::env; @@ -10,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -35,18 +36,26 @@ async fn main() -> Result<()> { ) .await?; - let query_result = session + let rows_result = session .query_unpaged("SELECT pk, ck, value FROM examples_ks.get_by_name", &[]) - .await?; - let (ck_idx, _) = query_result - .get_column_spec("ck") + .await? + .into_rows_result()? + .context("Response is not of Rows type")?; + let col_specs = rows_result.column_specs(); + let (ck_idx, _) = col_specs + .get_by_name("ck") .ok_or_else(|| anyhow!("No ck column found"))?; - let (value_idx, _) = query_result - .get_column_spec("value") + let (value_idx, _) = col_specs + .get_by_name("value") .ok_or_else(|| anyhow!("No value column found"))?; + let rows = rows_result + .rows::() + .unwrap() + .collect::, _>>() + .unwrap(); println!("ck | value"); println!("---------------------"); - for row in query_result.rows.ok_or_else(|| anyhow!("no rows found"))? 
{ + for row in rows { println!("{:?} | {:?}", row.columns[ck_idx], row.columns[value_idx]); } diff --git a/examples/logging.rs b/examples/logging.rs index 37e534b8c..6b090acbc 100644 --- a/examples/logging.rs +++ b/examples/logging.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use scylla::transport::session::LegacySession; +use scylla::transport::session::Session; use scylla::SessionBuilder; use std::env; use tracing::info; @@ -16,7 +16,7 @@ async fn main() -> Result<()> { let uri = env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string()); info!("Connecting to {}", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session.query_unpaged("USE examples_ks", &[]).await?; diff --git a/examples/logging_log.rs b/examples/logging_log.rs index a1f962419..19465018c 100644 --- a/examples/logging_log.rs +++ b/examples/logging_log.rs @@ -1,6 +1,5 @@ use anyhow::Result; -use scylla::transport::session::LegacySession; -use scylla::SessionBuilder; +use scylla::{Session, SessionBuilder}; use std::env; use tracing::info; @@ -18,7 +17,7 @@ async fn main() -> Result<()> { let uri = env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string()); info!("Connecting to {}", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; session.query_unpaged("USE examples_ks", &[]).await?; diff --git a/examples/parallel-prepared.rs b/examples/parallel-prepared.rs index 531f6d7b4..167b58394 100644 --- a/examples/parallel-prepared.rs +++ b/examples/parallel-prepared.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use scylla::{LegacySession, SessionBuilder}; +use scylla::{Session, SessionBuilder}; use std::env; use std::sync::Arc; @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; let session = Arc::new(session); session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/parallel.rs b/examples/parallel.rs index 5e3f119fb..716225fb7 100644 --- a/examples/parallel.rs +++ b/examples/parallel.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use scylla::{LegacySession, SessionBuilder}; +use scylla::{Session, SessionBuilder}; use std::env; use std::sync::Arc; @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; let session = Arc::new(session); session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/query_history.rs b/examples/query_history.rs index 710f9616d..04d958648 100644 --- a/examples/query_history.rs 
+++ b/examples/query_history.rs @@ -1,10 +1,11 @@ //! This example shows how to collect history of query execution. use anyhow::Result; -use futures::StreamExt; +use futures::StreamExt as _; +use scylla::frame::response::result::Row; use scylla::history::{HistoryCollector, StructuredHistory}; use scylla::query::Query; -use scylla::transport::session::LegacySession; +use scylla::transport::session::Session; use scylla::SessionBuilder; use std::env; use std::sync::Arc; @@ -15,7 +16,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -59,7 +60,10 @@ async fn main() -> Result<()> { let iter_history_listener = Arc::new(HistoryCollector::new()); iter_query.set_history_listener(iter_history_listener.clone()); - let mut rows_iterator = session.query_iter(iter_query, ()).await?; + let mut rows_iterator = session + .query_iter(iter_query, ()) + .await? + .rows_stream::()?; while let Some(_row) = rows_iterator.next().await { // Receive rows... } diff --git a/examples/schema_agreement.rs b/examples/schema_agreement.rs index 9b9369ac9..d37cc32b7 100644 --- a/examples/schema_agreement.rs +++ b/examples/schema_agreement.rs @@ -1,7 +1,7 @@ use anyhow::{bail, Result}; -use futures::TryStreamExt; +use futures::TryStreamExt as _; use scylla::transport::errors::QueryError; -use scylla::transport::session::LegacySession; +use scylla::transport::session::Session; use scylla::SessionBuilder; use std::env; use std::time::Duration; @@ -13,10 +13,10 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(uri) .schema_agreement_interval(Duration::from_secs(1)) // check every second for schema agreement if not agreed first check - .build_legacy() + .build() .await?; let schema_version = session.await_schema_agreement().await?; @@ -70,7 +70,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT a, b, c FROM examples_ks.schema_agreement", &[]) .await? - .into_typed::<(i32, i32, String)>(); + .rows_stream::<(i32, i32, String)>()?; while let Some((a, b, c)) = iter.try_next().await? 
{ println!("a, b, c: {}, {}, {}", a, b, c); } diff --git a/examples/select-paging.rs b/examples/select-paging.rs index f9027675a..b3c7501fe 100644 --- a/examples/select-paging.rs +++ b/examples/select-paging.rs @@ -1,7 +1,7 @@ use anyhow::Result; -use futures::stream::StreamExt; +use futures::StreamExt as _; use scylla::statement::PagingState; -use scylla::{query::Query, LegacySession, SessionBuilder}; +use scylla::{query::Query, Session, SessionBuilder}; use std::env; use std::ops::ControlFlow; @@ -11,7 +11,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -35,7 +35,7 @@ async fn main() -> Result<()> { let mut rows_stream = session .query_iter("SELECT a, b, c FROM examples_ks.select_paging", &[]) .await? - .into_typed::<(i32, i32, String)>(); + .rows_stream::<(i32, i32, String)>()?; while let Some(next_row_res) = rows_stream.next().await { let (a, b, c) = next_row_res?; @@ -51,10 +51,14 @@ async fn main() -> Result<()> { .query_single_page(paged_query.clone(), &[], paging_state) .await?; + let res = res + .into_rows_result()? + .expect("Got result different than Rows"); + println!( "Paging state: {:#?} ({} rows)", paging_state_response, - res.rows_num()?, + res.rows_num(), ); match paging_state_response.into_paging_control_flow() { @@ -81,10 +85,14 @@ async fn main() -> Result<()> { .execute_single_page(&paged_prepared, &[], paging_state) .await?; + let res = res + .into_rows_result()? + .expect("Got result different than Rows"); + println!( "Paging state from the prepared statement execution: {:#?} ({} rows)", paging_state_response, - res.rows_num()?, + res.rows_num(), ); match paging_state_response.into_paging_control_flow() { diff --git a/examples/speculative-execution.rs b/examples/speculative-execution.rs index c53285cac..e6c64e3ad 100644 --- a/examples/speculative-execution.rs +++ b/examples/speculative-execution.rs @@ -1,6 +1,6 @@ use scylla::{ speculative_execution::PercentileSpeculativeExecutionPolicy, - transport::execution_profile::ExecutionProfile, LegacySession, SessionBuilder, + transport::execution_profile::ExecutionProfile, Session, SessionBuilder, }; use anyhow::Result; @@ -20,10 +20,10 @@ async fn main() -> Result<()> { .speculative_execution_policy(Some(Arc::new(speculative))) .build(); - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(uri) .default_execution_profile_handle(speculative_profile.into_handle()) - .build_legacy() + .build() .await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; diff --git a/examples/tls.rs b/examples/tls.rs index 1bb354e56..d95f14bea 100644 --- a/examples/tls.rs +++ b/examples/tls.rs @@ -1,6 +1,6 @@ use anyhow::Result; -use futures::TryStreamExt; -use scylla::transport::session::LegacySession; +use futures::TryStreamExt as _; +use scylla::transport::session::Session; use scylla::SessionBuilder; use std::env; use std::fs; @@ -44,10 +44,10 @@ async fn main() -> Result<()> { context_builder.set_ca_file(ca_dir.as_path())?; context_builder.set_verify(SslVerifyMode::PEER); - let session: LegacySession = 
SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(uri) .ssl_context(Some(context_builder.build())) - .build_legacy() + .build() .await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -90,7 +90,7 @@ async fn main() -> Result<()> { let mut iter = session .query_iter("SELECT a, b, c FROM examples_ks.tls", &[]) .await? - .into_typed::<(i32, i32, String)>(); + .rows_stream::<(i32, i32, String)>()?; while let Some((a, b, c)) = iter.try_next().await? { println!("a, b, c: {}, {}, {}", a, b, c); } diff --git a/examples/tower.rs b/examples/tower.rs index 0b6085e00..c34c3f398 100644 --- a/examples/tower.rs +++ b/examples/tower.rs @@ -1,3 +1,5 @@ +use scylla::frame::response::result::Row; +use scylla::transport::session::Session; use std::env; use std::future::Future; use std::pin::Pin; @@ -7,12 +9,12 @@ use std::task::Poll; use tower::Service; struct SessionService { - session: Arc, + session: Arc, } // A trivial service implementation for sending parameterless simple string requests to Scylla. impl Service for SessionService { - type Response = scylla::LegacyQueryResult; + type Response = scylla::QueryResult; type Error = scylla::transport::errors::QueryError; type Future = Pin>>>; @@ -35,14 +37,16 @@ async fn main() -> anyhow::Result<()> { session: Arc::new( scylla::SessionBuilder::new() .known_node(uri) - .build_legacy() + .build() .await?, ), }; - let resp = session + let rows_result = session .call("SELECT keyspace_name, table_name FROM system_schema.tables;".into()) - .await?; + .await? + .into_rows_result()? + .expect("Got result different than Rows"); let print_text = |t: &Option| { t.as_ref() @@ -56,14 +60,15 @@ async fn main() -> anyhow::Result<()> { println!( "Tables:\n{}", - resp.rows()? - .into_iter() - .map(|r| format!( + rows_result + .rows::()? 
+ .map(|r| r.map(|r| format!( "\t{}.{}", print_text(&r.columns[0]), print_text(&r.columns[1]) - )) - .collect::>() + ))) + .collect::, _>>() + .unwrap() .join("\n") ); Ok(()) diff --git a/examples/tracing.rs b/examples/tracing.rs index 435e356c7..dd035c095 100644 --- a/examples/tracing.rs +++ b/examples/tracing.rs @@ -2,15 +2,14 @@ // query() prepare() execute() batch() query_iter() and execute_iter() can be traced use anyhow::{anyhow, Result}; -use futures::StreamExt; +use futures::StreamExt as _; use scylla::batch::Batch; use scylla::statement::{ prepared_statement::PreparedStatement, query::Query, Consistency, SerialConsistency, }; use scylla::tracing::TracingInfo; -use scylla::transport::iterator::LegacyRowIterator; -use scylla::LegacyQueryResult; -use scylla::{LegacySession, SessionBuilder}; +use scylla::QueryResult; +use scylla::{Session, SessionBuilder}; use std::env; use std::num::NonZeroU32; use std::time::Duration; @@ -21,9 +20,9 @@ async fn main() -> Result<()> { let uri = env::var("SCYLLA_URI").unwrap_or_else(|_| "127.0.0.1:9042".to_string()); println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(uri.as_str()) - .build_legacy() + .build() .await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -42,9 +41,9 @@ async fn main() -> Result<()> { query.set_serial_consistency(Some(SerialConsistency::LocalSerial)); // QueryResult will contain a tracing_id which can be used to query tracing information - let query_result: LegacyQueryResult = session.query_unpaged(query.clone(), &[]).await?; + let query_result: QueryResult = session.query_unpaged(query.clone(), &[]).await?; let query_tracing_id: Uuid = query_result - .tracing_id + .tracing_id() .ok_or_else(|| anyhow!("Tracing id is None!"))?; // Get tracing information for this query and print it @@ -79,23 +78,24 @@ async fn main() -> Result<()> { // To trace execution of a prepared statement tracing must be enabled for it prepared.set_tracing(true); - let execute_result: LegacyQueryResult = session.execute_unpaged(&prepared, &[]).await?; - println!("Execute tracing id: {:?}", execute_result.tracing_id); + let execute_result: QueryResult = session.execute_unpaged(&prepared, &[]).await?; + println!("Execute tracing id: {:?}", execute_result.tracing_id()); // PAGED QUERY_ITER EXECUTE_ITER // It's also possible to trace paged queries like query_iter or execute_iter - // After iterating through all rows iterator.get_tracing_ids() will give tracing ids - // for all page queries - let mut row_iterator: LegacyRowIterator = session.query_iter(query, &[]).await?; + // After iterating through all rows query_pager.tracing_ids() will give tracing ids + // for all page queries. 
+ let mut row_stream = session.query_iter(query, &[]).await?.rows_stream()?; - while let Some(_row) = row_iterator.next().await { + while let Some(row) = row_stream.next().await { // Receive rows + let _row: (String, i32, Uuid) = row?; } // Now print tracing ids for all page queries: println!( - "Paged row iterator tracing ids: {:?}\n", - row_iterator.get_tracing_ids() + "Paged row stream tracing ids: {:?}\n", + row_stream.tracing_ids() ); // BATCH @@ -105,19 +105,19 @@ async fn main() -> Result<()> { batch.set_tracing(true); // Run the batch and print its tracing_id - let batch_result: LegacyQueryResult = session.batch(&batch, ((),)).await?; - println!("Batch tracing id: {:?}\n", batch_result.tracing_id); + let batch_result: QueryResult = session.batch(&batch, ((),)).await?; + println!("Batch tracing id: {:?}\n", batch_result.tracing_id()); // CUSTOM // Session configuration allows specifying custom settings for querying tracing info. // Tracing info might not immediately be available on queried node // so the driver performs a few attempts with sleeps in between. - let session: LegacySession = SessionBuilder::new() + let session: Session = SessionBuilder::new() .known_node(uri) .tracing_info_fetch_attempts(NonZeroU32::new(8).unwrap()) .tracing_info_fetch_interval(Duration::from_millis(100)) .tracing_info_fetch_consistency(Consistency::One) - .build_legacy() + .build() .await?; let _custom_info: TracingInfo = session.get_tracing_info(&query_tracing_id).await?; diff --git a/examples/user-defined-type.rs b/examples/user-defined-type.rs index 9e01586a7..39b300373 100644 --- a/examples/user-defined-type.rs +++ b/examples/user-defined-type.rs @@ -1,7 +1,7 @@ use anyhow::Result; -use futures::TryStreamExt; -use scylla::macros::FromUserType; -use scylla::{LegacySession, SerializeValue, SessionBuilder}; +use futures::TryStreamExt as _; +use scylla::macros::DeserializeValue; +use scylla::{SerializeValue, Session, SessionBuilder}; use std::env; #[tokio::main] @@ -10,7 +10,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await?; session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -30,7 +30,7 @@ async fn main() -> Result<()> { // Define custom struct that matches User Defined Type created earlier // wrapping field in Option will gracefully handle null field values - #[derive(Debug, FromUserType, SerializeValue)] + #[derive(Debug, DeserializeValue, SerializeValue)] struct MyType { int_val: i32, text_val: Option, @@ -56,7 +56,7 @@ async fn main() -> Result<()> { &[], ) .await? - .into_typed::<(MyType,)>(); + .rows_stream::<(MyType,)>()?; while let Some((my_val,)) = iter.try_next().await? 
{ println!("{:?}", my_val); } diff --git a/examples/value_list.rs b/examples/value_list.rs index ce997b70e..a8197edca 100644 --- a/examples/value_list.rs +++ b/examples/value_list.rs @@ -1,5 +1,6 @@ use anyhow::Result; -use scylla::{LegacySession, SessionBuilder}; +use futures::StreamExt; +use scylla::{Session, SessionBuilder}; use std::env; #[tokio::main] @@ -8,7 +9,7 @@ async fn main() -> Result<()> { println!("Connecting to {} ...", uri); - let session: LegacySession = SessionBuilder::new().known_node(uri).build_legacy().await?; + let session: Session = SessionBuilder::new().known_node(uri).build().await.unwrap(); session.query_unpaged("CREATE KEYSPACE IF NOT EXISTS examples_ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", &[]).await?; @@ -56,11 +57,13 @@ async fn main() -> Result<()> { ) .await?; - let q = session - .query_unpaged("SELECT * FROM examples_ks.my_type", &[]) - .await?; + let iter = session + .query_iter("SELECT * FROM examples_ks.my_type", &[]) + .await? + .rows_stream::<(i32, String)>()?; - println!("Q: {:?}", q.rows); + let rows = iter.collect::>().await; + println!("Q: {:?}", rows); Ok(()) } From a204a7bb080cfbbc9925ad0d70e2c01f4024dcbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Wed, 14 Aug 2024 08:33:01 +0200 Subject: [PATCH 21/25] codewide: migrate doctests to new deser API --- scylla/src/lib.rs | 26 +-- scylla/src/transport/execution_profile.rs | 12 +- scylla/src/transport/session.rs | 80 ++++---- scylla/src/transport/session_builder.rs | 212 +++++++++++----------- 4 files changed, 169 insertions(+), 161 deletions(-) diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index d529e8141..bac3fd3f9 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -17,15 +17,15 @@ //! `Session` is created by specifying a few known nodes and connecting to them: //! //! ```rust,no_run -//! use scylla::{LegacySession, SessionBuilder}; +//! use scylla::{Session, SessionBuilder}; //! use std::error::Error; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { -//! let session: LegacySession = SessionBuilder::new() +//! let session: Session = SessionBuilder::new() //! .known_node("127.0.0.1:9042") //! .known_node("1.2.3.4:9876") -//! .build_legacy() +//! .build() //! .await?; //! //! Ok(()) @@ -50,9 +50,9 @@ //! //! The easiest way to specify bound values in a query is using a tuple: //! ```rust -//! # use scylla::LegacySession; +//! # use scylla::Session; //! # use std::error::Error; -//! # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { +//! # async fn check_only_compiles(session: &Session) -> Result<(), Box> { //! // Insert an int and text into the table //! session //! .query_unpaged( @@ -69,24 +69,24 @@ //! The easiest way to read rows returned by a query is to cast each row to a tuple of values: //! //! ```rust -//! # use scylla::LegacySession; +//! # use scylla::Session; //! # use std::error::Error; -//! # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { -//! use scylla::IntoTypedRows; +//! # async fn check_only_compiles(session: &Session) -> Result<(), Box> { //! //! // Read rows containing an int and text //! // Keep in mind that all results come in one response (no paging is done!), //! // so the memory footprint and latency may be huge! //! // To prevent that, use `Session::query_iter` or `Session::query_single_page`. -//! let rows_opt = session +//! let query_rows = session //! .query_unpaged("SELECT a, b FROM ks.tab", &[]) //! .await? -//! 
.rows; +//! .into_rows_result()?; +//! //! -//! if let Some(rows) = rows_opt { -//! for row in rows.into_typed::<(i32, String)>() { +//! if let Some(rows) = query_rows { +//! for row in rows.rows()? { //! // Parse row as int and text \ -//! let (int_val, text_val): (i32, String) = row?; +//! let (int_val, text_val): (i32, &str) = row?; //! } //! } //! # Ok(()) diff --git a/scylla/src/transport/execution_profile.rs b/scylla/src/transport/execution_profile.rs index 421a7da7e..a94addec5 100644 --- a/scylla/src/transport/execution_profile.rs +++ b/scylla/src/transport/execution_profile.rs @@ -16,7 +16,7 @@ //! # extern crate scylla; //! # use std::error::Error; //! # async fn check_only_compiles() -> Result<(), Box> { -//! use scylla::{LegacySession, SessionBuilder}; +//! use scylla::{Session, SessionBuilder}; //! use scylla::statement::Consistency; //! use scylla::transport::ExecutionProfile; //! @@ -27,10 +27,10 @@ //! //! let handle = profile.into_handle(); //! -//! let session: LegacySession = SessionBuilder::new() +//! let session: Session = SessionBuilder::new() //! .known_node("127.0.0.1:9042") //! .default_execution_profile_handle(handle) -//! .build_legacy() +//! .build() //! .await?; //! # Ok(()) //! # } @@ -109,7 +109,7 @@ //! # extern crate scylla; //! # use std::error::Error; //! # async fn check_only_compiles() -> Result<(), Box> { -//! use scylla::{LegacySession, SessionBuilder}; +//! use scylla::{Session, SessionBuilder}; //! use scylla::query::Query; //! use scylla::statement::Consistency; //! use scylla::transport::ExecutionProfile; @@ -125,10 +125,10 @@ //! let mut handle1 = profile1.clone().into_handle(); //! let mut handle2 = profile2.clone().into_handle(); //! -//! let session: LegacySession = SessionBuilder::new() +//! let session: Session = SessionBuilder::new() //! .known_node("127.0.0.1:9042") //! .default_execution_profile_handle(handle1.clone()) -//! .build_legacy() +//! .build() //! .await?; //! //! let mut query1 = Query::from("SELECT * FROM ks.table"); diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index b53cc282b..4db0bbde2 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -486,9 +486,9 @@ impl GenericSession { /// /// # Examples /// ```rust - /// # use scylla::LegacySession; + /// # use scylla::Session; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// // Insert an int and text into a table. /// session /// .query_unpaged( @@ -500,24 +500,24 @@ impl GenericSession { /// # } /// ``` /// ```rust - /// # use scylla::LegacySession; + /// # use scylla::Session; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use scylla::IntoTypedRows; /// /// // Read rows containing an int and text. /// // Keep in mind that all results come in one response (no paging is done!), /// // so the memory footprint and latency may be huge! /// // To prevent that, use `Session::query_iter` or `Session::query_single_page`. - /// let rows_opt = session - /// .query_unpaged("SELECT a, b FROM ks.tab", &[]) + /// let query_rows = session + /// .query_unpaged("SELECT a, b FROM ks.tab", &[]) /// .await? 
- /// .rows; + /// .into_rows_result()?; /// - /// if let Some(rows) = rows_opt { - /// for row in rows.into_typed::<(i32, String)>() { - /// // Parse row as int and text \ - /// let (int_val, text_val): (i32, String) = row?; + /// if let Some(rows) = query_rows { + /// for row in rows.rows()? { + /// // Parse row as int and text. + /// let (int_val, text_val): (i32, &str) = row?; /// } /// } /// # Ok(()) @@ -546,9 +546,9 @@ impl GenericSession { /// # Example /// /// ```rust - /// # use scylla::LegacySession; + /// # use scylla::Session; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use std::ops::ControlFlow; /// use scylla::statement::PagingState; /// @@ -560,7 +560,11 @@ impl GenericSession { /// .await?; /// /// // Do something with a single page of results. - /// for row in res.rows_typed::<(i32, String)>()? { + /// for row in res + /// .into_rows_result()? + /// .unwrap() + /// .rows::<(i32, &str)>()? + /// { /// let (a, b) = row?; /// } /// @@ -608,16 +612,16 @@ impl GenericSession { /// # Example /// /// ```rust - /// # use scylla::LegacySession; + /// # use scylla::Session; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use scylla::IntoTypedRows; /// use futures::stream::StreamExt; /// /// let mut rows_stream = session /// .query_iter("SELECT a, b FROM ks.t", &[]) /// .await? - /// .into_typed::<(i32, i32)>(); + /// .rows_stream::<(i32, i32)>()?; /// /// while let Some(next_row_res) = rows_stream.next().await { /// let (a, b): (i32, i32) = next_row_res?; @@ -661,9 +665,9 @@ impl GenericSession { /// /// # Example /// ```rust - /// # use scylla::LegacySession; + /// # use scylla::Session; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use scylla::prepared_statement::PreparedStatement; /// /// // Prepare the query for later execution @@ -697,9 +701,9 @@ impl GenericSession { /// # Example /// /// ```rust - /// # use scylla::LegacySession; + /// # use scylla::Session; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use std::ops::ControlFlow; /// use scylla::query::Query; /// use scylla::statement::{PagingState, PagingStateResponse}; @@ -719,7 +723,11 @@ impl GenericSession { /// .await?; /// /// // Do something with a single page of results. - /// for row in res.rows_typed::<(i32, String)>()? { + /// for row in res + /// .into_rows_result()? + /// .unwrap() + /// .rows::<(i32, &str)>()? 
+ /// { /// let (a, b) = row?; /// } /// @@ -763,12 +771,12 @@ impl GenericSession { /// # Example /// /// ```rust - /// # use scylla::LegacySession; + /// # use scylla::Session; + /// # use futures::StreamExt as _; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use scylla::prepared_statement::PreparedStatement; /// use scylla::IntoTypedRows; - /// use futures::stream::StreamExt; /// /// // Prepare the query for later execution /// let prepared: PreparedStatement = session @@ -779,7 +787,7 @@ impl GenericSession { /// let mut rows_stream = session /// .execute_iter(prepared, &[]) /// .await? - /// .into_typed::<(i32, i32)>(); + /// .rows_stream::<(i32, i32)>()?; /// /// while let Some(next_row_res) = rows_stream.next().await { /// let (a, b): (i32, i32) = next_row_res?; @@ -815,9 +823,9 @@ impl GenericSession { /// /// # Example /// ```rust - /// # use scylla::LegacySession; + /// # use scylla::Session; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use scylla::batch::Batch; /// /// let mut batch: Batch = Default::default(); @@ -944,13 +952,13 @@ where /// ```rust /// # use std::error::Error; /// # async fn check_only_compiles() -> Result<(), Box> { - /// use scylla::{LegacySession, SessionConfig}; + /// use scylla::{Session, SessionConfig}; /// use scylla::transport::KnownNode; /// /// let mut config = SessionConfig::new(); /// config.known_nodes.push(KnownNode::Hostname("127.0.0.1:9042".to_string())); /// - /// let session: LegacySession = LegacySession::connect(config).await?; + /// let session: Session = Session::connect(config).await?; /// # Ok(()) /// # } /// ``` @@ -1282,9 +1290,9 @@ where /// /// # Example /// ```rust - /// # use scylla::LegacySession; + /// # use scylla::Session; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use scylla::prepared_statement::PreparedStatement; /// /// // Prepare the query for later execution @@ -1611,9 +1619,9 @@ where /// /// # Example /// ```rust /// # extern crate scylla; - /// # use scylla::LegacySession; + /// # use scylla::Session; /// # use std::error::Error; - /// # async fn check_only_compiles(session: &LegacySession) -> Result<(), Box> { + /// # async fn check_only_compiles(session: &Session) -> Result<(), Box> { /// use scylla::batch::Batch; /// /// // Create a batch statement with unprepared statements @@ -1672,10 +1680,10 @@ where /// * `case_sensitive` - if set to true the generated query will put keyspace name in quotes /// # Example /// ```rust - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// # let session = SessionBuilder::new().known_node("127.0.0.1:9042").build_legacy().await?; + /// # let session = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?; /// session /// .query_unpaged("INSERT INTO my_keyspace.tab (a) VALUES ('test1')", &[]) /// .await?; diff --git a/scylla/src/transport/session_builder.rs b/scylla/src/transport/session_builder.rs index 73725d83e..31b653a5c 100644 --- a/scylla/src/transport/session_builder.rs +++ 
b/scylla/src/transport/session_builder.rs @@ -62,13 +62,13 @@ pub type CloudSessionBuilder = GenericSessionBuilder; /// # Example /// /// ``` -/// # use scylla::{LegacySession, SessionBuilder}; +/// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { -/// let session: LegacySession = SessionBuilder::new() +/// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .compression(Some(Compression::Snappy)) -/// .build_legacy() +/// .build() /// .await?; /// # Ok(()) /// # } @@ -97,22 +97,22 @@ impl GenericSessionBuilder { /// Add a known node with a hostname /// # Examples /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } /// ``` /// /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("db1.example.com") - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -125,12 +125,12 @@ impl GenericSessionBuilder { /// Add a known node with an IP address /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use std::net::{SocketAddr, IpAddr, Ipv4Addr}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node_addr(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 9042)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -143,11 +143,11 @@ impl GenericSessionBuilder { /// Add a list of known nodes with hostnames /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_nodes(["127.0.0.1:9042", "db1.example.com"]) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -160,15 +160,15 @@ impl GenericSessionBuilder { /// Add a list of known nodes with IP addresses /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use std::net::{SocketAddr, IpAddr, Ipv4Addr}; /// # async fn example() -> Result<(), Box> { /// let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(172, 17, 0, 3)), 9042); /// let addr2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(172, 17, 0, 4)), 9042); /// - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_nodes_addr([addr1, addr2]) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -186,14 +186,14 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session 
= SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .use_keyspace("my_keyspace_name", false) /// .user("cassandra", "cassandra") - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -212,7 +212,7 @@ impl GenericSessionBuilder { /// ``` /// # use std::sync::Arc; /// use bytes::Bytes; - /// use scylla::{LegacySession, SessionBuilder}; + /// use scylla::{Session, SessionBuilder}; /// use async_trait::async_trait; /// use scylla::authentication::{AuthenticatorProvider, AuthenticatorSession, AuthError}; /// # use scylla::transport::Compression; @@ -240,12 +240,12 @@ impl GenericSessionBuilder { /// } /// /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .use_keyspace("my_keyspace_name", false) /// .user("cassandra", "cassandra") /// .authenticator_provider(Arc::new(CustomAuthenticatorProvider)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -266,7 +266,7 @@ impl GenericSessionBuilder { /// # use async_trait::async_trait; /// # use std::net::SocketAddr; /// # use std::sync::Arc; - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::session::{AddressTranslator, TranslationError}; /// # use scylla::transport::topology::UntranslatedPeer; /// struct IdentityTranslator; @@ -282,10 +282,10 @@ impl GenericSessionBuilder { /// } /// /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .address_translator(Arc::new(IdentityTranslator)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -296,7 +296,7 @@ impl GenericSessionBuilder { /// # use std::sync::Arc; /// # use std::collections::HashMap; /// # use std::str::FromStr; - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::session::{AddressTranslator, TranslationError}; /// # /// # async fn example() -> Result<(), Box> { @@ -304,10 +304,10 @@ impl GenericSessionBuilder { /// let addr_before_translation = SocketAddr::from_str("192.168.0.42:19042").unwrap(); /// let addr_after_translation = SocketAddr::from_str("157.123.12.42:23203").unwrap(); /// translation_rules.insert(addr_before_translation, addr_after_translation); - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .address_translator(Arc::new(translation_rules)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -327,7 +327,7 @@ impl GenericSessionBuilder { /// ``` /// # use std::fs; /// # use std::path::PathBuf; - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use openssl::ssl::{SslContextBuilder, SslVerifyMode, SslMethod, SslFiletype}; /// # async fn example() -> Result<(), Box> { /// let certdir = fs::canonicalize(PathBuf::from("./examples/certs/scylla.crt"))?; @@ -335,10 +335,10 @@ impl GenericSessionBuilder { /// context_builder.set_certificate_file(certdir.as_path(), SslFiletype::PEM)?; /// context_builder.set_verify(SslVerifyMode::NONE); /// - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// 
.ssl_context(Some(context_builder.build())) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -350,8 +350,8 @@ impl GenericSessionBuilder { } } -// NOTE: this `impl` block contains configuration options specific for **Cloud** [`LegacySession`]. -// This means that if an option fits both non-Cloud and Cloud `LegacySession`s, it should NOT be put +// NOTE: this `impl` block contains configuration options specific for **Cloud** [`Session`]. +// This means that if an option fits both non-Cloud and Cloud `Session`s, it should NOT be put // here, but rather in `impl GenericSessionBuilder` block. #[cfg(feature = "cloud")] impl CloudSessionBuilder { @@ -386,13 +386,13 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .compression(Some(Compression::Snappy)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -407,13 +407,13 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use std::time::Duration; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .schema_agreement_interval(Duration::from_secs(5)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -427,17 +427,17 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{statement::Consistency, ExecutionProfile, LegacySession, SessionBuilder}; + /// # use scylla::{statement::Consistency, ExecutionProfile, Session, SessionBuilder}; /// # use std::time::Duration; /// # async fn example() -> Result<(), Box> { /// let execution_profile = ExecutionProfile::builder() /// .consistency(Consistency::All) /// .request_timeout(Some(Duration::from_secs(2))) /// .build(); - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .default_execution_profile_handle(execution_profile.into_handle()) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -455,12 +455,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tcp_nodelay(true) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -478,12 +478,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tcp_keepalive_interval(std::time::Duration::from_secs(42)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -506,13 +506,13 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use 
scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .use_keyspace("my_keyspace_name", false) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -536,7 +536,7 @@ impl GenericSessionBuilder { /// let session: LegacySession = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .compression(Some(Compression::Snappy)) - /// .build_legacy() // Turns SessionBuilder into Session + /// .build_legacy() // Turns SessionBuilder into LegacySession /// .await?; /// # Ok(()) /// # } @@ -550,7 +550,7 @@ impl GenericSessionBuilder { /// Builds the Session after setting all the options. /// /// The new session object uses the new deserialization API. If you wish - /// to use the old API, use [`SessionBuilder::build_legacy`]. + /// to use the old API, use [`SessionBuilder::build`]. /// /// # Example /// ``` @@ -577,13 +577,13 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use std::time::Duration; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .connection_timeout(Duration::from_secs(30)) - /// .build_legacy() // Turns SessionBuilder into Session + /// .build() // Turns SessionBuilder into Session /// .await?; /// # Ok(()) /// # } @@ -598,17 +598,17 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { /// use std::num::NonZeroUsize; /// use scylla::transport::session::PoolSize; /// /// // This session will establish 4 connections to each node. 
/// // For Scylla clusters, this number will be divided across shards - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .pool_size(PoolSize::PerHost(NonZeroUsize::new(4).unwrap())) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -642,12 +642,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .disallow_shard_aware_port(true) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -662,12 +662,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .keyspaces_to_fetch(["my_keyspace"]) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -685,12 +685,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .fetch_schema_metadata(true) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -708,12 +708,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .keepalive_interval(std::time::Duration::from_secs(42)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -738,12 +738,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .keepalive_timeout(std::time::Duration::from_secs(42)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -765,12 +765,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .schema_agreement_timeout(std::time::Duration::from_secs(120)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -785,12 +785,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let 
session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .auto_await_schema_agreement(false) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -813,16 +813,16 @@ impl GenericSessionBuilder { /// # use async_trait::async_trait; /// # use std::net::SocketAddr; /// # use std::sync::Arc; - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::session::{AddressTranslator, TranslationError}; /// # use scylla::transport::host_filter::DcHostFilter; /// /// # async fn example() -> Result<(), Box> { /// // The session will only connect to nodes from "my-local-dc" - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .host_filter(Arc::new(DcHostFilter::new("my-local-dc".to_string()))) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -837,12 +837,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .refresh_metadata_on_auto_schema_agreement(true) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -865,13 +865,13 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use std::num::NonZeroU32; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tracing_info_fetch_attempts(NonZeroU32::new(10).unwrap()) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -894,13 +894,13 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use std::time::Duration; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tracing_info_fetch_interval(Duration::from_millis(50)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -916,12 +916,12 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder, statement::Consistency}; + /// # use scylla::{Session, SessionBuilder, statement::Consistency}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .tracing_info_fetch_consistency(Consistency::One) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -946,13 +946,13 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::Compression; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .write_coalescing(false) // Enabled by default - /// .build_legacy() + /// .build() /// 
.await?; /// # Ok(()) /// # } @@ -971,12 +971,12 @@ impl GenericSessionBuilder { /// means that the metadata is refreshed every 20 seconds. /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # async fn example() -> Result<(), Box> { - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .cluster_metadata_refresh_interval(std::time::Duration::from_secs(20)) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } @@ -994,13 +994,13 @@ impl GenericSessionBuilder { /// /// # Example /// ``` - /// # use scylla::{LegacySession, SessionBuilder}; + /// # use scylla::{Session, SessionBuilder}; /// # use scylla::transport::SelfIdentity; /// # async fn example() -> Result<(), Box> { /// let (app_major, app_minor, app_patch) = (2, 1, 3); /// let app_version = format!("{app_major}.{app_minor}.{app_patch}"); /// - /// let session: LegacySession = SessionBuilder::new() + /// let session: Session = SessionBuilder::new() /// .known_node("127.0.0.1:9042") /// .custom_identity( /// SelfIdentity::new() @@ -1008,7 +1008,7 @@ impl GenericSessionBuilder { /// .with_application_name("my-app") /// .with_application_version(app_version) /// ) - /// .build_legacy() + /// .build() /// .await?; /// # Ok(()) /// # } From e99b8756cf2a344dba204850e716910070c90736 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 21 May 2024 17:47:45 +0200 Subject: [PATCH 22/25] session_test: regression test empty collections deserialization ScyllaDB does not distinguish empty collections from nulls. That is, INSERTing an empty collection is equivalent to nullifying the corresponding column. As pointed out in [#1001](https://github.com/scylladb/scylla-rust-driver/issues/1001), it's a nice QOL feature to be able to deserialize empty CQL collections to empty Rust collections instead of `None::`. A test is added that checks it. --- scylla/src/transport/session_test.rs | 72 ++++++++++++++++++++++++++-- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index 1d036ff8f..a2d85c05b 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -1,4 +1,5 @@ use crate::batch::{Batch, BatchStatement}; +use crate::deserialize::DeserializeValue; use crate::prepared_statement::PreparedStatement; use crate::query::Query; use crate::retry_policy::{QueryInfo, RetryDecision, RetryPolicy, RetrySession}; @@ -25,12 +26,12 @@ use assert_matches::assert_matches; use futures::{FutureExt, StreamExt as _, TryStreamExt}; use itertools::Itertools; use scylla_cql::frame::request::query::{PagingState, PagingStateResponse}; -use scylla_cql::frame::response::result::ColumnType; -use scylla_cql::frame::response::result::Row; +use scylla_cql::frame::response::result::{ColumnType, Row}; +use scylla_cql::frame::value::CqlVarint; use scylla_cql::types::serialize::row::{SerializeRow, SerializedValues}; use scylla_cql::types::serialize::value::SerializeValue; -use std::collections::BTreeSet; use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeSet, HashSet}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tokio::net::TcpListener; @@ -3084,3 +3085,68 @@ async fn test_manual_primary_key_computation() { .await; } } + +/// ScyllaDB does not distinguish empty collections from nulls. 
That is, INSERTing an empty collection +/// is equivalent to nullifying the corresponding column. +/// As pointed out in [#1001](https://github.com/scylladb/scylla-rust-driver/issues/1001), it's a nice +/// QOL feature to be able to deserialize empty CQL collections to empty Rust collections instead of +/// `None::`. This test checks that. +#[tokio::test] +async fn test_deserialize_empty_collections() { + // Setup session. + let ks = unique_keyspace_name(); + let session = create_new_session_builder().build().await.unwrap(); + session.query_unpaged(format!("CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}}", ks), &[]).await.unwrap(); + session.use_keyspace(&ks, true).await.unwrap(); + + async fn deserialize_empty_collection< + Collection: Default + for<'frame> DeserializeValue<'frame, 'frame> + SerializeValue, + >( + session: &Session, + collection_name: &str, + collection_type_params: &str, + ) -> Collection { + // Create a table for the given collection type. + let table_name = "test_empty_".to_owned() + collection_name; + let query = format!( + "CREATE TABLE {} (n int primary key, c {}<{}>)", + table_name, collection_name, collection_type_params + ); + session.query_unpaged(query, ()).await.unwrap(); + + // Populate the table with an empty collection, effectively inserting null as the collection. + session + .query_unpaged( + format!("INSERT INTO {} (n, c) VALUES (?, ?)", table_name,), + (0, Collection::default()), + ) + .await + .unwrap(); + + let query_rows_result = session + .query_unpaged(format!("SELECT c FROM {}", table_name), ()) + .await + .unwrap() + .into_rows_result() + .unwrap() + .unwrap(); + let (collection,) = query_rows_result.first_row::<(Collection,)>().unwrap(); + + // Drop the table + collection + } + + let list = deserialize_empty_collection::>(&session, "list", "int").await; + assert!(list.is_empty()); + + let set = deserialize_empty_collection::>(&session, "set", "bigint").await; + assert!(set.is_empty()); + + let map = deserialize_empty_collection::>( + &session, + "map", + "boolean, varint", + ) + .await; + assert!(map.is_empty()); +} From f1e7e020235463b6289791d4d0233e8804ba058f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Sun, 10 Nov 2024 16:02:42 +0100 Subject: [PATCH 23/25] codewide: introduce DeserializeOwned{Row,Value} As noted in a review comment, parsing `for<'r> DeserializeValue<'r, 'r>` to understand it's requiring an owned type is nontrivial and could be replaced with a subtrait with an informative name. Therefore, this commit introduces DeserializeOwnedRow and DeserializeOwnedValue (to be used by the `scylla` crate itself only). --- scylla/src/lib.rs | 7 +++++++ scylla/src/transport/cql_collections_test.rs | 4 ++-- scylla/src/transport/cql_types_test.rs | 6 +++--- scylla/src/transport/iterator.rs | 3 ++- scylla/src/transport/session_test.rs | 4 ++-- scylla/src/transport/topology.rs | 4 ++-- 6 files changed, 18 insertions(+), 10 deletions(-) diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index bac3fd3f9..715fe8d4d 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -230,6 +230,13 @@ pub mod deserialize { UdtIterator, UdtTypeCheckErrorKind, }; } + + // Shorthands for better readability. 
+ #[cfg_attr(not(test), allow(unused))] + pub(crate) trait DeserializeOwnedValue: for<'r> DeserializeValue<'r, 'r> {} + impl DeserializeOwnedValue for T where T: for<'r> DeserializeValue<'r, 'r> {} + pub(crate) trait DeserializeOwnedRow: for<'r> DeserializeRow<'r, 'r> {} + impl DeserializeOwnedRow for T where T: for<'r> DeserializeRow<'r, 'r> {} } pub mod authentication; diff --git a/scylla/src/transport/cql_collections_test.rs b/scylla/src/transport/cql_collections_test.rs index f37d28a8f..475bd47ee 100644 --- a/scylla/src/transport/cql_collections_test.rs +++ b/scylla/src/transport/cql_collections_test.rs @@ -1,5 +1,5 @@ +use crate::deserialize::DeserializeOwnedValue; use crate::transport::session::Session; -use scylla_cql::types::deserialize::value::DeserializeValue; use crate::frame::response::result::CqlValue; use crate::test_utils::{create_new_session_builder, setup_tracing}; @@ -36,7 +36,7 @@ async fn insert_and_select( expected: &SelectT, ) where InsertT: SerializeValue, - SelectT: for<'r> DeserializeValue<'r, 'r> + PartialEq + std::fmt::Debug, + SelectT: DeserializeOwnedValue + PartialEq + std::fmt::Debug, { session .query_unpaged( diff --git a/scylla/src/transport/cql_types_test.rs b/scylla/src/transport/cql_types_test.rs index 0a1833fd7..2863df76c 100644 --- a/scylla/src/transport/cql_types_test.rs +++ b/scylla/src/transport/cql_types_test.rs @@ -1,4 +1,5 @@ use crate as scylla; +use crate::deserialize::DeserializeOwnedValue; use crate::frame::response::result::CqlValue; use crate::frame::value::{Counter, CqlDate, CqlTime, CqlTimestamp}; use crate::test_utils::{create_new_session_builder, scylla_supports_tablets, setup_tracing}; @@ -6,7 +7,6 @@ use crate::transport::session::Session; use crate::utils::test_utils::unique_keyspace_name; use itertools::Itertools; use scylla_cql::frame::value::{CqlTimeuuid, CqlVarint}; -use scylla_cql::types::deserialize::value::DeserializeValue; use scylla_cql::types::serialize::value::SerializeValue; use scylla_macros::{DeserializeValue, SerializeValue}; use std::cmp::PartialEq; @@ -74,7 +74,7 @@ async fn init_test(table_name: &str, type_name: &str) -> Session { // Expected values and bound values are computed using T::from_str async fn run_tests(tests: &[&str], type_name: &str) where - T: SerializeValue + for<'r> DeserializeValue<'r, 'r> + FromStr + Debug + Clone + PartialEq, + T: SerializeValue + DeserializeOwnedValue + FromStr + Debug + Clone + PartialEq, { let session: Session = init_test(type_name, type_name).await; session.await_schema_agreement().await.unwrap(); @@ -1799,7 +1799,7 @@ async fn test_udt_with_missing_field() { expected: TR, ) where TQ: SerializeValue, - TR: for<'r> DeserializeValue<'r, 'r> + PartialEq + Debug, + TR: DeserializeOwnedValue + PartialEq + Debug, { session .query_unpaged( diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 100fafe2e..92918281d 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -24,6 +24,7 @@ use super::query_result::ColumnSpecs; use super::session::RequestSpan; use crate::cql_to_rust::{FromRow, FromRowError}; +use crate::deserialize::DeserializeOwnedRow; use crate::frame::response::{ result, result::{ColumnSpec, Row}, @@ -1076,7 +1077,7 @@ impl TypedRowStream { /// It only works with owned types! For example, &str is not supported. 
impl Stream for TypedRowStream where - RowT: for<'r> DeserializeRow<'r, 'r>, + RowT: DeserializeOwnedRow, { type Item = Result; diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs index a2d85c05b..6c4beeb4a 100644 --- a/scylla/src/transport/session_test.rs +++ b/scylla/src/transport/session_test.rs @@ -1,5 +1,5 @@ use crate::batch::{Batch, BatchStatement}; -use crate::deserialize::DeserializeValue; +use crate::deserialize::DeserializeOwnedValue; use crate::prepared_statement::PreparedStatement; use crate::query::Query; use crate::retry_policy::{QueryInfo, RetryDecision, RetryPolicy, RetrySession}; @@ -3100,7 +3100,7 @@ async fn test_deserialize_empty_collections() { session.use_keyspace(&ks, true).await.unwrap(); async fn deserialize_empty_collection< - Collection: Default + for<'frame> DeserializeValue<'frame, 'frame> + SerializeValue, + Collection: Default + DeserializeOwnedValue + SerializeValue, >( session: &Session, collection_name: &str, diff --git a/scylla/src/transport/topology.rs b/scylla/src/transport/topology.rs index 7f3f6e41f..ab29cd46b 100644 --- a/scylla/src/transport/topology.rs +++ b/scylla/src/transport/topology.rs @@ -1,3 +1,4 @@ +use crate::deserialize::DeserializeOwnedRow; use crate::frame::response::event::Event; use crate::routing::Token; use crate::statement::query::Query; @@ -15,7 +16,6 @@ use futures::Stream; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; use scylla_cql::frame::frame_errors::RowsParseError; -use scylla_cql::types::deserialize::row::DeserializeRow; use scylla_cql::types::deserialize::TypeCheckError; use scylla_macros::DeserializeRow; use std::borrow::BorrowMut; @@ -930,7 +930,7 @@ fn query_filter_keyspace_name<'a, R>( convert_typecheck_error: impl FnOnce(TypeCheckError) -> MetadataError + 'a, ) -> impl Stream> + 'a where - R: for<'r> DeserializeRow<'r, 'r> + 'static, + R: DeserializeOwnedRow + 'static, { let conn = conn.clone(); From 98b382d6a0b8a5b6d6c96d288b0c158b13bc6d2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 12 Nov 2024 16:00:39 +0100 Subject: [PATCH 24/25] iterator: rename RowIteratorWorker to PagerWorker Not to use legacy naming, (SingleConnection)RowIteratorWorker is renamed to (SingleConnection)PagerWorker. 
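
For orientation only (an editorial sketch, not part of the patch): the comments touched by the diff below describe the shape these types implement — the worker runs in the background, fetches pages, and hands them to the pager over a channel, shutting down once the pager side is dropped or after forwarding a final error. A much-simplified sketch of that producer/consumer shape, with every name below hypothetical and all retry, paging-state and tracing logic omitted:

```rust
use tokio::sync::mpsc;

// Stand-in for one paged request; the real worker wraps retries, paging
// state and tracing ids around a call like this.
async fn fetch_page(page: u32) -> Result<Vec<String>, String> {
    Ok(vec![format!("row from page {page}")])
}

// Worker side: fetch pages in a loop and push them to the pager.
async fn pager_worker(sender: mpsc::Sender<Result<Vec<String>, String>>) {
    for page in 0u32.. {
        let fetched = fetch_page(page).await;
        let failed = fetched.is_err();
        // Stop if the pager was dropped (send fails) or the fetch failed
        // fully; in the latter case the error has just been forwarded.
        if sender.send(fetched).await.is_err() || failed {
            return;
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(1);
    tokio::spawn(pager_worker(tx));

    // Pager side: consume pages one by one as they arrive; dropping `rx`
    // tells the worker to shut down.
    if let Some(first_page) = rx.recv().await {
        println!("{first_page:?}");
    }
}
```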
--- scylla/src/transport/iterator.rs | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 92918281d..cb51471b0 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -127,9 +127,9 @@ use checked_channel_sender::{ProvingSender, SendAttemptedProof}; type PageSendAttemptedProof = SendAttemptedProof>; -// RowIteratorWorker works in the background to fetch pages -// RowIterator receives them through a channel -struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { +// PagerWorker works in the background to fetch pages +// QueryPager receives them through a channel +struct PagerWorker<'a, QueryFunc, SpanCreatorFunc> { sender: ProvingSender>, // Closure used to perform a single page query @@ -153,7 +153,7 @@ struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { span_creator: SpanCreatorFunc, } -impl RowIteratorWorker<'_, QueryFunc, SpanCreator> +impl PagerWorker<'_, QueryFunc, SpanCreator> where QueryFunc: Fn(Arc, Consistency, PagingState) -> QueryFut, QueryFut: Future>, @@ -260,7 +260,7 @@ where } } - // Send last_error to RowIterator - query failed fully + // Send last_error to QueryPager - query failed fully self.log_query_error(&last_error); let (proof, _) = self.sender.send(Err(last_error)).await; proof @@ -333,10 +333,10 @@ where let received_page = ReceivedPage { rows, tracing_id }; - // Send next page to RowIterator + // Send next page to QueryPager let (proof, res) = self.sender.send(Ok(received_page)).await; if res.is_err() { - // channel was closed, RowIterator was dropped - should shutdown + // channel was closed, QueryPager was dropped - should shutdown return Ok(ControlFlow::Break(proof)); } @@ -469,15 +469,15 @@ where } } -/// A massively simplified version of the RowIteratorWorker. It does not have +/// A massively simplified version of the PagerWorker. It does not have /// any complicated logic related to retries, it just fetches pages from /// a single connection. 
-struct SingleConnectionRowIteratorWorker { +struct SingleConnectionPagerWorker { sender: ProvingSender>, fetcher: Fetcher, } -impl SingleConnectionRowIteratorWorker +impl SingleConnectionPagerWorker where Fetcher: Fn(PagingState) -> FetchFut + Send + Sync, FetchFut: Future> + Send, @@ -508,7 +508,7 @@ where .await; if send_result.is_err() { - // channel was closed, RowIterator was dropped - should shutdown + // channel was closed, QueryPager was dropped - should shutdown return Ok(proof); } @@ -742,7 +742,7 @@ impl QueryPager { span }; - let worker = RowIteratorWorker { + let worker = PagerWorker { sender: sender.into(), page_query, statement_info: routing_info, @@ -860,7 +860,7 @@ impl QueryPager { span }; - let worker = RowIteratorWorker { + let worker = PagerWorker { sender: sender.into(), page_query, statement_info, @@ -894,7 +894,7 @@ impl QueryPager { let page_size = query.get_validated_page_size(); let worker_task = async move { - let worker = SingleConnectionRowIteratorWorker { + let worker = SingleConnectionPagerWorker { sender: sender.into(), fetcher: |paging_state| { connection.query_raw_with_consistency( @@ -924,7 +924,7 @@ impl QueryPager { let page_size = prepared.get_validated_page_size(); let worker_task = async move { - let worker = SingleConnectionRowIteratorWorker { + let worker = SingleConnectionPagerWorker { sender: sender.into(), fetcher: |paging_state| { connection.execute_raw_with_consistency( From d4a222c711dc5957b5ccd83ea30bc7d7a3f759eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Tue, 12 Nov 2024 16:07:51 +0100 Subject: [PATCH 25/25] iterator: fix QueryPager::rows_stream() lifetime constraints MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It appears that the previous requirements: ```rust fn rows_stream< 'frame, 'metadata, RowT: 'static + DeserializeRow<'frame, 'metadata> > ``` allowed creating the `TypedRowStream<&'static str>`. It was error-prone, because the compiler would accept `rows_stream::<&str>`, happily deducing that it's `&'static str`, and failing upon Stream `next()` not being a lending method. To prevent such situations, the constraints are changed the following way (credits to @Lorak-mmk): ```rust fn rows_stream< RowT: 'static + for<'frame, 'metadata> DeserializeRow<'frame, 'metadata> > ``` and now `&'static str` is not permitted (because it only implements `DeserializeValue<'static, '_>`. Co-authored-by: Karol Baryła --- scylla/src/lib.rs | 17 +++++++++++++---- scylla/src/transport/iterator.rs | 7 ++----- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index 715fe8d4d..8b62c0c2b 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -233,10 +233,19 @@ pub mod deserialize { // Shorthands for better readability. 
#[cfg_attr(not(test), allow(unused))]
- pub(crate) trait DeserializeOwnedValue: for<'r> DeserializeValue<'r, 'r> {}
- impl<T> DeserializeOwnedValue for T where T: for<'r> DeserializeValue<'r, 'r> {}
- pub(crate) trait DeserializeOwnedRow: for<'r> DeserializeRow<'r, 'r> {}
- impl<T> DeserializeOwnedRow for T where T: for<'r> DeserializeRow<'r, 'r> {}
+ pub(crate) trait DeserializeOwnedValue:
+ for<'frame, 'metadata> DeserializeValue<'frame, 'metadata>
+ {
+ }
+ impl<T> DeserializeOwnedValue for T where
+ T: for<'frame, 'metadata> DeserializeValue<'frame, 'metadata>
+ {
+ }
+ pub(crate) trait DeserializeOwnedRow:
+ for<'frame, 'metadata> DeserializeRow<'frame, 'metadata>
+ {
+ }
+ impl<T> DeserializeOwnedRow for T where T: for<'frame, 'metadata> DeserializeRow<'frame, 'metadata> {}
}
pub mod authentication;
diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs
index cb51471b0..160819f6c 100644
--- a/scylla/src/transport/iterator.rs
+++ b/scylla/src/transport/iterator.rs
@@ -663,12 +663,9 @@ impl QueryPager {
 /// It only allows deserializing owned types, because [Stream] is not lending.
 /// Begins with performing type check.
 #[inline]
- pub fn rows_stream<'frame, 'metadata, RowT: 'static + DeserializeRow<'frame, 'metadata>>(
 self,
- ) -> Result<TypedRowStream<RowT>, TypeCheckError>
- where
- 'frame: 'metadata,
- {
+ pub fn rows_stream<RowT: 'static + for<'frame, 'metadata> DeserializeRow<'frame, 'metadata>>(
 self,
+ ) -> Result<TypedRowStream<RowT>, TypeCheckError> {
 TypedRowLendingStream::<RowT>::new(self).map(|typed_row_lending_stream| TypedRowStream {
 typed_row_lending_stream,
 })
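
For illustration only (an editorial sketch, not part of the patch series): how the tightened bound behaves at a call site, assuming a `QueryPager` obtained from e.g. `Session::query_iter`; the module path below is inferred from the tree layout used in these patches.

```rust
use futures::StreamExt as _;
use scylla::transport::iterator::QueryPager;

// Consuming a pager with an owned row type, as the new bound requires.
async fn print_rows(pager: QueryPager) -> Result<(), Box<dyn std::error::Error>> {
    // `(i32, String)` implements `DeserializeRow` for all lifetimes,
    // so it satisfies the higher-ranked bound and type-checks here.
    let mut stream = pager.rows_stream::<(i32, String)>()?;
    while let Some(row) = stream.next().await {
        let (a, b): (i32, String) = row?;
        println!("{a}: {b}");
    }
    // A borrowed row type such as `(i32, &str)` is now rejected at the
    // `rows_stream` call itself, instead of being accepted as
    // `&'static str` and failing only once the stream is polled.
    Ok(())
}
```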