Skip to content

Commit

Permalink
Merge branch 'main' into kanzhen/rw-oauth
Browse files Browse the repository at this point in the history
  • Loading branch information
Rossil2012 authored Feb 22, 2024
2 parents 5d4bfe2 + 4197ad5 commit 29b74d2
Show file tree
Hide file tree
Showing 18 changed files with 144 additions and 56 deletions.
20 changes: 2 additions & 18 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -22,5 +22,5 @@ heartbeat.interval.ms=${debezium.heartbeat.interval.ms:-60000}
# In sharing cdc mode, we will subscribe to multiple tables in the given database,
# so here we set ${table.name} to a default value `RW_CDC_Sharing` just for display.
name=${hostname}:${port}:${database.name}.${table.name:-RW_CDC_Sharing}
# Enable transaction metadata by default
provide.transaction.metadata=${transactional:-true}
# In sharing cdc mode, transaction metadata will be enabled in frontend
provide.transaction.metadata=${transactional:-false}
Original file line number Diff line number Diff line change
Expand Up @@ -20,5 +20,5 @@ heartbeat.interval.ms=${debezium.heartbeat.interval.ms:-300000}
# In sharing cdc source mode, we will subscribe to multiple tables in the given database,
# so here we set ${table.name} to a default value `RW_CDC_Sharing` just for display.
name=${hostname}:${port}:${database.name}.${schema.name}.${table.name:-RW_CDC_Sharing}
# Enable transaction metadata by default
provide.transaction.metadata=${transactional:-true}
# In sharing cdc mode, transaction metadata will be enabled in frontend
provide.transaction.metadata=${transactional:-false}
4 changes: 2 additions & 2 deletions proto/expr.proto
Original file line number Diff line number Diff line change
Expand Up @@ -58,12 +58,12 @@ message ExprNode {
MAKE_TIMESTAMP = 115;
// From f64 to timestamp.
// e.g. `select to_timestamp(1672044740.0)`
TO_TIMESTAMP = 104;
SEC_TO_TIMESTAMPTZ = 104;
AT_TIME_ZONE = 105;
DATE_TRUNC = 106;
// Parse text to timestamp by format string.
// e.g. `select to_timestamp('2022 08 21', 'YYYY MM DD')`
TO_TIMESTAMP1 = 107;
CHAR_TO_TIMESTAMPTZ = 107;
CHAR_TO_DATE = 111;
// Performs a cast with additional timezone information.
CAST_WITH_TIME_ZONE = 108;
Expand Down
2 changes: 2 additions & 0 deletions src/connector/src/source/cdc/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@ pub const CDC_SNAPSHOT_BACKFILL: &str = "rw_cdc_backfill";
pub const CDC_SHARING_MODE_KEY: &str = "rw.sharing.mode.enable";
// User can set snapshot='false' to disable cdc backfill
pub const CDC_BACKFILL_ENABLE_KEY: &str = "snapshot";
// We enable transaction for shared cdc source by default
pub const CDC_TRANSACTIONAL_KEY: &str = "transactional";

pub const MYSQL_CDC_CONNECTOR: &str = Mysql::CDC_CONNECTOR_NAME;
pub const POSTGRES_CDC_CONNECTOR: &str = Postgres::CDC_CONNECTOR_NAME;
Expand Down
8 changes: 4 additions & 4 deletions src/expr/impl/benches/expr.rs
Original file line number Diff line number Diff line change
Expand Up @@ -304,7 +304,7 @@ fn bench_expr(c: &mut Criterion) {
}
if [
"date_trunc(character varying, timestamp with time zone) -> timestamp with time zone",
"to_timestamp1(character varying, character varying) -> timestamp with time zone",
"char_to_timestamptz(character varying, character varying) -> timestamp with time zone",
"to_char(timestamp with time zone, character varying) -> character varying",
]
.contains(&format!("{sig:?}").as_str())
Expand All @@ -321,12 +321,12 @@ fn bench_expr(c: &mut Criterion) {
for (i, t) in sig.inputs_type.iter().enumerate() {
use DataType::*;
let idx = match (sig.name.as_scalar(), i) {
(PbType::ToTimestamp1, 0) => TIMESTAMP_FORMATTED_STRING,
(PbType::ToChar | PbType::ToTimestamp1, 1) => {
(PbType::CharToTimestamptz, 0) => TIMESTAMP_FORMATTED_STRING,
(PbType::ToChar | PbType::CharToTimestamptz, 1) => {
children.push(string_literal("YYYY/MM/DD HH:MM:SS"));
continue;
}
(PbType::ToChar | PbType::ToTimestamp1, 2) => {
(PbType::ToChar | PbType::CharToTimestamptz, 2) => {
children.push(string_literal("Australia/Sydney"));
continue;
}
Expand Down
2 changes: 1 addition & 1 deletion src/expr/impl/src/scalar/timestamptz.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ pub fn time_zone_err(inner_err: String) -> ExprError {
}
}

#[function("to_timestamp(float8) -> timestamptz")]
#[function("sec_to_timestamptz(float8) -> timestamptz")]
pub fn f64_sec_to_timestamptz(elem: F64) -> Result<Timestamptz> {
// TODO(#4515): handle +/- infinity
let micros = (elem.0 * 1e6)
Expand Down
6 changes: 3 additions & 3 deletions src/expr/impl/src/scalar/to_timestamp.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ fn parse(s: &str, tmpl: &ChronoPattern) -> Result<Parsed> {
}

#[function(
"to_timestamp1(varchar, varchar) -> timestamp",
"char_to_timestamptz(varchar, varchar) -> timestamp",
prebuild = "ChronoPattern::compile($1)",
deprecated
)]
Expand All @@ -81,7 +81,7 @@ pub fn to_timestamp_legacy(s: &str, tmpl: &ChronoPattern) -> Result<Timestamp> {
}

#[function(
"to_timestamp1(varchar, varchar, varchar) -> timestamptz",
"char_to_timestamptz(varchar, varchar, varchar) -> timestamptz",
prebuild = "ChronoPattern::compile($1)"
)]
pub fn to_timestamp(s: &str, timezone: &str, tmpl: &ChronoPattern) -> Result<Timestamptz> {
Expand All @@ -93,7 +93,7 @@ pub fn to_timestamp(s: &str, timezone: &str, tmpl: &ChronoPattern) -> Result<Tim
})
}

#[function("to_timestamp1(varchar, varchar) -> timestamptz", rewritten)]
#[function("char_to_timestamptz(varchar, varchar) -> timestamptz", rewritten)]
fn _to_timestamp1() {}

#[function(
Expand Down
4 changes: 2 additions & 2 deletions src/frontend/src/binder/expr/function.rs
Original file line number Diff line number Diff line change
Expand Up @@ -959,8 +959,8 @@ impl Binder {
(
"to_timestamp",
dispatch_by_len(vec![
(1, raw_call(ExprType::ToTimestamp)),
(2, raw_call(ExprType::ToTimestamp1)),
(1, raw_call(ExprType::SecToTimestamptz)),
(2, raw_call(ExprType::CharToTimestamptz)),
]),
),
("date_trunc", raw_call(ExprType::DateTrunc)),
Expand Down
4 changes: 2 additions & 2 deletions src/frontend/src/expr/pure.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,13 +60,13 @@ impl ExprVisitor for ImpureAnalyzer {
| expr_node::Type::Extract
| expr_node::Type::DatePart
| expr_node::Type::TumbleStart
| expr_node::Type::ToTimestamp
| expr_node::Type::SecToTimestamptz
| expr_node::Type::AtTimeZone
| expr_node::Type::DateTrunc
| expr_node::Type::MakeDate
| expr_node::Type::MakeTime
| expr_node::Type::MakeTimestamp
| expr_node::Type::ToTimestamp1
| expr_node::Type::CharToTimestamptz
| expr_node::Type::CharToDate
| expr_node::Type::CastWithTimeZone
| expr_node::Type::AddWithTimeZone
Expand Down
6 changes: 3 additions & 3 deletions src/frontend/src/expr/session_timezone.rs
Original file line number Diff line number Diff line change
Expand Up @@ -216,9 +216,9 @@ impl SessionTimezone {
new_inputs.push(ExprImpl::literal_varchar(self.timezone()));
Some(FunctionCall::new(func_type, new_inputs).unwrap().into())
}
// `to_timestamp1(input_string, format_string)`
// => `to_timestamp1(input_string, format_string, zone_string)`
ExprType::ToTimestamp1 => {
// `char_to_timestamptz(input_string, format_string)`
// => `char_to_timestamptz(input_string, format_string, zone_string)`
ExprType::CharToTimestamptz => {
if !(inputs.len() == 2
&& inputs[0].return_type() == DataType::Varchar
&& inputs[1].return_type() == DataType::Varchar)
Expand Down
4 changes: 2 additions & 2 deletions src/frontend/src/expr/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -577,8 +577,8 @@ impl WatermarkAnalyzer {
},
_ => unreachable!(),
},
ExprType::ToTimestamp => self.visit_unary_op(func_call.inputs()),
ExprType::ToTimestamp1 => WatermarkDerivation::None,
ExprType::SecToTimestamptz => self.visit_unary_op(func_call.inputs()),
ExprType::CharToTimestamptz => WatermarkDerivation::None,
ExprType::Cast => {
// TODO: need more derivation
WatermarkDerivation::None
Expand Down
6 changes: 4 additions & 2 deletions src/frontend/src/handler/create_source.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,8 @@ use risingwave_connector::schema::schema_registry::{
};
use risingwave_connector::source::cdc::external::CdcTableType;
use risingwave_connector::source::cdc::{
CDC_SHARING_MODE_KEY, CDC_SNAPSHOT_BACKFILL, CDC_SNAPSHOT_MODE_KEY, CITUS_CDC_CONNECTOR,
MYSQL_CDC_CONNECTOR, POSTGRES_CDC_CONNECTOR,
CDC_SHARING_MODE_KEY, CDC_SNAPSHOT_BACKFILL, CDC_SNAPSHOT_MODE_KEY, CDC_TRANSACTIONAL_KEY,
CITUS_CDC_CONNECTOR, MYSQL_CDC_CONNECTOR, POSTGRES_CDC_CONNECTOR,
};
use risingwave_connector::source::datagen::DATAGEN_CONNECTOR;
use risingwave_connector::source::nexmark::source::{get_event_data_types_with_names, EventType};
Expand Down Expand Up @@ -1196,6 +1196,8 @@ pub async fn handle_create_source(
with_properties.insert(CDC_SNAPSHOT_MODE_KEY.into(), CDC_SNAPSHOT_BACKFILL.into());
// enable cdc sharing mode, which will capture all tables in the given `database.name`
with_properties.insert(CDC_SHARING_MODE_KEY.into(), "true".into());
// enable transactional cdc
with_properties.insert(CDC_TRANSACTIONAL_KEY.into(), "true".into());
}

// must behind `handle_addition_columns`
Expand Down
2 changes: 1 addition & 1 deletion src/object_store/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ hyper-rustls = { version = "0.24.2", features = ["webpki-roots"] }
hyper-tls = "0.5.0"
itertools = "0.12"
madsim = "0.2.22"
opendal = "0.44"
opendal = "0.44.2"
prometheus = { version = "0.13", features = ["process"] }
risingwave_common = { workspace = true }
rustls = "0.21.8"
Expand Down
30 changes: 21 additions & 9 deletions src/object_store/src/object/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -818,15 +818,27 @@ pub async fn build_remote_object_store(
config: ObjectStoreConfig,
) -> ObjectStoreImpl {
match url {
s3 if s3.starts_with("s3://") => ObjectStoreImpl::S3(
S3ObjectStore::new_with_config(
s3.strip_prefix("s3://").unwrap().to_string(),
metrics.clone(),
config,
)
.await
.monitored(metrics),
),
s3 if s3.starts_with("s3://") => {
if std::env::var("RW_USE_OPENDAL_FOR_S3").is_ok() {
let bucket = s3.strip_prefix("s3://").unwrap();

ObjectStoreImpl::Opendal(
OpendalObjectStore::new_s3_engine(bucket.to_string(), config)
.unwrap()
.monitored(metrics),
)
} else {
ObjectStoreImpl::S3(
S3ObjectStore::new_with_config(
s3.strip_prefix("s3://").unwrap().to_string(),
metrics.clone(),
config,
)
.await
.monitored(metrics),
)
}
}
#[cfg(feature = "hdfs-backend")]
hdfs if hdfs.starts_with("hdfs://") => {
let hdfs = hdfs.strip_prefix("hdfs://").unwrap();
Expand Down
4 changes: 2 additions & 2 deletions src/object_store/src/object/opendal_engine/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ pub mod gcs;

pub mod obs;

pub mod oss;

pub mod azblob;
pub mod opendal_s3;
pub mod oss;

pub mod fs;
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ pub enum EngineType {
Memory,
Hdfs,
Gcs,
S3,
Obs,
Oss,
Webhdfs,
Expand Down Expand Up @@ -190,6 +191,7 @@ impl ObjectStore for OpendalObjectStore {
match self.engine_type {
EngineType::Memory => "Memory",
EngineType::Hdfs => "Hdfs",
EngineType::S3 => "S3",
EngineType::Gcs => "Gcs",
EngineType::Obs => "Obs",
EngineType::Oss => "Oss",
Expand All @@ -206,7 +208,11 @@ pub struct OpendalStreamingUploader {
}
impl OpendalStreamingUploader {
pub async fn new(op: Operator, path: String) -> ObjectResult<Self> {
let writer = op.writer_with(&path).buffer(OPENDAL_BUFFER_SIZE).await?;
let writer = op
.writer_with(&path)
.concurrent(8)
.buffer(OPENDAL_BUFFER_SIZE)
.await?;
Ok(Self { writer })
}
}
Expand Down
82 changes: 82 additions & 0 deletions src/object_store/src/object/opendal_engine/opendal_s3.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
// Copyright 2024 RisingWave Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Duration;

use opendal::layers::{LoggingLayer, RetryLayer};
use opendal::services::S3;
use opendal::Operator;
use risingwave_common::config::ObjectStoreConfig;

use super::{EngineType, OpendalObjectStore};
use crate::object::ObjectResult;

impl OpendalObjectStore {
    /// Create an OpenDAL-based S3 engine for the given bucket.
    ///
    /// Connection settings are read from environment variables
    /// (`RW_S3_ENDPOINT`, `AWS_REGION`, `AWS_ACCESS_KEY_ID`,
    /// `AWS_SECRET_ACCESS_KEY`, `RW_IS_FORCE_PATH_STYLE`), while retry
    /// parameters come from `object_store_config.s3`.
    pub fn new_s3_engine(
        bucket: String,
        object_store_config: ObjectStoreConfig,
    ) -> ObjectResult<Self> {
        // Create s3 builder.
        let mut builder = S3::default();
        builder.bucket(&bucket);

        // For AWS S3, there is no need to set an endpoint; for other
        // S3-compatible object stores, it is necessary to set this field.
        if let Ok(endpoint_url) = std::env::var("RW_S3_ENDPOINT") {
            builder.endpoint(&endpoint_url);
        }

        // A missing region/credential is logged but not fatal here — operator
        // construction below does not validate them. NOTE(review): presumably
        // OpenDAL then falls back to its default credential loading; confirm.
        if let Ok(region) = std::env::var("AWS_REGION") {
            builder.region(&region);
        } else {
            tracing::error!("aws s3 region is not set, bucket {}", bucket);
        }

        if let Ok(access) = std::env::var("AWS_ACCESS_KEY_ID") {
            builder.access_key_id(&access);
        } else {
            tracing::error!("access key id of aws s3 is not set, bucket {}", bucket);
        }

        if let Ok(secret) = std::env::var("AWS_SECRET_ACCESS_KEY") {
            builder.secret_access_key(&secret);
        } else {
            tracing::error!("secret access key of aws s3 is not set, bucket {}", bucket);
        }

        // Default to virtual-host-style addressing unless the deployment
        // explicitly forces path-style (set via `RW_IS_FORCE_PATH_STYLE`,
        // e.g. for MinIO-like stores).
        if std::env::var("RW_IS_FORCE_PATH_STYLE").is_err() {
            builder.enable_virtual_host_style();
        }

        // Wrap the operator with request logging and bounded, jittered retries
        // taken from the object-store config. NOTE(review): `with_factor(1.0)`
        // appears to keep the retry delay flat (no exponential growth) between
        // the min and max delays — confirm against the RetryLayer contract.
        let op: Operator = Operator::new(builder)?
            .layer(LoggingLayer::default())
            .layer(
                RetryLayer::new()
                    .with_min_delay(Duration::from_millis(
                        object_store_config.s3.object_store_req_retry_interval_ms,
                    ))
                    .with_max_delay(Duration::from_millis(
                        object_store_config.s3.object_store_req_retry_max_delay_ms,
                    ))
                    .with_max_times(object_store_config.s3.object_store_req_retry_max_attempts)
                    .with_factor(1.0)
                    .with_jitter(),
            )
            .finish();
        Ok(Self {
            op,
            engine_type: EngineType::S3,
        })
    }
}

0 comments on commit 29b74d2

Please sign in to comment.