-
Notifications
You must be signed in to change notification settings - Fork 596
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
fix(sink): add cassandra batch size and fix bigquery array null #15516
Changes from 4 commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -42,6 +42,12 @@ public class CassandraConfig extends CommonSinkConfig { | |
@JsonProperty(value = "cassandra.password") | ||
private String password; | ||
|
||
@JsonProperty(value = "cassandra.max_batch_rows") | ||
private Integer maxBatchRows = 512; | ||
|
||
@JsonProperty(value = "cassandra.request_timeout_ms") | ||
private Integer requestTimeoutMs = 2000; | ||
|
||
@JsonCreator | ||
public CassandraConfig( | ||
@JsonProperty(value = "cassandra.url") String url, | ||
|
@@ -93,4 +99,29 @@ public CassandraConfig withPassword(String password) { | |
this.password = password; | ||
return this; | ||
} | ||
|
||
public Integer getMaxBatchRows() { | ||
return maxBatchRows; | ||
} | ||
|
||
public CassandraConfig withMaxBatchRows(Integer maxBatchRows) { | ||
if (maxBatchRows > 65536 || maxBatchRows < 1) { | ||
throw new IllegalArgumentException( | ||
"cassandra.max_batch_rows must be <= 65535 and >= 1"); | ||
} | ||
this.maxBatchRows = maxBatchRows; | ||
return this; | ||
} | ||
|
||
public Integer getRequestTimeoutMs() { | ||
return requestTimeoutMs; | ||
} | ||
|
||
public CassandraConfig withRequestTimeoutMs(Integer requestTimeoutMs) { | ||
if (requestTimeoutMs < 1) { | ||
throw new IllegalArgumentException("cassandra.request_timeout_ms must be >= 1"); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. nit: better to include the `maxBatchRows` value in the error message. |
||
} | ||
this.requestTimeoutMs = requestTimeoutMs; | ||
return this; | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -83,15 +83,14 @@ impl JsonEncoder { | |
pub fn new_with_doris( | ||
schema: Schema, | ||
col_indices: Option<Vec<usize>>, | ||
timestamp_handling_mode: TimestampHandlingMode, | ||
map: HashMap<String, (u8, u8)>, | ||
) -> Self { | ||
Self { | ||
schema, | ||
col_indices, | ||
time_handling_mode: TimeHandlingMode::Milli, | ||
date_handling_mode: DateHandlingMode::String, | ||
timestamp_handling_mode, | ||
timestamp_handling_mode: TimestampHandlingMode::String, | ||
timestamptz_handling_mode: TimestamptzHandlingMode::UtcWithoutSuffix, | ||
custom_json_type: CustomJsonType::Doris(map), | ||
kafka_connect: None, | ||
|
@@ -101,21 +100,33 @@ impl JsonEncoder { | |
pub fn new_with_starrocks( | ||
schema: Schema, | ||
col_indices: Option<Vec<usize>>, | ||
timestamp_handling_mode: TimestampHandlingMode, | ||
map: HashMap<String, (u8, u8)>, | ||
) -> Self { | ||
Self { | ||
schema, | ||
col_indices, | ||
time_handling_mode: TimeHandlingMode::Milli, | ||
date_handling_mode: DateHandlingMode::String, | ||
timestamp_handling_mode, | ||
timestamp_handling_mode: TimestampHandlingMode::String, | ||
timestamptz_handling_mode: TimestamptzHandlingMode::UtcWithoutSuffix, | ||
custom_json_type: CustomJsonType::StarRocks(map), | ||
kafka_connect: None, | ||
} | ||
} | ||
|
||
pub fn new_with_bigquery(schema: Schema, col_indices: Option<Vec<usize>>) -> Self { | ||
Self { | ||
schema, | ||
col_indices, | ||
time_handling_mode: TimeHandlingMode::Milli, | ||
date_handling_mode: DateHandlingMode::String, | ||
timestamp_handling_mode: TimestampHandlingMode::String, | ||
timestamptz_handling_mode: TimestamptzHandlingMode::UtcString, | ||
custom_json_type: CustomJsonType::BigQuery, | ||
kafka_connect: None, | ||
} | ||
} | ||
|
||
pub fn with_kafka_connect(self, kafka_connect: KafkaConnectParams) -> Self { | ||
Self { | ||
kafka_connect: Some(Arc::new(kafka_connect)), | ||
|
@@ -192,7 +203,15 @@ fn datum_to_json_object( | |
custom_json_type: &CustomJsonType, | ||
) -> ArrayResult<Value> { | ||
let scalar_ref = match datum { | ||
None => return Ok(Value::Null), | ||
None => { | ||
if let CustomJsonType::BigQuery = custom_json_type | ||
&& matches!(field.data_type(), DataType::List(_)) | ||
{ | ||
return Ok(Value::Array(vec![])); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can you include the BigQuery doc link as a comment here to explain why we need to do this? |
||
} else { | ||
return Ok(Value::Null); | ||
} | ||
} | ||
Some(datum) => datum, | ||
}; | ||
|
||
|
@@ -239,7 +258,7 @@ fn datum_to_json_object( | |
} | ||
json!(v_string) | ||
} | ||
CustomJsonType::Es | CustomJsonType::None => { | ||
CustomJsonType::Es | CustomJsonType::None | CustomJsonType::BigQuery => { | ||
json!(v.to_text()) | ||
} | ||
}, | ||
|
@@ -291,7 +310,7 @@ fn datum_to_json_object( | |
} | ||
(DataType::Jsonb, ScalarRefImpl::Jsonb(jsonb_ref)) => match custom_json_type { | ||
CustomJsonType::Es | CustomJsonType::StarRocks(_) => JsonbVal::from(jsonb_ref).take(), | ||
CustomJsonType::Doris(_) | CustomJsonType::None => { | ||
CustomJsonType::Doris(_) | CustomJsonType::None | CustomJsonType::BigQuery => { | ||
json!(jsonb_ref.to_string()) | ||
} | ||
}, | ||
|
@@ -342,7 +361,7 @@ fn datum_to_json_object( | |
"starrocks can't support struct".to_string(), | ||
)); | ||
} | ||
CustomJsonType::Es | CustomJsonType::None => { | ||
CustomJsonType::Es | CustomJsonType::None | CustomJsonType::BigQuery => { | ||
let mut map = Map::with_capacity(st.len()); | ||
for (sub_datum_ref, sub_field) in struct_ref.iter_fields_ref().zip_eq_debug( | ||
st.iter() | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
nit: better to include the `maxBatchRows` value in the error message.