fix(source): parquet file source use number of rows to determine the end of the file reading #18149

Merged · 14 commits · Aug 29, 2024
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

87 changes: 86 additions & 1 deletion src/connector/src/parser/parquet_parser.rs
@@ -11,16 +11,23 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::future::IntoFuture;
use std::sync::Arc;

use arrow_array_iceberg::RecordBatch;
use futures_async_stream::try_stream;
use parquet::arrow::ParquetRecordBatchStreamBuilder;
use risingwave_common::array::arrow::IcebergArrowConvert;
use risingwave_common::array::{ArrayBuilderImpl, DataChunk, StreamChunk};
use risingwave_common::bail;
use risingwave_common::types::{Datum, ScalarImpl};
use risingwave_common::util::tokio_util::compat::FuturesAsyncReadCompatExt;

use crate::parser::ConnectorResult;
use crate::source::SourceColumnDesc;
use crate::source::filesystem::opendal_source::opendal_enumerator::OpendalEnumerator;
use crate::source::filesystem::opendal_source::{OpendalGcs, OpendalPosixFs, OpendalS3};
use crate::source::reader::desc::SourceDesc;
use crate::source::{ConnectorProperties, SourceColumnDesc};
/// `ParquetParser` is responsible for converting the incoming `record_batch_stream`
/// into a `StreamChunk`.
#[derive(Debug)]
@@ -188,3 +195,81 @@ impl ParquetParser {
Ok(data_chunk.into())
}
}

/// Retrieves the total number of rows in the specified Parquet file.
///
/// This function constructs an `OpenDAL` operator using the information
/// from the provided `source_desc`. It then accesses the metadata of the
/// Parquet file to determine and return the total row count.
///
/// # Arguments
///
/// * `parquet_file_name` - The Parquet file name.
/// * `source_desc` - The source descriptor containing the information needed
/// to construct the `OpenDAL` operator.
///
/// # Returns
///
/// Returns the total number of rows in the Parquet file as a `usize`.
pub async fn get_total_row_nums_for_parquet_file(
parquet_file_name: &str,
source_desc: SourceDesc,
) -> ConnectorResult<usize> {
let total_row_num = match source_desc.source.config {
ConnectorProperties::Gcs(prop) => {
let connector: OpendalEnumerator<OpendalGcs> =
OpendalEnumerator::new_gcs_source(*prop)?;
let reader = connector
.op
.reader_with(parquet_file_name)
.into_future()
.await?
.into_futures_async_read(..)
.await?
.compat();

ParquetRecordBatchStreamBuilder::new(reader)
.await?
.metadata()
.file_metadata()
.num_rows()
}
ConnectorProperties::OpendalS3(prop) => {
let connector: OpendalEnumerator<OpendalS3> =
OpendalEnumerator::new_s3_source(prop.s3_properties, prop.assume_role)?;
let reader = connector
.op
.reader_with(parquet_file_name)
.into_future()
.await?
.into_futures_async_read(..)
.await?
.compat();
ParquetRecordBatchStreamBuilder::new(reader)
.await?
.metadata()
.file_metadata()
.num_rows()
}

ConnectorProperties::PosixFs(prop) => {
let connector: OpendalEnumerator<OpendalPosixFs> =
OpendalEnumerator::new_posix_fs_source(*prop)?;
let reader = connector
.op
.reader_with(parquet_file_name)
.into_future()
.await?
.into_futures_async_read(..)
.await?
.compat();
ParquetRecordBatchStreamBuilder::new(reader)
.await?
.metadata()
.file_metadata()
.num_rows()
}
other => bail!("Unsupported source: {:?}", other),
};
Ok(total_row_num as usize)
}
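For context, a minimal sketch of how a caller might use this helper when building a Parquet split. The sketch is not part of this diff: `build_parquet_split` is a hypothetical function, import lines are elided, and only `get_total_row_nums_for_parquet_file`, `SourceDesc`, `OpendalFsSplit`, and `OpendalS3` come from code elsewhere in this PR.

// Hypothetical call site (imports elided): the returned row count, minus one because
// offsets start at 0, becomes the split's `size`, mirroring the fetch executor change below.
async fn build_parquet_split(
    file: &str,
    source_desc: SourceDesc,
) -> ConnectorResult<OpendalFsSplit<OpendalS3>> {
    let total_rows = get_total_row_nums_for_parquet_file(file, source_desc).await?;
    Ok(OpendalFsSplit::new(file.to_owned(), 0, total_rows - 1))
}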
1 change: 1 addition & 0 deletions src/connector/src/source/filesystem/file_common.rs
@@ -79,6 +79,7 @@ impl FsSplit {
pub struct OpendalFsSplit<Src: OpendalSource> {
pub name: String,
pub offset: usize,
// For Parquet encoding, the size represents the number of rows, while for other encodings, the size denotes the file size.
pub size: usize,
_marker: PhantomData<Src>,
}
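As a small illustration of that dual meaning (the file names, sizes, and the `OpendalS3` type parameter below are assumptions for the example, not part of the diff):

// Illustrative values only; imports elided.
fn example_splits() -> (OpendalFsSplit<OpendalS3>, OpendalFsSplit<OpendalS3>) {
    // For non-Parquet files the lister reports a byte size.
    let json_split = OpendalFsSplit::<OpendalS3>::new("logs/a.json".to_owned(), 0, 4096);
    // For Parquet files the fetch executor stores the row count minus one instead.
    let parquet_split = OpendalFsSplit::<OpendalS3>::new("logs/b.parquet".to_owned(), 0, 999);
    (json_split, parquet_split)
}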
src/connector/src/source/filesystem/opendal_source/opendal_enumerator.rs
@@ -29,7 +29,7 @@ use crate::source::{SourceEnumeratorContextRef, SplitEnumerator};

#[derive(Debug, Clone)]
pub struct OpendalEnumerator<Src: OpendalSource> {
pub(crate) op: Operator,
pub op: Operator,
// prefix is used to reduce the number of objects to be listed
pub(crate) prefix: Option<String>,
pub(crate) matcher: Option<glob::Pattern>,
8 changes: 8 additions & 0 deletions src/stream/Cargo.toml
@@ -44,7 +44,15 @@ lru = { workspace = true }
maplit = "1.0.2"
memcomparable = "0.2"
multimap = "0.10"
opendal = { workspace = true, features = [
"executors-tokio",
"services-azblob",
"services-fs",
"services-gcs",
"services-s3",
] }
parking_lot = { workspace = true }
parquet = { workspace = true }
pin-project = "1"
prehash = "1"
prometheus = { version = "0.13", features = ["process"] }
63 changes: 50 additions & 13 deletions src/stream/src/executor/source/fetch_executor.rs
@@ -16,10 +16,13 @@ use std::marker::PhantomData;
use std::ops::Bound;

use either::Either;
use futures::{stream, TryStreamExt};
use futures::{stream, StreamExt, TryStreamExt};
use futures_async_stream::try_stream;
use risingwave_common::catalog::{ColumnId, TableId};
use risingwave_common::hash::VnodeBitmapExt;
use risingwave_common::types::ScalarRef;
use risingwave_connector::parser::parquet_parser::get_total_row_nums_for_parquet_file;
use risingwave_connector::parser::EncodingProperties;
use risingwave_connector::source::filesystem::opendal_source::{
OpendalGcs, OpendalPosixFs, OpendalS3, OpendalSource,
};
@@ -299,19 +302,53 @@ impl<S: StateStore, Src: OpendalSource> FsFetchExecutor<S, Src> {
// Receiving file assignments from upstream list executor,
// store into state table.
Message::Chunk(chunk) => {
let file_assignment = chunk
    .data_chunk()
    .rows()
    .map(|row| {
        let filename = row.datum_at(0).unwrap().into_utf8();
        let size = row.datum_at(2).unwrap().into_int64();
        OpendalFsSplit::<Src>::new(
            filename.to_owned(),
            0,
            size as usize,
        )
    })
    .collect();
// For Parquet encoding, the offset indicates the current row being read.
// Therefore, to determine whether the end of a Parquet file has been reached, we need to compare the offset with the total number of rows.
// We directly obtain the total row count and set the size in `OpendalFsSplit` to this value.
let file_assignment = if let EncodingProperties::Parquet =
    source_desc.source.parser_config.encoding_config
{
    let filename_list: Vec<_> = chunk
        .data_chunk()
        .rows()
        .map(|row| {
            let filename = row.datum_at(0).unwrap().into_utf8();
            filename.to_string()
        })
        .collect();
    let mut parquet_file_assignment = vec![];
    for filename in &filename_list {
        let total_row_num =
            get_total_row_nums_for_parquet_file(filename, source_desc.clone())
                .await?;
        parquet_file_assignment.push(OpendalFsSplit::<Src>::new(
            filename.to_owned(),
            0,
            total_row_num - 1, // -1 because the offset starts from 0.
        ));
    }
    parquet_file_assignment
} else {
    chunk
        .data_chunk()
        .rows()
        .map(|row| {
            let filename = row.datum_at(0).unwrap().into_utf8();
            let size = row.datum_at(2).unwrap().into_int64();
            OpendalFsSplit::<Src>::new(filename.to_owned(), 0, size as usize)
        })
        .collect()
};
state_store_handler.set_states(file_assignment).await?;
state_store_handler.state_table.try_flush().await?;
}
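A rough sketch of the end-of-file check this change enables. The actual comparison happens in the Parquet reading path, which is outside this diff, so the function below is illustrative only and its name and exact comparison are assumptions:

// Illustrative only: with this PR, a Parquet split's `size` holds the row count minus one,
// and the split offset counts rows already emitted, so the file is treated as fully read
// once the offset reaches that row-based size. Other encodings keep comparing byte offsets
// against the file's byte size in the same way.
fn parquet_split_finished(row_offset: usize, split_size_in_rows: usize) -> bool {
    row_offset >= split_size_in_rows
}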