Skip to content

Commit

Permalink
Remove default initializations from constructors, replacing them with in-class default member initializers.
Browse files Browse the repository at this point in the history
  • Loading branch information
jackdelv committed Sep 14, 2023
1 parent 6352a26 commit f1aea96
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 15 deletions.
5 changes: 0 additions & 5 deletions plugins/parquet/parquetembed.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -116,13 +116,10 @@ ParquetHelper::ParquetHelper(const char *option, const char *_location, const ch
row_size = rowsize;
batch_size = _batchSize;
activityCtx = _activityCtx;
rowsProcessed = 0;
tablesProcessed = 0;

pool = arrow::default_memory_pool();

parquet_doc = std::vector<rapidjson::Document>(rowsize);
current_row = 0;

partition = strlen(option) > 5;
}
Expand Down Expand Up @@ -760,8 +757,6 @@ void ParquetHelper::end_row(const char *name)
ParquetRowStream::ParquetRowStream(IEngineRowAllocator *_resultAllocator, std::shared_ptr<ParquetHelper> _parquet)
: m_resultAllocator(_resultAllocator), s_parquet(_parquet)
{
m_currentRow = 0;
m_shouldRead = true;
rowsCount = _parquet->num_rows();
array_visitor = std::make_shared<ParquetArrayVisitor>();
}
Expand Down
18 changes: 8 additions & 10 deletions plugins/parquet/parquetembed.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -402,7 +402,7 @@ class JsonValueConverter
{
public:
explicit JsonValueConverter(const std::vector<rapidjson::Document> &rows)
: rows_(rows), array_levels_(0) {}
: rows_(rows) {}

JsonValueConverter(const std::vector<rapidjson::Document> &rows, const std::vector<std::string> &root_path, int64_t array_levels)
: rows_(rows), root_path_(root_path), array_levels_(array_levels) {}
Expand Down Expand Up @@ -681,7 +681,7 @@ class JsonValueConverter
arrow::ArrayBuilder *builder_;
const std::vector<rapidjson::Document> &rows_;
std::vector<std::string> root_path_;
int64_t array_levels_;
int64_t array_levels_ = 0;

/// Return a flattened iterator over values at nested location
arrow::Iterator<const rapidjson::Value *> FieldValues()
Expand Down Expand Up @@ -737,10 +737,10 @@ class ParquetHelper
void end_row(const char *name);

private:
__int64 current_row;
__int64 current_row = 0;
__int64 row_size; // The maximum size of each parquet row group.
__int64 tablesProcessed; // Current RowGroup that has been read from the input file.
__int64 rowsProcessed; // Current Row that has been read from the RowGroup
__int64 tablesProcessed = 0; // Current RowGroup that has been read from the input file.
__int64 rowsProcessed = 0; // Current Row that has been read from the RowGroup
__int64 start_row_group; // The beginning RowGroup that is read by a worker
__int64 tableCount; // The number of RowGroups to be read by the worker from the file that was opened for reading.
__int64 rowsCount; // The number of result rows in a given RowGroup read from the parquet file.
Expand All @@ -755,7 +755,7 @@ class ParquetHelper
std::vector<rapidjson::Document> parquet_doc; // Document vector for converting rows to columns for writing to parquet files.
std::vector<rapidjson::Value> row_stack; // Stack for keeping track of the context when building a nested row.
std::shared_ptr<arrow::dataset::Scanner> scanner = nullptr; // Scanner for reading through partitioned files. PARTITION
arrow::dataset::FileSystemDatasetWriteOptions write_options; // Write options for writing partitioned files. PARTITION
arrow::dataset::FileSystemDatasetWriteOptions write_options; // Write options for writing partitioned files. PARTITION
std::shared_ptr<arrow::RecordBatchReader> rbatch_reader = nullptr;
arrow::RecordBatchReader::RecordBatchReaderIterator rbatch_itr;
std::unique_ptr<parquet::arrow::FileReader> parquet_read = nullptr; // FileReader for reading from parquet files.
Expand All @@ -779,8 +779,8 @@ class ParquetRowStream : public RtlCInterface, implements IRowStream

private:
Linked<IEngineRowAllocator> m_resultAllocator; //! Pointer to allocator used when building result rows.
bool m_shouldRead; //! If true, we should continue trying to read more messages.
__int64 m_currentRow; //! Current result row.
bool m_shouldRead = true; //! If true, we should continue trying to read more messages.
__int64 m_currentRow = 0; //! Current result row.
__int64 rowsCount; //! Number of result rows read from parquet file.
std::shared_ptr<ParquetArrayVisitor> array_visitor;
std::shared_ptr<ParquetHelper> s_parquet; //! Shared pointer to ParquetHelper class for the stream class.
Expand Down Expand Up @@ -929,8 +929,6 @@ class ParquetDatasetBinder : public ParquetRecordBinder
: input(_input), ParquetRecordBinder(_logctx, _typeInfo, _firstParam, _parquet)
{
d_parquet = _parquet;
// getFieldTypes(_typeInfo);

reportIfFailure(d_parquet->fieldsToSchema(_typeInfo));
}
virtual ~ParquetDatasetBinder() = default;
Expand Down

0 comments on commit f1aea96

Please sign in to comment.