Refactor sequentialFlush at PipelineDataSourceSink of Pipeline (#28734)
* Refactor sequentialFlush at PipelineDataSourceSink

* Refactor doFlush

* Improve and add TODO
azexcy authored Oct 16, 2023
1 parent bcd3c8b commit 559f121
Showing 1 changed file with 11 additions and 41 deletions.
@@ -135,7 +135,10 @@ private void tryFlush(final DataSource dataSource, final List<DataRecord> buffer)
 
     private void doFlush(final DataSource dataSource, final List<DataRecord> buffer) throws SQLException {
         try (Connection connection = dataSource.getConnection()) {
-            connection.setAutoCommit(false);
+            boolean enableTransaction = buffer.size() > 1;
+            if (enableTransaction) {
+                connection.setAutoCommit(false);
+            }
             switch (buffer.get(0).getType()) {
                 case IngestDataChangeType.INSERT:
                     if (null != rateLimitAlgorithm) {
@@ -158,45 +161,12 @@ private void doFlush(final DataSource dataSource, final List<DataRecord> buffer)
                 default:
                     break;
             }
-            connection.commit();
+            if (enableTransaction) {
+                connection.commit();
+            }
         }
     }
 
-    private void doFlush(final Connection connection, final List<DataRecord> buffer) {
-        // TODO it's better use transaction, but execute delete maybe not effect when open transaction of PostgreSQL sometimes
-        for (DataRecord each : buffer) {
-            try {
-                doFlush(connection, each);
-            } catch (final SQLException ex) {
-                throw new PipelineImporterJobWriteException(String.format("Write failed, record=%s", each), ex);
-            }
-        }
-    }
-
-    private void doFlush(final Connection connection, final DataRecord dataRecord) throws SQLException {
-        switch (dataRecord.getType()) {
-            case IngestDataChangeType.INSERT:
-                if (null != rateLimitAlgorithm) {
-                    rateLimitAlgorithm.intercept(JobOperationType.INSERT, 1);
-                }
-                executeBatchInsert(connection, Collections.singletonList(dataRecord));
-                break;
-            case IngestDataChangeType.UPDATE:
-                if (null != rateLimitAlgorithm) {
-                    rateLimitAlgorithm.intercept(JobOperationType.UPDATE, 1);
-                }
-                executeUpdate(connection, dataRecord);
-                break;
-            case IngestDataChangeType.DELETE:
-                if (null != rateLimitAlgorithm) {
-                    rateLimitAlgorithm.intercept(JobOperationType.DELETE, 1);
-                }
-                executeBatchDelete(connection, Collections.singletonList(dataRecord));
-                break;
-            default:
-        }
-    }
-
     private void executeBatchInsert(final Connection connection, final List<DataRecord> dataRecords) throws SQLException {
         DataRecord dataRecord = dataRecords.get(0);
         String insertSql = importSQLBuilder.buildInsertSQL(getSchemaName(dataRecord.getTableName()), dataRecord);
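Read together, the two hunks above leave doFlush looking roughly like this (a reconstruction stitched from the diff; the case bodies between the hunks are not shown by the diff and are only sketched as comments here):

    private void doFlush(final DataSource dataSource, final List<DataRecord> buffer) throws SQLException {
        try (Connection connection = dataSource.getConnection()) {
            // Open an explicit transaction only when more than one record is flushed together.
            boolean enableTransaction = buffer.size() > 1;
            if (enableTransaction) {
                connection.setAutoCommit(false);
            }
            switch (buffer.get(0).getType()) {
                case IngestDataChangeType.INSERT:
                    if (null != rateLimitAlgorithm) {
                        rateLimitAlgorithm.intercept(JobOperationType.INSERT, 1);
                    }
                    // ... INSERT handling elided by the diff ...
                    break;
                // ... UPDATE and DELETE cases elided by the diff ...
                default:
                    break;
            }
            // Commit only if a transaction was opened above; a single record stays in auto-commit mode.
            if (enableTransaction) {
                connection.commit();
            }
        }
    }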
@@ -279,11 +249,11 @@ private void executeBatchDelete(final Connection connection, final List<DataRecord> dataRecords)
     }
 
     private void sequentialFlush(final DataSource dataSource, final List<DataRecord> buffer) {
         if (buffer.isEmpty()) {
             return;
         }
-        try (Connection connection = dataSource.getConnection()) {
-            doFlush(connection, buffer);
+        // TODO it's better use transaction, but execute delete maybe not effect when open transaction of PostgreSQL sometimes
+        try {
+            for (DataRecord each : buffer) {
+                doFlush(dataSource, Collections.singletonList(each));
+            }
         } catch (final SQLException ex) {
             throw new PipelineImporterJobWriteException(ex);
         }
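The refactored sequentialFlush can be read straight off the hunk above; it now hands each record to doFlush individually instead of holding one connection for the whole buffer:

    private void sequentialFlush(final DataSource dataSource, final List<DataRecord> buffer) {
        if (buffer.isEmpty()) {
            return;
        }
        // TODO it's better use transaction, but execute delete maybe not effect when open transaction of PostgreSQL sometimes
        try {
            for (DataRecord each : buffer) {
                // A singleton buffer has size 1, so the refactored doFlush skips the explicit transaction.
                doFlush(dataSource, Collections.singletonList(each));
            }
        } catch (final SQLException ex) {
            throw new PipelineImporterJobWriteException(ex);
        }
    }

Because each record is flushed as a singleton list, the enableTransaction flag in doFlush stays false on this path, so the per-record writes run in auto-commit mode — the behavior the TODO comment asks for around PostgreSQL DELETE handling.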
