Skip to content

Commit

Permalink
Release 22.4.0 (#3783)
Browse files Browse the repository at this point in the history
* fix nullpointer on snapsync (#3773)
* Introduce RocksDbSegmentIdentifier to avoid changing the storage plug… (#3755)
* Release 22.4.0

Signed-off-by: Lucas Saldanha <[email protected]>

Co-authored-by: matkt <[email protected]>
Co-authored-by: Fabio Di Fabio <[email protected]>
  • Loading branch information
3 people authored May 4, 2022
1 parent 7f076ef commit 5ec9de8
Show file tree
Hide file tree
Showing 9 changed files with 162 additions and 90 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@

### Breaking Changes
- Version 22.4.x will be the last series to support Java 11. Version 22.7.0 will require Java 17 to build and run.
- In the Besu EVM Library all references to SHA3 have been renamed to the more accurate name Keccak256, including class names and comments. [#3749](https://github.com/hyperledger/besu/pull/3749)

### Bug Fixes
- Fix nullpointer on snapsync [#3773](https://github.com/hyperledger/besu/pull/3773)
- Introduce RocksDbSegmentIdentifier to avoid changing the storage plugin [#3755](https://github.com/hyperledger/besu/pull/3755)

## 22.4.0-RC2

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -189,13 +189,8 @@ public synchronized void reloadHeal() {
worldStateStorage.clearFlatDatabase();
pendingTrieNodeRequests.clearInternalQueues();
pendingCodeRequests.clearInternalQueue();
enqueueRequest(
createAccountTrieNodeDataRequest(
snapSyncState.getPivotBlockHeader().orElseThrow().getStateRoot(),
Bytes.EMPTY,
inconsistentAccounts));
requestComplete(true);
notifyTaskAvailable();
snapSyncState.setHealStatus(false);
checkCompletion(snapSyncState.getPivotBlockHeader().orElseThrow());
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

Expand Down Expand Up @@ -236,4 +237,21 @@ public void shouldCancelOutstandingTasksWhenFutureIsCancelled() {
verify(worldStateDownloadProcess).abort();
assertThat(downloadState.isDownloading()).isFalse();
}

@Test
public void shouldRestartHealWhenNewPivotBlock() {
when(snapSyncState.getPivotBlockHeader()).thenReturn(Optional.of(mock(BlockHeader.class)));
when(snapSyncState.isHealInProgress()).thenReturn(false);
assertThat(downloadState.pendingTrieNodeRequests.isEmpty()).isTrue();
// start heal
downloadState.checkCompletion(header);
verify(snapSyncState).setHealStatus(true);
assertThat(downloadState.pendingTrieNodeRequests.isEmpty()).isFalse();
// reload the heal
downloadState.reloadHeal();
verify(snapSyncState).setHealStatus(false);
spy(downloadState.pendingTrieNodeRequests).clearInternalQueues();
spy(downloadState).checkCompletion(header);
assertThat(downloadState.pendingTrieNodeRequests.isEmpty()).isFalse();
}
}
2 changes: 1 addition & 1 deletion gradle.properties
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
version=22.4.0-RC3-SNAPSHOT
version=22.4.0

# Workaround for Java 16 and spotless bug 834 https://github.com/diffplug/spotless/issues/834
org.gradle.jvmargs=--add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED \
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb;

import org.hyperledger.besu.plugin.services.exception.StorageException;

import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;
import org.rocksdb.TransactionDB;

public class RocksDbSegmentIdentifier {

private final TransactionDB db;
private final AtomicReference<ColumnFamilyHandle> reference;

public RocksDbSegmentIdentifier(
final TransactionDB db, final ColumnFamilyHandle columnFamilyHandle) {
this.db = db;
this.reference = new AtomicReference<>(columnFamilyHandle);
}

public void reset() {
reference.getAndUpdate(
oldHandle -> {
try {
ColumnFamilyDescriptor descriptor =
new ColumnFamilyDescriptor(
oldHandle.getName(), oldHandle.getDescriptor().getOptions());
db.dropColumnFamily(oldHandle);
ColumnFamilyHandle newHandle = db.createColumnFamily(descriptor);
oldHandle.close();
return newHandle;
} catch (final RocksDBException e) {
throw new StorageException(e);
}
});
}

public ColumnFamilyHandle get() {
return reference.get();
}

@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
RocksDbSegmentIdentifier that = (RocksDbSegmentIdentifier) o;
return Objects.equals(reference.get(), that.reference.get());
}

@Override
public int hashCode() {
return reference.get().hashCode();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetrics;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbKeyIterator;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbSegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbUtil;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfiguration;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorage;
Expand All @@ -36,7 +37,6 @@
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
Expand All @@ -61,7 +61,7 @@
import org.slf4j.LoggerFactory;

public class RocksDBColumnarKeyValueStorage
implements SegmentedKeyValueStorage<ColumnFamilyHandle> {
implements SegmentedKeyValueStorage<RocksDbSegmentIdentifier> {

private static final Logger LOG = LoggerFactory.getLogger(RocksDBColumnarKeyValueStorage.class);
private static final String DEFAULT_COLUMN = "default";
Expand All @@ -75,7 +75,7 @@ public class RocksDBColumnarKeyValueStorage
private final TransactionDBOptions txOptions;
private final TransactionDB db;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final Map<String, AtomicReference<ColumnFamilyHandle>> columnHandlesByName;
private final Map<String, RocksDbSegmentIdentifier> columnHandlesByName;
private final RocksDBMetrics metrics;
private final WriteOptions tryDeleteOptions = new WriteOptions().setNoSlowdown(true);

Expand Down Expand Up @@ -128,14 +128,13 @@ public RocksDBColumnarKeyValueStorage(
Collectors.toMap(
segment -> Bytes.wrap(segment.getId()), SegmentIdentifier::getName));

final ImmutableMap.Builder<String, AtomicReference<ColumnFamilyHandle>> builder =
ImmutableMap.builder();
final ImmutableMap.Builder<String, RocksDbSegmentIdentifier> builder = ImmutableMap.builder();

for (ColumnFamilyHandle columnHandle : columnHandles) {
final String segmentName =
requireNonNullElse(
segmentsById.get(Bytes.wrap(columnHandle.getName())), DEFAULT_COLUMN);
builder.put(segmentName, new AtomicReference<>(columnHandle));
builder.put(segmentName, new RocksDbSegmentIdentifier(db, columnHandle));
}
columnHandlesByName = builder.build();

Expand All @@ -150,42 +149,41 @@ private BlockBasedTableConfig createBlockBasedTableConfig(final RocksDBConfigura
}

@Override
public AtomicReference<ColumnFamilyHandle> getSegmentIdentifierByName(
final SegmentIdentifier segment) {
public RocksDbSegmentIdentifier getSegmentIdentifierByName(final SegmentIdentifier segment) {
return columnHandlesByName.get(segment.getName());
}

@Override
public Optional<byte[]> get(final ColumnFamilyHandle segment, final byte[] key)
public Optional<byte[]> get(final RocksDbSegmentIdentifier segment, final byte[] key)
throws StorageException {
throwIfClosed();

try (final OperationTimer.TimingContext ignored = metrics.getReadLatency().startTimer()) {
return Optional.ofNullable(db.get(segment, key));
return Optional.ofNullable(db.get(segment.get(), key));
} catch (final RocksDBException e) {
throw new StorageException(e);
}
}

@Override
public Transaction<ColumnFamilyHandle> startTransaction() throws StorageException {
public Transaction<RocksDbSegmentIdentifier> startTransaction() throws StorageException {
throwIfClosed();
final WriteOptions writeOptions = new WriteOptions();
return new SegmentedKeyValueStorageTransactionTransitionValidatorDecorator<>(
new RocksDbTransaction(db.beginTransaction(writeOptions), writeOptions));
}

@Override
public Stream<byte[]> streamKeys(final ColumnFamilyHandle segmentHandle) {
final RocksIterator rocksIterator = db.newIterator(segmentHandle);
public Stream<byte[]> streamKeys(final RocksDbSegmentIdentifier segmentHandle) {
final RocksIterator rocksIterator = db.newIterator(segmentHandle.get());
rocksIterator.seekToFirst();
return RocksDbKeyIterator.create(rocksIterator).toStream();
}

@Override
public boolean tryDelete(final ColumnFamilyHandle segmentHandle, final byte[] key) {
public boolean tryDelete(final RocksDbSegmentIdentifier segmentHandle, final byte[] key) {
try {
db.delete(segmentHandle, tryDeleteOptions, key);
db.delete(segmentHandle.get(), tryDeleteOptions, key);
return true;
} catch (RocksDBException e) {
if (e.getStatus().getCode() == Status.Code.Incomplete) {
Expand All @@ -198,33 +196,17 @@ public boolean tryDelete(final ColumnFamilyHandle segmentHandle, final byte[] ke

@Override
public Set<byte[]> getAllKeysThat(
final ColumnFamilyHandle segmentHandle, final Predicate<byte[]> returnCondition) {
final RocksDbSegmentIdentifier segmentHandle, final Predicate<byte[]> returnCondition) {
return streamKeys(segmentHandle).filter(returnCondition).collect(toUnmodifiableSet());
}

@Override
public void clear(final ColumnFamilyHandle segmentHandle) {

var entry =
columnHandlesByName.values().stream().filter(e -> e.get().equals(segmentHandle)).findAny();

if (entry.isPresent()) {
AtomicReference<ColumnFamilyHandle> segmentHandleRef = entry.get();
segmentHandleRef.getAndUpdate(
oldHandle -> {
try {
ColumnFamilyDescriptor descriptor =
new ColumnFamilyDescriptor(
segmentHandle.getName(), segmentHandle.getDescriptor().getOptions());
db.dropColumnFamily(oldHandle);
ColumnFamilyHandle newHandle = db.createColumnFamily(descriptor);
segmentHandle.close();
return newHandle;
} catch (final RocksDBException e) {
throw new StorageException(e);
}
});
}
public void clear(final RocksDbSegmentIdentifier segmentHandle) {

columnHandlesByName.values().stream()
.filter(e -> e.equals(segmentHandle))
.findAny()
.ifPresent(segmentIdentifier -> segmentIdentifier.reset());
}

@Override
Expand All @@ -234,7 +216,7 @@ public void close() {
options.close();
tryDeleteOptions.close();
columnHandlesByName.values().stream()
.map(AtomicReference::get)
.map(RocksDbSegmentIdentifier::get)
.forEach(ColumnFamilyHandle::close);
db.close();
}
Expand All @@ -247,7 +229,7 @@ private void throwIfClosed() {
}
}

private class RocksDbTransaction implements Transaction<ColumnFamilyHandle> {
private class RocksDbTransaction implements Transaction<RocksDbSegmentIdentifier> {

private final org.rocksdb.Transaction innerTx;
private final WriteOptions options;
Expand All @@ -258,9 +240,9 @@ private class RocksDbTransaction implements Transaction<ColumnFamilyHandle> {
}

@Override
public void put(final ColumnFamilyHandle segment, final byte[] key, final byte[] value) {
public void put(final RocksDbSegmentIdentifier segment, final byte[] key, final byte[] value) {
try (final OperationTimer.TimingContext ignored = metrics.getWriteLatency().startTimer()) {
innerTx.put(segment, key, value);
innerTx.put(segment.get(), key, value);
} catch (final RocksDBException e) {
if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
LOG.error(e.getMessage());
Expand All @@ -271,9 +253,9 @@ public void put(final ColumnFamilyHandle segment, final byte[] key, final byte[]
}

@Override
public void remove(final ColumnFamilyHandle segment, final byte[] key) {
public void remove(final RocksDbSegmentIdentifier segment, final byte[] key) {
try (final OperationTimer.TimingContext ignored = metrics.getRemoveLatency().startTimer()) {
innerTx.delete(segment, key);
innerTx.delete(segment.get(), key);
} catch (final RocksDBException e) {
if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
LOG.error(e.getMessage());
Expand Down
Loading

0 comments on commit 5ec9de8

Please sign in to comment.