Skip to content

Commit

Permalink
Merge branch 'main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
non-fungible-nelson authored Jun 20, 2024
2 parents 6ce992d + 3026268 commit f4e64b5
Show file tree
Hide file tree
Showing 25 changed files with 751 additions and 79 deletions.
70 changes: 70 additions & 0 deletions .github/workflows/BesuContainerVerify.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
#!/bin/bash
##
## Copyright contributors to Hyperledger Besu.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
## the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
## an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
## specific language governing permissions and limitations under the License.
##
## SPDX-License-Identifier: Apache-2.0
##

# Inputs (all overridable via environment):
#   CONTAINER_NAME - name of the running Besu container to verify (default: besu)
#   VERSION        - expected Besu release version (checked only for the latest tag)
#   TAG            - image tag under test; "latest" triggers the version check
#   CHECK_LATEST   - "true" to enable the version check for the latest tag
#   RETRY / SLEEP  - log-polling attempts and delay (seconds) between them
CONTAINER_NAME=${CONTAINER_NAME:-besu}
VERSION=${VERSION}
TAG=${TAG}
CHECK_LATEST=${CHECK_LATEST}
RETRY=${RETRY:-10}
SLEEP=${SLEEP:-5}

# Emit a GitHub Actions error annotation and abort with a non-zero exit code.
# The workflow-command syntax requires a "::" separator before the message
# ("::error::msg"); the bare "::error msg" form is not recognized by the
# Actions runner and the annotation would be silently dropped.
log_error() {
    echo "::error::$1"
    exit 1
}

# Fail fast unless the container is actually in the "running" state.
# The Go template is single-quoted so the shell cannot mangle it, and the
# container name is quoted in case it ever contains unusual characters.
_RUN_STATE=$(docker inspect --type=container -f '{{.State.Status}}' "${CONTAINER_NAME}")
if [[ "${_RUN_STATE}" != "running" ]]
then
    log_error "container is not running"
fi

# Poll the container logs until the Besu startup marker appears or the
# retry budget is exhausted.
_SUCCESS=false
while [[ ${_SUCCESS} != "true" && $RETRY -gt 0 ]]
do
    docker logs "${CONTAINER_NAME}" | grep -q "Ethereum main loop is up" && {
        _SUCCESS=true
        continue
    }
    echo "Waiting for the besu to start. Remaining retries $RETRY ..."
    # Shell arithmetic instead of the external (and obsolescent) expr(1).
    RETRY=$((RETRY - 1))
    sleep "$SLEEP"
done

# The startup marker never appeared: dump recent logs for diagnosis and fail.
if [[ ${_SUCCESS} != "true" ]]
then
    docker logs --tail=100 "${CONTAINER_NAME}"
    log_error "could not find the log message 'Ethereum main loop is up'"
else
    echo "Besu container started and entered main loop"
fi

# For the "latest" tag, optionally verify that the version advertised in the
# startup banner matches the expected release version.
if [[ ${TAG} == "latest" && ${CHECK_LATEST} == "true" ]]
then
    # Assumes the banner line looks like "# Besu version <x.y.z>" so that the
    # version is the 4th space-delimited field — TODO confirm against a real log.
    _VERSION_IN_LOG=$(docker logs "${CONTAINER_NAME}" | grep "#" | grep "Besu version" | cut -d " " -f 4 | sed 's/\s//g')
    echo "Extracted version from logs [$_VERSION_IN_LOG]"
    if [[ "$_VERSION_IN_LOG" != "${VERSION}" ]]
    then
        log_error "version [$_VERSION_IN_LOG] extracted from container logs does not match the expected version [${VERSION}]"
    else
        echo "Latest Besu container version matches"
    fi
fi
57 changes: 57 additions & 0 deletions .github/workflows/container-verify.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
name: container verify

# Manually-triggered workflow that starts a published Besu container image and
# verifies it boots into the Ethereum main loop (and, optionally, that the
# "latest" tag advertises the expected release version).
on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Besu version'
        required: true
      verify-latest-version:
        description: 'Check latest container version'
        required: false
        type: choice
        default: "true"
        options:
          - "true"
          - "false"

jobs:
  verify:
    timeout-minutes: 4
    strategy:
      matrix:
        # Each combination pins an image tag, an optional docker --platform
        # override, and the runner it must execute on (arm64 needs the
        # self-hosted besu-arm64 runner).
        combination:
          - tag: ${{ inputs.version }}
            platform: ''
            runner: ubuntu-latest
          - tag: ${{ inputs.version }}-amd64
            platform: 'linux/amd64'
            runner: ubuntu-latest
          - tag: latest
            platform: ''
            runner: ubuntu-latest
          - tag: ${{ inputs.version }}-arm64
            platform: ''
            runner: besu-arm64
    runs-on: ${{ matrix.combination.runner }}
    env:
      CONTAINER_NAME: besu-check
    steps:
      - name: Checkout
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11

      - name: Start container
        run: |
          PLATFORM_OPT=""
          [[ x${{ matrix.combination.platform }} != 'x' ]] && PLATFORM_OPT="--platform ${{ matrix.combination.platform }}"
          docker run -d $PLATFORM_OPT --name ${{ env.CONTAINER_NAME }} hyperledger/besu:${{ matrix.combination.tag }}

      - name: Verify besu container
        run: bash .github/workflows/BesuContainerVerify.sh
        env:
          TAG: ${{ matrix.combination.tag }}
          VERSION: ${{ inputs.version }}
          CHECK_LATEST: ${{ inputs.verify-latest-version }}

      - name: Stop container
        # Must run even when the verify step fails, and must also remove the
        # container: otherwise a stale "besu-check" container is left behind,
        # which on the persistent self-hosted besu-arm64 runner makes the next
        # "docker run --name besu-check" fail with a name conflict.
        if: ${{ always() }}
        run: docker stop ${{ env.CONTAINER_NAME }} && docker rm ${{ env.CONTAINER_NAME }}
14 changes: 14 additions & 0 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -265,3 +265,17 @@ jobs:
run: ./gradlew "-Prelease.releaseVersion=${{ github.event.release.name }}" "-PdockerOrgName=${{ env.registry }}/${{ secrets.DOCKER_ORG }}" dockerUploadRelease
- name: Docker manifest
run: ./gradlew "-Prelease.releaseVersion=${{ github.event.release.name }}" "-PdockerOrgName=${{ env.registry }}/${{ secrets.DOCKER_ORG }}" manifestDockerRelease

verifyContainer:
needs: dockerPromoteX64
runs-on: ubuntu-22.04
permissions:
contents: read
actions: write
steps:
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Trigger container verify
run: echo '{"version":"${{ github.event.release.name }}","verify-latest-version":"true"}' | gh workflow run container-verify.yml --json
env:
GH_TOKEN: ${{ github.token }}
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
### Bug fixes
- Validation errors ignored in accounts-allowlist and empty list [#7138](https://github.com/hyperledger/besu/issues/7138)
- Fix "Invalid block detected" for BFT chains using Bonsai DB [#7204](https://github.com/hyperledger/besu/pull/7204)
- Fix "Could not confirm best peer had pivot block" [#7109](https://github.com/hyperledger/besu/issues/7109)
- Fix "Chain Download Halt" [#6884](https://github.com/hyperledger/besu/issues/6884)

## 24.6.0

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ protected EthProtocolManager createEthProtocolManager(
var mergeBestPeerComparator =
new TransitionBestPeerComparator(
genesisConfigOptions.getTerminalTotalDifficulty().map(Difficulty::of).orElseThrow());
ethPeers.setBestChainComparator(mergeBestPeerComparator);
ethPeers.setBestPeerComparator(mergeBestPeerComparator);
mergeContext.observeNewIsPostMergeState(mergeBestPeerComparator);

Optional<MergePeerFilter> filterToUse = Optional.of(new MergePeerFilter());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -198,9 +198,12 @@ private boolean registerDisconnect(final EthPeer peer, final PeerConnection conn
peer.handleDisconnect();
abortPendingRequestsAssignedToDisconnectedPeers();
if (peer.getReputation().getScore() > USEFULL_PEER_SCORE_THRESHOLD) {
LOG.debug("Disconnected USEFULL peer {}", peer);
LOG.atDebug().setMessage("Disconnected USEFULL peer {}").addArgument(peer).log();
} else {
LOG.debug("Disconnected EthPeer {}", peer.getLoggableId());
LOG.atDebug()
.setMessage("Disconnected EthPeer {}")
.addArgument(peer.getLoggableId())
.log();
}
}
}
Expand Down Expand Up @@ -318,11 +321,11 @@ public Stream<EthPeer> streamAvailablePeers() {
public Stream<EthPeer> streamBestPeers() {
return streamAvailablePeers()
.filter(EthPeer::isFullyValidated)
.sorted(getBestChainComparator().reversed());
.sorted(getBestPeerComparator().reversed());
}

public Optional<EthPeer> bestPeer() {
return streamAvailablePeers().max(getBestChainComparator());
return streamAvailablePeers().max(getBestPeerComparator());
}

public Optional<EthPeer> bestPeerWithHeightEstimate() {
Expand All @@ -331,15 +334,15 @@ public Optional<EthPeer> bestPeerWithHeightEstimate() {
}

public Optional<EthPeer> bestPeerMatchingCriteria(final Predicate<EthPeer> matchesCriteria) {
return streamAvailablePeers().filter(matchesCriteria).max(getBestChainComparator());
return streamAvailablePeers().filter(matchesCriteria).max(getBestPeerComparator());
}

public void setBestChainComparator(final Comparator<EthPeer> comparator) {
public void setBestPeerComparator(final Comparator<EthPeer> comparator) {
LOG.info("Updating the default best peer comparator");
bestPeerComparator = comparator;
}

public Comparator<EthPeer> getBestChainComparator() {
public Comparator<EthPeer> getBestPeerComparator() {
return bestPeerComparator;
}

Expand Down Expand Up @@ -394,8 +397,7 @@ public boolean shouldConnect(final Peer peer, final boolean inbound) {

public void disconnectWorstUselessPeer() {
streamAvailablePeers()
.sorted(getBestChainComparator())
.findFirst()
.min(getBestPeerComparator())
.ifPresent(
peer -> {
LOG.atDebug()
Expand Down Expand Up @@ -551,29 +553,40 @@ private boolean addPeerToEthPeers(final EthPeer peer) {
if (!randomPeerPriority) {
// Disconnect if too many peers
if (!canExceedPeerLimits(id) && peerCount() >= peerUpperBound) {
LOG.trace(
"Too many peers. Disconnect connection: {}, max connections {}",
connection,
peerUpperBound);
LOG.atTrace()
.setMessage("Too many peers. Disconnect connection: {}, max connections {}")
.addArgument(connection)
.addArgument(peerUpperBound)
.log();
connection.disconnect(DisconnectMessage.DisconnectReason.TOO_MANY_PEERS);
return false;
}
// Disconnect if too many remotely-initiated connections
if (connection.inboundInitiated()
&& !canExceedPeerLimits(id)
&& remoteConnectionLimitReached()) {
LOG.trace(
"Too many remotely-initiated connections. Disconnect incoming connection: {}, maxRemote={}",
connection,
maxRemotelyInitiatedConnections);
LOG.atTrace()
.setMessage(
"Too many remotely-initiated connections. Disconnect incoming connection: {}, maxRemote={}")
.addArgument(connection)
.addArgument(maxRemotelyInitiatedConnections)
.log();
connection.disconnect(DisconnectMessage.DisconnectReason.TOO_MANY_PEERS);
return false;
}
final boolean added = (completeConnections.putIfAbsent(id, peer) == null);
if (added) {
LOG.trace("Added peer {} with connection {} to completeConnections", id, connection);
LOG.atTrace()
.setMessage("Added peer {} with connection {} to completeConnections")
.addArgument(id)
.addArgument(connection)
.log();
} else {
LOG.trace("Did not add peer {} with connection {} to completeConnections", id, connection);
LOG.atTrace()
.setMessage("Did not add peer {} with connection {} to completeConnections")
.addArgument(id)
.addArgument(connection)
.log();
}
return added;
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@
public class EthScheduler {
private static final Logger LOG = LoggerFactory.getLogger(EthScheduler.class);

private final Duration defaultTimeout = Duration.ofSeconds(5);
private final AtomicBoolean stopped = new AtomicBoolean(false);
private final CountDownLatch shutdown = new CountDownLatch(1);
private static final int TX_WORKER_CAPACITY = 1_000;
Expand Down Expand Up @@ -219,10 +218,6 @@ public CompletableFuture<Void> scheduleBlockCreationTask(final Runnable task) {
return CompletableFuture.runAsync(task, blockCreationExecutor);
}

public <T> CompletableFuture<T> timeout(final EthTask<T> task) {
return timeout(task, defaultTimeout);
}

public <T> CompletableFuture<T> timeout(final EthTask<T> task, final Duration timeout) {
final CompletableFuture<T> future = task.run();
final CompletableFuture<T> result = timeout(future, timeout);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
public class RetryingGetAccountRangeFromPeerTask
extends AbstractRetryingPeerTask<AccountRangeMessage.AccountRangeData> {

public static final int MAX_RETRIES = 4;
private final EthContext ethContext;
private final Bytes32 startKeyHash;
private final Bytes32 endKeyHash;
Expand All @@ -43,7 +44,10 @@ private RetryingGetAccountRangeFromPeerTask(
final BlockHeader blockHeader,
final MetricsSystem metricsSystem) {
super(
ethContext, 4, data -> data.accounts().isEmpty() && data.proofs().isEmpty(), metricsSystem);
ethContext,
MAX_RETRIES,
data -> data.accounts().isEmpty() && data.proofs().isEmpty(),
metricsSystem);
this.ethContext = ethContext;
this.startKeyHash = startKeyHash;
this.endKeyHash = endKeyHash;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -122,15 +122,18 @@ protected void handleTaskError(final Throwable error) {
() ->
ethContext
.getScheduler()
// wait for a new peer for up to 5 seconds
.timeout(waitTask, Duration.ofSeconds(5))
// execute the task again
.whenComplete((r, t) -> executeTaskTimed()));
return;
}

LOG.debug(
"Retrying after recoverable failure from peer task {}: {}",
this.getClass().getSimpleName(),
cause.getMessage());
LOG.atDebug()
.setMessage("Retrying after recoverable failure from peer task {}: {}")
.addArgument(this.getClass().getSimpleName())
.addArgument(cause.getMessage())
.log();
// Wait before retrying on failure
executeSubTask(
() ->
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@
public class PivotBlockRetriever {

private static final Logger LOG = LoggerFactory.getLogger(PivotBlockRetriever.class);
public static final int MAX_QUERY_RETRIES_PER_PEER = 4;
public static final int MAX_QUERY_RETRIES_PER_PEER = 5;
private static final int DEFAULT_MAX_PIVOT_BLOCK_RESETS = 250;
private static final int SUSPICIOUS_NUMBER_OF_RETRIES = 5;

Expand Down
Loading

0 comments on commit f4e64b5

Please sign in to comment.