Fix unhandled exception (#7743)
* clean up and use thread safe cache

Signed-off-by: [email protected] <[email protected]>
pinges authored Oct 9, 2024
1 parent 3e7e4d8 commit 11a62a0
Showing 3 changed files with 16 additions and 48 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -28,6 +28,7 @@
- Corrects a regression where custom plugin services are not initialized correctly. [#7625](https://github.com/hyperledger/besu/pull/7625)
- Fix for IBFT2 chains using the BONSAI DB format [#7631](https://github.com/hyperledger/besu/pull/7631)
- Fix reading `tx-pool-min-score` option from configuration file [#7623](https://github.com/hyperledger/besu/pull/7623)
- Fix an unhandled exception. [#7733](https://github.com/hyperledger/besu/issues/7733)

## 24.9.1

PeerTable.java
@@ -26,16 +26,17 @@
import org.hyperledger.besu.ethereum.p2p.peers.PeerId;

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.hash.BloomFilter;
import org.apache.commons.collections4.queue.CircularFifoQueue;
import org.apache.tuweni.bytes.Bytes;

/**
@@ -54,10 +55,7 @@ public class PeerTable {
private final Map<Bytes, Integer> distanceCache;
private BloomFilter<Bytes> idBloom;
private int evictionCnt = 0;
private final LinkedHashMapWithMaximumSize<String, Integer> ipAddressCheckMap =
new LinkedHashMapWithMaximumSize<>(DEFAULT_BUCKET_SIZE * N_BUCKETS);
private final CircularFifoQueue<String> invalidIPs =
new CircularFifoQueue<>(DEFAULT_BUCKET_SIZE * N_BUCKETS);
private final Cache<String, Integer> unresponsiveIPs;

/**
* Builds a new peer table, where distance is calculated using the provided nodeId as a baseline.
@@ -72,6 +70,11 @@ public PeerTable(final Bytes nodeId) {
.toArray(Bucket[]::new);
this.distanceCache = new ConcurrentHashMap<>();
this.maxEntriesCnt = N_BUCKETS * DEFAULT_BUCKET_SIZE;
this.unresponsiveIPs =
CacheBuilder.newBuilder()
.maximumSize(maxEntriesCnt)
.expireAfterWrite(15L, TimeUnit.MINUTES)
.build();

// A bloom filter with 4096 expected insertions of 64-byte keys with a 0.1% false positive
// probability yields a memory footprint of ~7.5kb.
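The hunk above replaces the hand-rolled LinkedHashMapWithMaximumSize and the CircularFifoQueue of invalid IPs with a single Guava Cache that is bounded in size, expires entries 15 minutes after they are written, and is safe for concurrent access. A minimal, self-contained sketch of that pattern follows; the class name, the illustrative size bound, and the "host:port" key format are ours, not Besu's, and only the CacheBuilder calls mirror the code above:

import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

// Sketch of the cache pattern used above: bounded in size, entries expire
// 15 minutes after being written, and the Cache implementation is safe for
// concurrent readers and writers.
public class UnresponsiveIpCacheSketch {
  private static final int MAX_ENTRIES = 16 * 256; // illustrative bound only

  private final Cache<String, Integer> unresponsiveIPs =
      CacheBuilder.newBuilder()
          .maximumSize(MAX_ENTRIES)
          .expireAfterWrite(15L, TimeUnit.MINUTES)
          .build();

  public void markUnresponsive(final String hostAndPort) {
    // The stored value is not meaningful; presence of the key is the signal.
    unresponsiveIPs.put(hostAndPort, Integer.MAX_VALUE);
  }

  public boolean isUnresponsive(final String hostAndPort) {
    // getIfPresent returns null once the entry has expired or been evicted.
    return unresponsiveIPs.getIfPresent(hostAndPort) != null;
  }
}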
@@ -140,7 +143,6 @@ public AddResult tryAdd(final DiscoveryPeer peer) {
if (!res.isPresent()) {
idBloom.put(id);
distanceCache.put(id, distance);
ipAddressCheckMap.put(getKey(peer.getEndpoint()), peer.getEndpoint().getUdpPort());
return AddResult.added();
}

@@ -214,26 +216,12 @@ public Stream<DiscoveryPeer> streamAllPeers() {

public boolean isIpAddressInvalid(final Endpoint endpoint) {
final String key = getKey(endpoint);
if (invalidIPs.contains(key)) {
return true;
}
if (ipAddressCheckMap.containsKey(key) && ipAddressCheckMap.get(key) != endpoint.getUdpPort()) {
// This peer has multiple discovery services on the same IP address + TCP port.
invalidIPs.add(key);
for (final Bucket bucket : table) {
bucket.getPeers().stream()
.filter(p -> p.getEndpoint().getHost().equals(endpoint.getHost()))
.forEach(bucket::evict);
}
return true;
} else {
return false;
}
return unresponsiveIPs.getIfPresent(key) != null;
}

public void invalidateIP(final Endpoint endpoint) {
final String key = getKey(endpoint);
invalidIPs.add(key);
unresponsiveIPs.put(key, Integer.MAX_VALUE);
}

private static String getKey(final Endpoint endpoint) {
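With the cache in place, isIpAddressInvalid reduces to a presence check and invalidateIP to a put; the old per-port bookkeeping and the bucket sweep are gone. One consequence of expireAfterWrite(15L, TimeUnit.MINUTES) is that an invalidated address is only ignored temporarily. The following standalone sketch (not part of the commit) makes the expiry visible by substituting a manual Ticker for the wall clock; the class name and key format are illustrative:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import com.google.common.base.Ticker;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

// Demonstrates that an entry written to the cache stops being reported as
// present once the 15-minute expiry window has passed.
public class ExpiryDemo {
  public static void main(final String[] args) {
    final AtomicLong nanos = new AtomicLong();
    final Ticker ticker = new Ticker() {
      @Override
      public long read() {
        return nanos.get();
      }
    };

    final Cache<String, Integer> unresponsive =
        CacheBuilder.newBuilder()
            .expireAfterWrite(15L, TimeUnit.MINUTES)
            .ticker(ticker)
            .build();

    unresponsive.put("1.1.1.1:30303", Integer.MAX_VALUE);
    System.out.println(unresponsive.getIfPresent("1.1.1.1:30303") != null); // true

    // Advance the clock past the expiry window; the entry is treated as gone.
    nanos.addAndGet(TimeUnit.MINUTES.toNanos(16));
    System.out.println(unresponsive.getIfPresent("1.1.1.1:30303") != null); // false
  }
}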
@@ -313,20 +301,6 @@ public Peer getEvictionCandidate() {
}
}

private static class LinkedHashMapWithMaximumSize<K, V> extends LinkedHashMap<K, V> {
private final int maxSize;

public LinkedHashMapWithMaximumSize(final int maxSize) {
super(maxSize, 0.75f, false);
this.maxSize = maxSize;
}

@Override
protected boolean removeEldestEntry(final Map.Entry<K, V> eldest) {
return size() > maxSize;
}
}

static class EvictResult {
public enum EvictOutcome {
EVICTED,
PeerTableTest.java
@@ -188,15 +188,12 @@ public void evictSelfPeerShouldReturnSelfOutcome() {
@Test
public void ipAddressIsInvalidReturnsTrue() {
final Endpoint endpoint1 = new Endpoint("1.1.1.1", 2, Optional.of(Integer.valueOf(1)));
final Endpoint endpoint2 = new Endpoint("1.1.1.1", 3, Optional.of(Integer.valueOf(1)));
final DiscoveryPeer peer1 = DiscoveryPeer.fromIdAndEndpoint(Peer.randomId(), endpoint1);
final DiscoveryPeer peer2 = DiscoveryPeer.fromIdAndEndpoint(Peer.randomId(), endpoint2);
final PeerTable table = new PeerTable(Bytes.random(64));

final PeerTable.AddResult addResult1 = table.tryAdd(peer1);
assertThat(addResult1.getOutcome()).isEqualTo(PeerTable.AddResult.added().getOutcome());
table.invalidateIP(endpoint1);

assertThat(table.isIpAddressInvalid(peer2.getEndpoint())).isEqualTo(true);
assertThat(table.isIpAddressInvalid(peer1.getEndpoint())).isEqualTo(true);
}

@Test
@@ -216,16 +213,12 @@ public void ipAddressIsInvalidReturnsFalse() {
@Test
public void invalidIPAddressNotAdded() {
final Endpoint endpoint1 = new Endpoint("1.1.1.1", 2, Optional.of(Integer.valueOf(1)));
final Endpoint endpoint2 = new Endpoint("1.1.1.1", 3, Optional.of(Integer.valueOf(1)));
final DiscoveryPeer peer1 = DiscoveryPeer.fromIdAndEndpoint(Peer.randomId(), endpoint1);
final DiscoveryPeer peer2 = DiscoveryPeer.fromIdAndEndpoint(Peer.randomId(), endpoint2);
final PeerTable table = new PeerTable(Bytes.random(64));

table.invalidateIP(endpoint1);
final PeerTable.AddResult addResult1 = table.tryAdd(peer1);
assertThat(addResult1.getOutcome()).isEqualTo(PeerTable.AddResult.added().getOutcome());

final PeerTable.AddResult addResult2 = table.tryAdd(peer2);
assertThat(addResult2.getOutcome()).isEqualTo(PeerTable.AddResult.invalid().getOutcome());
assertThat(addResult1.getOutcome()).isEqualTo(PeerTable.AddResult.invalid().getOutcome());
}

@Test
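Taken together, the updated tests exercise a simpler flow: invalidateIP marks the endpoint's address in the cache, isIpAddressInvalid reports it, and a subsequent tryAdd for a peer at that address is rejected. Below is a hedged end-to-end sketch of that flow, using only the PeerTable API visible in the diff; the import paths for DiscoveryPeer, Endpoint, and Peer are assumptions based on the package names shown earlier:

import java.util.Optional;

import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.ethereum.p2p.discovery.DiscoveryPeer;
import org.hyperledger.besu.ethereum.p2p.discovery.Endpoint;
import org.hyperledger.besu.ethereum.p2p.peers.Peer;

// Sketch of the invalidation flow exercised by the updated tests; import
// locations for DiscoveryPeer, Endpoint, and Peer are assumed, not confirmed
// by the diff.
public class InvalidationFlowSketch {
  public static void main(final String[] args) {
    final PeerTable table = new PeerTable(Bytes.random(64));
    final Endpoint endpoint = new Endpoint("1.1.1.1", 30303, Optional.of(30303));
    final DiscoveryPeer peer = DiscoveryPeer.fromIdAndEndpoint(Peer.randomId(), endpoint);

    // Mark the address as unresponsive; the entry lives in the Guava cache
    // for at most 15 minutes (or until evicted by the size bound).
    table.invalidateIP(endpoint);

    System.out.println(table.isIpAddressInvalid(endpoint)); // true
    System.out.println(table.tryAdd(peer).getOutcome());    // rejected as invalid
  }
}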
