Merge branch 'master' into 8292
lucassaldanha authored Jul 24, 2024
2 parents ad2f80e + 56a746b commit c265f67
Showing 8 changed files with 244 additions and 34 deletions.
MergedGenesisInteropModeAcceptanceTest.java (new file)
@@ -0,0 +1,103 @@
/*
* Copyright Consensys Software Inc., 2022
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/

package tech.pegasys.teku.test.acceptance;

import static org.assertj.core.api.Fail.fail;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import tech.pegasys.teku.infrastructure.unsigned.UInt64;
import tech.pegasys.teku.spec.SpecMilestone;
import tech.pegasys.teku.test.acceptance.dsl.AcceptanceTestBase;
import tech.pegasys.teku.test.acceptance.dsl.TekuBeaconNode;
import tech.pegasys.teku.test.acceptance.dsl.TekuNodeConfig;
import tech.pegasys.teku.test.acceptance.dsl.TekuNodeConfigBuilder;

public class MergedGenesisInteropModeAcceptanceTest extends AcceptanceTestBase {

@ParameterizedTest
@EnumSource(SpecMilestone.class)
public void startFromMergedStatePerMilestoneUsingTerminalBlockHash(
final SpecMilestone specMilestone) throws Exception {
if (specMilestone.isGreaterThanOrEqualTo(SpecMilestone.CAPELLA)) {
final TekuNodeConfig config =
createTekuNodeBuilderForMilestone(specMilestone)
.withTerminalBlockHash(
"0x00000000000000000000000000000000000000000000000000000000000000aa")
.withStubExecutionEngine()
.build();

final TekuBeaconNode node = createTekuBeaconNode(config);

node.start();
node.waitForNonDefaultExecutionPayload();
node.waitForNewBlock();
}
}

@ParameterizedTest
@EnumSource(SpecMilestone.class)
public void startFromMergedStatePerMilestoneUsingTotalDifficultySimulation(
final SpecMilestone specMilestone) throws Exception {
if (specMilestone.isGreaterThanOrEqualTo(SpecMilestone.CAPELLA)) {
final TekuNodeConfig config =
createTekuNodeBuilderForMilestone(specMilestone).withStubExecutionEngine().build();

final TekuBeaconNode node = createTekuBeaconNode(config);

node.start();
node.waitForNonDefaultExecutionPayload();
node.waitForNewBlock();
}
}

private static TekuNodeConfigBuilder createTekuNodeBuilderForMilestone(
final SpecMilestone specMilestone) throws Exception {
final TekuNodeConfigBuilder tekuNodeConfigBuilder =
TekuNodeConfigBuilder.createBeaconNode()
.withRealNetwork()
.withNetwork("minimal")
.withAltairEpoch(UInt64.ZERO)
.withBellatrixEpoch(UInt64.ZERO)
.withTotalTerminalDifficulty(0)
.withStartupTargetPeerCount(0)
.withTrustedSetupFromClasspath("mainnet-trusted-setup.txt")
.withInteropNumberOfValidators(64)
.withValidatorProposerDefaultFeeRecipient("0xFE3B557E8Fb62b89F4916B721be55cEb828dBd73");

switch (specMilestone) {
// We do not need to consider PHASE0, ALTAIR, or BELLATRIX as they are all pre-Merge milestones.
case CAPELLA:
tekuNodeConfigBuilder.withCapellaEpoch(UInt64.ZERO);
break;
case DENEB:
tekuNodeConfigBuilder.withCapellaEpoch(UInt64.ZERO);
tekuNodeConfigBuilder.withDenebEpoch(UInt64.ZERO);
break;
case ELECTRA:
tekuNodeConfigBuilder.withCapellaEpoch(UInt64.ZERO);
tekuNodeConfigBuilder.withDenebEpoch(UInt64.ZERO);
tekuNodeConfigBuilder.withElectraEpoch(UInt64.ZERO);
break;
default:
// The test will reach this case whenever a new milestone is added but is not yet mapped in the
// switch. This forces us to remember to validate that every new milestone can start from a
// merged state.
fail("Milestone %s not used on merged genesis interop test", specMilestone);
}
return tekuNodeConfigBuilder;
}
}
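A possible tightening of the milestone guard, offered only as a sketch and not part of this commit: expressing the pre-Capella filter as a JUnit 5 assumption would make skipped milestones show up as "skipped" in the test report instead of passing silently. Everything below reuses the helpers already defined in the test class above; only the assumeTrue import is new.

import static org.junit.jupiter.api.Assumptions.assumeTrue;

@ParameterizedTest
@EnumSource(SpecMilestone.class)
void startFromMergedStatePerMilestone(final SpecMilestone specMilestone) throws Exception {
  // Skip (rather than silently pass) the pre-Merge milestones.
  assumeTrue(
      specMilestone.isGreaterThanOrEqualTo(SpecMilestone.CAPELLA),
      "Pre-Merge milestones are not exercised by this test");
  final TekuNodeConfig config =
      createTekuNodeBuilderForMilestone(specMilestone).withStubExecutionEngine().build();
  final TekuBeaconNode node = createTekuBeaconNode(config);
  node.start();
  node.waitForNonDefaultExecutionPayload();
  node.waitForNewBlock();
}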
TekuNodeConfigBuilder.java
@@ -55,6 +55,7 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.units.bigints.UInt256;
import tech.pegasys.teku.infrastructure.unsigned.UInt64;
import tech.pegasys.teku.networks.Eth2NetworkConfiguration;
@@ -145,6 +146,30 @@ public TekuNodeConfigBuilder withCapellaEpoch(final UInt64 capellaForkEpoch) {
return this;
}

public TekuNodeConfigBuilder withDenebEpoch(final UInt64 denebForkEpoch) {
mustBe(NodeType.BEACON_NODE);
LOG.debug("Xnetwork-deneb-fork-epoch={}", denebForkEpoch);
configMap.put("Xnetwork-deneb-fork-epoch", denebForkEpoch.toString());
specConfigModifier =
specConfigModifier.andThen(
specConfigBuilder ->
specConfigBuilder.denebBuilder(
denebBuilder -> denebBuilder.denebForkEpoch(denebForkEpoch)));
return this;
}

public TekuNodeConfigBuilder withElectraEpoch(final UInt64 electraForkEpoch) {
mustBe(NodeType.BEACON_NODE);
LOG.debug("Xnetwork-electra-fork-epoch={}", electraForkEpoch);
configMap.put("Xnetwork-electra-fork-epoch", electraForkEpoch.toString());
specConfigModifier =
specConfigModifier.andThen(
specConfigBuilder ->
specConfigBuilder.electraBuilder(
electraBuilder -> electraBuilder.electraForkEpoch(electraForkEpoch)));
return this;
}

public TekuNodeConfigBuilder withTrustedSetupFromClasspath(final String trustedSetup)
throws Exception {
mustBe(NodeType.BEACON_NODE);
@@ -171,16 +196,19 @@ public TekuNodeConfigBuilder withJwtSecretFile(final URL jwtFile) throws Exception
return this;
}

- public TekuNodeConfigBuilder withDenebEpoch(final UInt64 denebForkEpoch) {

+ public TekuNodeConfigBuilder withTerminalBlockHash(final String terminalBlockHash) {
mustBe(NodeType.BEACON_NODE);
- LOG.debug("Xnetwork-deneb-fork-epoch={}", denebForkEpoch);
- configMap.put("Xnetwork-deneb-fork-epoch", denebForkEpoch.toString());

+ LOG.debug("Xnetwork-terminal-block-hash-override={}", terminalBlockHash);
+ configMap.put("Xnetwork-terminal-block-hash-override", terminalBlockHash);

specConfigModifier =
specConfigModifier.andThen(
specConfigBuilder ->
- specConfigBuilder.denebBuilder(
- denebBuilder -> denebBuilder.denebForkEpoch(denebForkEpoch)));
+ specConfigBuilder.bellatrixBuilder(
+ bellatrixBuilder ->
+ bellatrixBuilder.terminalBlockHash(
+ Bytes32.fromHexString(terminalBlockHash))));
return this;
}

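For context, the new withDenebEpoch, withElectraEpoch, and withTerminalBlockHash setters follow the existing builder pattern: record the Xnetwork-* override in configMap and chain a matching spec-config modification. A minimal usage sketch follows, mirroring the acceptance test above rather than introducing anything new.

final TekuNodeConfig config =
    TekuNodeConfigBuilder.createBeaconNode()
        .withNetwork("minimal")
        .withAltairEpoch(UInt64.ZERO)
        .withBellatrixEpoch(UInt64.ZERO)
        .withCapellaEpoch(UInt64.ZERO)
        .withDenebEpoch(UInt64.ZERO) // added in this change
        .withElectraEpoch(UInt64.ZERO) // added in this change
        .withTotalTerminalDifficulty(0)
        .withTerminalBlockHash( // added in this change
            "0x00000000000000000000000000000000000000000000000000000000000000aa")
        .withStubExecutionEngine()
        .build();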
GenesisStateBuilder.java
@@ -18,19 +18,24 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.units.bigints.UInt256;
import tech.pegasys.teku.bls.BLSKeyPair;
import tech.pegasys.teku.infrastructure.bytes.Bytes20;
import tech.pegasys.teku.infrastructure.unsigned.UInt64;
import tech.pegasys.teku.spec.Spec;
import tech.pegasys.teku.spec.SpecMilestone;
import tech.pegasys.teku.spec.datastructures.execution.ExecutionPayloadHeader;
import tech.pegasys.teku.spec.datastructures.execution.ExecutionPayloadSummary;
import tech.pegasys.teku.spec.datastructures.operations.Deposit;
import tech.pegasys.teku.spec.datastructures.operations.DepositData;
import tech.pegasys.teku.spec.datastructures.state.beaconstate.BeaconState;
import tech.pegasys.teku.spec.datastructures.util.DepositGenerator;
import tech.pegasys.teku.spec.schemas.SchemaDefinitionsBellatrix;

public class GenesisStateBuilder {

private Spec spec;
private boolean signDeposits = true;
private UInt64 genesisTime = UInt64.ZERO;
@@ -39,10 +44,19 @@ public class GenesisStateBuilder {

public BeaconState build() {
checkNotNull(spec, "Must provide a spec");

// If our Genesis is post-Bellatrix, we must have a non-default Execution Payload Header (but we
// should not override if one has been specified)
if (executionPayloadHeader.isEmpty()
&& spec.getGenesisSpec().getMilestone().isGreaterThanOrEqualTo(SpecMilestone.CAPELLA)) {
executionPayloadHeader = Optional.of(mockExecutionPayloadHeader());
}

final Bytes32 eth1BlockHash =
executionPayloadHeader
.map(ExecutionPayloadSummary::getBlockHash)
.orElseGet(this::generateMockGenesisBlockHash);

final BeaconState initialState =
spec.initializeBeaconStateFromEth1(
eth1BlockHash, genesisTime, genesisDeposits, executionPayloadHeader);
@@ -126,4 +140,35 @@ public GenesisStateBuilder executionPayloadHeader(
private Bytes32 generateMockGenesisBlockHash() {
return Bytes32.repeat((byte) 0x42);
}

private ExecutionPayloadHeader mockExecutionPayloadHeader() {
return SchemaDefinitionsBellatrix.required(spec.getGenesisSchemaDefinitions())
.getExecutionPayloadHeaderSchema()
.createExecutionPayloadHeader(
b -> {
b.blockHash(generateMockGenesisBlockHash());
b.parentHash(Bytes32.ZERO);
b.feeRecipient(Bytes20.ZERO);
b.stateRoot(Bytes32.ZERO);
b.receiptsRoot(Bytes32.ZERO);
b.logsBloom(Bytes.repeat((byte) 0x00, 256));
b.prevRandao(Bytes32.ZERO);
b.blockNumber(UInt64.ZERO);
b.gasLimit(UInt64.ZERO);
b.gasUsed(UInt64.ZERO);
b.timestamp(UInt64.ZERO);
b.extraData(Bytes.repeat((byte) 0x00, 20));
b.baseFeePerGas(UInt256.ZERO);
b.transactionsRoot(Bytes32.ZERO);
// Capella
b.withdrawalsRoot(() -> Bytes32.ZERO);
// Deneb
b.excessBlobGas(() -> UInt64.ZERO);
b.blobGasUsed(() -> UInt64.ZERO);
// Electra
b.depositRequestsRoot(() -> Bytes32.ZERO);
b.withdrawalRequestsRoot(() -> Bytes32.ZERO);
b.consolidationRequestsRoot(() -> Bytes32.ZERO);
});
}
}
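With this change, a genesis state built for a Capella-or-later spec gets the mock execution payload header above injected automatically whenever the caller has not supplied one. A rough usage sketch follows; executionPayloadHeader(...) and build() are visible in the diff, while the other setter names are assumptions that may differ from the real builder API.

// Sketch only: names marked "assumed" are illustrative, not confirmed by the diff above.
final Spec spec = TestSpecFactory.createMinimalCapella(); // assumed factory method
final BeaconState genesisState =
    new GenesisStateBuilder()
        .spec(spec) // assumed setter name
        .addMockValidators(16) // assumed helper for registering interop validators
        .build(); // no executionPayloadHeader(...) given, so the mock header is used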
BlsToExecutionChangeGossipIntegrationTest.java
@@ -33,7 +33,10 @@
import tech.pegasys.teku.spec.TestSpecFactory;
import tech.pegasys.teku.spec.datastructures.operations.SignedBlsToExecutionChange;
import tech.pegasys.teku.spec.util.DataStructureUtil;
import tech.pegasys.teku.statetransition.BeaconChainUtil;
import tech.pegasys.teku.statetransition.validation.InternalValidationResult;
import tech.pegasys.teku.storage.client.MemoryOnlyRecentChainData;
import tech.pegasys.teku.storage.client.RecentChainData;

public class BlsToExecutionChangeGossipIntegrationTest {

@@ -91,8 +94,14 @@ public void shouldGossipBlsToExecutionChangesToPeers() throws Exception {
.containsExactly(signedBlsToExecutionChange));
}

@SuppressWarnings("deprecation")
private NodeManager createNodeManager(final Consumer<Eth2P2PNetworkBuilder> networkBuilder)
throws Exception {
- return NodeManager.create(spec, networkFactory, validatorKeys, networkBuilder);
+ final RecentChainData storageClient = MemoryOnlyRecentChainData.create(spec);
+ final BeaconChainUtil chainUtil = BeaconChainUtil.create(spec, storageClient, validatorKeys);
+ chainUtil.initializeStorage();
+ // Advancing chain to bypass "optimistic genesis" issue that prevents some gossip subscriptions
+ chainUtil.createAndImportBlockAtSlot(1);
+ return NodeManager.create(spec, networkFactory, networkBuilder, storageClient, chainUtil);
}
}
NodeManager.java
@@ -73,13 +73,22 @@ public static NodeManager create(
final List<BLSKeyPair> validatorKeys,
final Consumer<Eth2P2PNetworkBuilder> configureNetwork)
throws Exception {
- final EventChannels eventChannels =
- EventChannels.createSyncChannels(
- ChannelExceptionHandler.THROWING_HANDLER, new NoOpMetricsSystem());
final RecentChainData storageClient = MemoryOnlyRecentChainData.create(spec);

final BeaconChainUtil chainUtil = BeaconChainUtil.create(spec, storageClient, validatorKeys);
chainUtil.initializeStorage();
+ return create(spec, networkFactory, configureNetwork, storageClient, chainUtil);
+ }

+ public static NodeManager create(
+ final Spec spec,
+ final Eth2P2PNetworkFactory networkFactory,
+ final Consumer<Eth2P2PNetworkBuilder> configureNetwork,
+ final RecentChainData storageClient,
+ final BeaconChainUtil chainUtil)
+ throws Exception {
+ final EventChannels eventChannels =
+ EventChannels.createSyncChannels(
+ ChannelExceptionHandler.THROWING_HANDLER, new NoOpMetricsSystem());

final Eth2P2PNetworkBuilder networkBuilder =
networkFactory
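The NodeManager refactor keeps the original convenience overload (which builds its own RecentChainData and BeaconChainUtil) and adds an overload that accepts them from the caller, which is what the gossip test above relies on to import a block before the network is created. A short recap of that calling pattern, not additional code from this commit:

final RecentChainData storageClient = MemoryOnlyRecentChainData.create(spec);
final BeaconChainUtil chainUtil = BeaconChainUtil.create(spec, storageClient, validatorKeys);
chainUtil.initializeStorage();
chainUtil.createAndImportBlockAtSlot(1); // move past the "optimistic genesis" state
final NodeManager node =
    NodeManager.create(spec, networkFactory, networkBuilder -> {}, storageClient, chainUtil);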
StorageService.java
@@ -24,6 +24,7 @@
import tech.pegasys.teku.infrastructure.async.SafeFuture;
import tech.pegasys.teku.infrastructure.async.eventthread.AsyncRunnerEventThread;
import tech.pegasys.teku.infrastructure.events.EventChannels;
import tech.pegasys.teku.infrastructure.exceptions.InvalidConfigurationException;
import tech.pegasys.teku.infrastructure.metrics.SettableLabelledGauge;
import tech.pegasys.teku.infrastructure.metrics.TekuMetricCategory;
import tech.pegasys.teku.service.serviceutils.Service;
@@ -36,6 +37,7 @@
import tech.pegasys.teku.storage.server.ChainStorage;
import tech.pegasys.teku.storage.server.CombinedStorageChannelSplitter;
import tech.pegasys.teku.storage.server.Database;
import tech.pegasys.teku.storage.server.DatabaseVersion;
import tech.pegasys.teku.storage.server.DepositStorage;
import tech.pegasys.teku.storage.server.RetryingStorageUpdateChannel;
import tech.pegasys.teku.storage.server.StorageConfiguration;
@@ -117,24 +119,29 @@ protected SafeFuture<?> doStart() {
pruningActiveLabelledGauge));
}
if (config.getDataStorageMode().storesFinalizedStates()
- && config.getRetainedSlots() > -1) {
- LOG.info(
- "State pruner will run every: {} minute(s), retaining states for the last {} finalized slots. Limited to {} state prune per execution. ",
- config.getStatePruningInterval().toMinutes(),
- config.getRetainedSlots(),
- config.getStatePruningLimit());
- statePruner =
- Optional.of(
- new StatePruner(
- config.getSpec(),
- database,
- storagePrunerAsyncRunner,
- config.getStatePruningInterval(),
- config.getRetainedSlots(),
- config.getStatePruningLimit(),
- "state",
- pruningTimingsLabelledGauge,
- pruningActiveLabelledGauge));
+ && config.getRetainedSlots() > 0) {
+ if (config.getDataStorageCreateDbVersion() == DatabaseVersion.LEVELDB_TREE) {
+ throw new InvalidConfigurationException(
+ "State pruning is not supported with leveldb_tree database.");
+ } else {
+ LOG.info(
+ "State pruner will run every: {} minute(s), retaining states for the last {} finalized slots. Limited to {} state prune per execution. ",
+ config.getStatePruningInterval().toMinutes(),
+ config.getRetainedSlots(),
+ config.getStatePruningLimit());
+ statePruner =
+ Optional.of(
+ new StatePruner(
+ config.getSpec(),
+ database,
+ storagePrunerAsyncRunner,
+ config.getStatePruningInterval(),
+ config.getRetainedSlots(),
+ config.getStatePruningLimit(),
+ "state",
+ pruningTimingsLabelledGauge,
+ pruningActiveLabelledGauge));
+ }
}
if (config.getSpec().isMilestoneSupported(SpecMilestone.DENEB)) {
blobsPruner =
StorageConfiguration.java
@@ -39,7 +39,7 @@ public class StorageConfiguration {
public static final int DEFAULT_BLOCK_PRUNING_LIMIT = 5000;
public static final Duration DEFAULT_BLOBS_PRUNING_INTERVAL = Duration.ofMinutes(1);
public static final Duration DEFAULT_STATE_PRUNING_INTERVAL = Duration.ofMinutes(5);
- public static final long DEFAULT_STORAGE_RETAINED_SLOTS = -1;
+ public static final long DEFAULT_STORAGE_RETAINED_SLOTS = 0;
public static final int DEFAULT_STATE_PRUNING_LIMIT = 1;

// 60/12 = 5 blocks per minute * 6 max blobs per block = 30 blobs per minute at maximum, 15 as
@@ -275,7 +275,7 @@ public Builder blobsPruningLimit(final int blobsPruningLimit) {
}

public Builder retainedSlots(final long retainedSlots) {
- if (retainedSlots < -1) {
+ if (retainedSlots < 0) {
throw new InvalidConfigurationException(
"Invalid number of slots to retain finalized states for");
}
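Taken together, the last two files change the meaning of the retained-slots setting: the default moves from -1 to 0, values below 0 are rejected, and the state pruner is only constructed for strictly positive values (with leveldb_tree databases rejected outright). A condensed, illustrative restatement of that logic, not code from the commit:

import tech.pegasys.teku.infrastructure.exceptions.InvalidConfigurationException;

final class RetainedSlotsSemanticsSketch {
  // Mirrors StorageConfiguration.Builder.retainedSlots(...) above: negatives are invalid.
  static void validate(final long retainedSlots) {
    if (retainedSlots < 0) {
      throw new InvalidConfigurationException(
          "Invalid number of slots to retain finalized states for");
    }
  }

  // Mirrors the StorageService guard above: 0 (the new default) disables state pruning.
  static boolean statePruningEnabled(final boolean storesFinalizedStates, final long retainedSlots) {
    return storesFinalizedStates && retainedSlots > 0; // was "> -1" before this change
  }
}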
(1 additional changed file did not load and is not shown.)
