diff --git a/op-challenger2/.gitignore b/op-challenger2/.gitignore new file mode 100644 index 000000000000..9c44d9ca5fa5 --- /dev/null +++ b/op-challenger2/.gitignore @@ -0,0 +1,2 @@ +bin +.fault-game-address diff --git a/op-challenger2/Makefile b/op-challenger2/Makefile new file mode 100644 index 000000000000..874fd395fad4 --- /dev/null +++ b/op-challenger2/Makefile @@ -0,0 +1,34 @@ +GITCOMMIT ?= $(shell git rev-parse HEAD) +GITDATE ?= $(shell git show -s --format='%ct') +VERSION ?= v0.0.0 + +LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) +LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) +LDFLAGSSTRING +=-X main.Version=$(VERSION) +LDFLAGS := -ldflags "$(LDFLAGSSTRING)" + +# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 +ifeq ($(shell uname),Darwin) + FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic +endif + +op-challenger2: + env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/op-challenger2 ./cmd + +fuzz: + go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzKeccak ./game/keccak/matrix + +clean: + rm bin/op-challenger2 + +test: + go test -v ./... + +visualize: + ./scripts/visualize.sh + +.PHONY: \ + op-challenger2 \ + clean \ + test \ + visualize diff --git a/op-challenger2/README.md b/op-challenger2/README.md new file mode 100644 index 000000000000..1a10861e7a5b --- /dev/null +++ b/op-challenger2/README.md @@ -0,0 +1,176 @@ +# op-challenger2 + +The `op-challenger2` is a modular **op-stack** challenge agent written in +golang for dispute games including, but not limited to,attestation games, +fault games, and validity games. To learn more about dispute games, visit +the [fault proof specs][proof-specs]. + +[proof-specs]: https://specs.optimism.io/experimental/fault-proof/index.html + +## Quickstart + +To build the `op-challenger2`, run `make` (which executes the `make build` +[Makefile](./Makefile) target). 
To view a list of available commands and +options, run `./bin/op-challenger2 --help`. + +## Usage + +`op-challenger2` is configurable via command line flags and environment +variables. The help menu shows the available config options and can be +accessed by running `./op-challenger2 --help`. + +### Running with Cannon on Local Devnet + +To run `op-challenger2` against the local devnet, first clean and run +the devnet from the root of the repository. +```shell +make devnet-clean +make devnet-up +``` + +Then build the `op-challenger2` with `make op-challenger2`. + +Run the `op-challenger2` with: +```shell +DISPUTE_GAME_FACTORY=$(jq -r .DisputeGameFactoryProxy .devnet/addresses.json) +./op-challenger2/bin/op-challenger2 \ + --trace-type cannon \ + --l1-eth-rpc http://localhost:8545 \ + --rollup-rpc http://localhost:9546 \ + --game-factory-address $DISPUTE_GAME_FACTORY \ + --datadir temp/challenger-data \ + --cannon-rollup-config .devnet/rollup.json \ + --cannon-l2-genesis .devnet/genesis-l2.json \ + --cannon-bin ./cannon/bin/cannon \ + --cannon-server ./op-program/bin/op-program \ + --cannon-prestate ./op-program/bin/prestate.json \ + --l2-eth-rpc http://localhost:9545 \ + --mnemonic "test test test test test test test test test test test junk" \ + --hd-path "m/44'/60'/0'/0/8" \ + --num-confirmations 1 +``` + +The mnemonic and hd-path above is a prefunded address on the devnet. +The challenger will monitor dispute games and respond to any invalid +claims by posting the correct trace as the counter-claim. The commands +below can then be used to create and interact with games. + +## Subcommands + +The `op-challenger2` has a few subcommands to interact with on-chain +fault dispute games. The subcommands support game creation, performing +game moves, and viewing fault dispute game data. They should not be +used in production and are intended to provide convenient manual testing. 
+ +### create-game + +```shell +./bin/op-challenger2 create-game \ + --l1-eth-rpc \ + --game-address \ + --output-root \ + --l2-block-num \ + +``` + +Starts a new fault dispute game that disputes the latest output proposal +in the L2 output oracle. + +* `L1_ETH_RPC` - the RPC endpoint of the L1 endpoint to use (e.g. `http://localhost:8545`). +* `GAME_FACTORY_ADDRESS` - the address of the dispute game factory contract on L1. +* `OUTPUT_ROOT` a hex encoded 32 byte hash that is used as the proposed output root. +* `L2_BLOCK_NUM` the L2 block number the proposed output root is from. +* `SIGNER_ARGS` arguments to specify the key to sign transactions with (e.g `--private-key`) + +Optionally, you may specify the game type (aka "trace type") using the `--trace-type` +flag, which is set to the cannon trace type by default. + +### move + +The `move` subcommand can be run with either the `--attack` or `--defend` flag, +but not both. + +```shell +./bin/op-challenger2 move \ + --l1-eth-rpc \ + --game-address \ + --attack \ + --parent-index \ + --claim \ + +``` + +Performs a move to either attack or defend the latest claim in the specified game. + +* `L1_ETH_RPC` - the RPC endpoint of the L1 endpoint to use (e.g. `http://localhost:8545`). +* `GAME_ADDRESS` - the address of the dispute game to perform the move in. +* `(attack|defend)` - the type of move to make. + * `attack` indicates that the state hash in your local cannon trace differs to the state + hash included in the latest claim. + * `defend` indicates that the state hash in your local cannon trace matches the state hash + included in the latest claim. +* `PARENT_INDEX` - the index of the parent claim that will be countered by this new claim. + The special value of `latest` will counter the latest claim added to the game. +* `CLAIM` - the state hash to include in the counter-claim you are posting. 
+* `SIGNER_ARGS` arguments to specify the key to sign transactions with (e.g `--private-key`) + +### resolve-claim + +```shell +./bin/op-challenger2 resolve-claim \ + --l1-eth-rpc \ + --game-address \ + --claim \ + +``` + +Resolves a claim in a dispute game. Note that this will fail if the claim has already been resolved or if the claim is +not yet resolvable. If the claim is resolved successfully, the result is printed. + +* `L1_ETH_RPC` - the RPC endpoint of the L1 endpoint to use (e.g. `http://localhost:8545`). +* `GAME_ADDRESS` - the address of the dispute game to resolve. +* `CLAIM_INDEX` - the index of the claim to resolve. +* `SIGNER_ARGS` arguments to specify the key to sign transactions with (e.g `--private-key`). + +### resolve + +```shell +./bin/op-challenger2 resolve \ + --l1-eth-rpc \ + --game-address \ + +``` + +Resolves a dispute game. Note that this will fail if the dispute game has already +been resolved or if the clocks have not yet expired and further moves are possible. +If the game is resolved successfully, the result is printed. + +* `L1_ETH_RPC` - the RPC endpoint of the L1 endpoint to use (e.g. `http://localhost:8545`). +* `GAME_ADDRESS` - the address of the dispute game to resolve. +* `SIGNER_ARGS` arguments to specify the key to sign transactions with (e.g `--private-key`). + +### list-games + +```shell +./bin/op-challenger2 list-games \ + --l1-eth-rpc \ + --game-factory-address +``` + +Prints the games created by the game factory along with their current status. + +* `L1_ETH_RPC` - the RPC endpoint of the L1 endpoint to use (e.g. `http://localhost:8545`). +* `GAME_FACTORY_ADDRESS` - the address of the dispute game factory contract on L1. + +### list-claims + +```shell +./bin/op-challenger2 list-games \ + --l1-eth-rpc \ + --game-address +``` + +Prints the list of current claims in a dispute game. + +* `L1_ETH_RPC` - the RPC endpoint of the L1 endpoint to use (e.g. `http://localhost:8545`). 
+* `GAME_ADDRESS` - the address of the dispute game to list the move in. diff --git a/op-challenger2/challenger.go b/op-challenger2/challenger.go new file mode 100644 index 000000000000..ec30839d81eb --- /dev/null +++ b/op-challenger2/challenger.go @@ -0,0 +1,20 @@ +package op_challenger2 + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/game" + "github.com/ethereum-optimism/optimism/op-service/cliapp" +) + +// Main is the programmatic entry-point for running op-challenger2 with a given configuration. +func Main(ctx context.Context, logger log.Logger, cfg *config.Config, m metrics.Metricer) (cliapp.Lifecycle, error) { + if err := cfg.Check(); err != nil { + return nil, err + } + return game.NewService(ctx, logger, cfg, m) +} diff --git a/op-challenger2/challenger_test.go b/op-challenger2/challenger_test.go new file mode 100644 index 000000000000..7be1c66c15f2 --- /dev/null +++ b/op-challenger2/challenger_test.go @@ -0,0 +1,19 @@ +package op_challenger2 + +import ( + "context" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestMainShouldReturnErrorWhenConfigInvalid(t *testing.T) { + cfg := &config.Config{} + app, err := Main(context.Background(), testlog.Logger(t, log.LevelInfo), cfg, metrics.NoopMetrics) + require.ErrorIs(t, err, cfg.Check()) + require.Nil(t, app) +} diff --git a/op-challenger2/cmd/create_game.go b/op-challenger2/cmd/create_game.go new file mode 100644 index 000000000000..fd183ab7cf94 --- /dev/null +++ b/op-challenger2/cmd/create_game.go @@ -0,0 +1,80 @@ +package main + +import ( + "context" + "fmt" + + 
"github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/flags" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + contractMetrics "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + "github.com/ethereum-optimism/optimism/op-challenger2/tools" + opservice "github.com/ethereum-optimism/optimism/op-service" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/urfave/cli/v2" +) + +var ( + TraceTypeFlag = &cli.StringFlag{ + Name: "trace-type", + Usage: "Trace types to support.", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "TRACE_TYPE"), + Value: config.TraceTypeCannon.String(), + } + OutputRootFlag = &cli.StringFlag{ + Name: "output-root", + Usage: "The output root for the fault dispute game.", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "OUTPUT_ROOT"), + } + L2BlockNumFlag = &cli.StringFlag{ + Name: "l2-block-num", + Usage: "The l2 block number for the game.", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "L2_BLOCK_NUM"), + } +) + +func CreateGame(ctx *cli.Context) error { + outputRoot := common.HexToHash(ctx.String(OutputRootFlag.Name)) + traceType := ctx.Uint64(TraceTypeFlag.Name) + l2BlockNum := ctx.Uint64(L2BlockNumFlag.Name) + + contract, txMgr, err := NewContractWithTxMgr[*contracts.DisputeGameFactoryContract](ctx, flags.FactoryAddressFlag.Name, + func(ctx context.Context, metricer contractMetrics.ContractMetricer, address common.Address, caller *batching.MultiCaller) (*contracts.DisputeGameFactoryContract, error) { + return contracts.NewDisputeGameFactoryContract(metricer, address, caller), nil + }) + if err != nil { + return fmt.Errorf("failed to create dispute game factory bindings: %w", err) + } + + creator := 
tools.NewGameCreator(contract, txMgr) + gameAddr, err := creator.CreateGame(ctx.Context, outputRoot, traceType, l2BlockNum) + if err != nil { + return fmt.Errorf("failed to create game: %w", err) + } + fmt.Printf("Fetched Game Address: %s\n", gameAddr.String()) + return nil +} + +func createGameFlags() []cli.Flag { + cliFlags := []cli.Flag{ + flags.L1EthRpcFlag, + flags.FactoryAddressFlag, + TraceTypeFlag, + OutputRootFlag, + L2BlockNumFlag, + } + cliFlags = append(cliFlags, txmgr.CLIFlagsWithDefaults(flags.EnvVarPrefix, txmgr.DefaultChallengerFlagValues)...) + cliFlags = append(cliFlags, oplog.CLIFlags(flags.EnvVarPrefix)...) + return cliFlags +} + +var CreateGameCommand = &cli.Command{ + Name: "create-game", + Usage: "Creates a dispute game via the factory", + Description: "Creates a dispute game via the factory", + Action: CreateGame, + Flags: createGameFlags(), +} diff --git a/op-challenger2/cmd/list_claims.go b/op-challenger2/cmd/list_claims.go new file mode 100644 index 000000000000..4324da9b7fd9 --- /dev/null +++ b/op-challenger2/cmd/list_claims.go @@ -0,0 +1,190 @@ +package main + +import ( + "context" + "fmt" + "math/big" + "strconv" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/flags" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + opservice "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/dial" + "github.com/ethereum-optimism/optimism/op-service/eth" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum/go-ethereum/common" + "github.com/urfave/cli/v2" +) + +var ( + GameAddressFlag = &cli.StringFlag{ + Name: 
"game-address", + Usage: "Address of the fault game contract.", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "GAME_ADDRESS"), + } + VerboseFlag = &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "Verbose output", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "VERBOSE"), + } +) + +func ListClaims(ctx *cli.Context) error { + logger, err := setupLogging(ctx) + if err != nil { + return err + } + rpcUrl := ctx.String(flags.L1EthRpcFlag.Name) + if rpcUrl == "" { + return fmt.Errorf("missing %v", flags.L1EthRpcFlag.Name) + } + gameAddr, err := opservice.ParseAddress(ctx.String(GameAddressFlag.Name)) + if err != nil { + return err + } + + l1Client, err := dial.DialEthClientWithTimeout(ctx.Context, dial.DefaultDialTimeout, logger, rpcUrl) + if err != nil { + return fmt.Errorf("failed to dial L1: %w", err) + } + defer l1Client.Close() + + caller := batching.NewMultiCaller(l1Client.Client(), batching.DefaultBatchSize) + contract, err := contracts.NewFaultDisputeGameContract(ctx.Context, metrics.NoopContractMetrics, gameAddr, caller) + if err != nil { + return err + } + return listClaims(ctx.Context, contract, ctx.Bool(VerboseFlag.Name)) +} + +func listClaims(ctx context.Context, game contracts.FaultDisputeGameContract, verbose bool) error { + metadata, err := game.GetGameMetadata(ctx, rpcblock.Latest) + if err != nil { + return fmt.Errorf("failed to retrieve metadata: %w", err) + } + maxDepth, err := game.GetMaxGameDepth(ctx) + if err != nil { + return fmt.Errorf("failed to retrieve max depth: %w", err) + } + maxClockDuration, err := game.GetMaxClockDuration(ctx) + if err != nil { + return fmt.Errorf("failed to retrieve max clock duration: %w", err) + } + splitDepth, err := game.GetSplitDepth(ctx) + if err != nil { + return fmt.Errorf("failed to retrieve split depth: %w", err) + } + status := metadata.Status + l2StartBlockNum, l2BlockNum, err := game.GetBlockRange(ctx) + if err != nil { + return fmt.Errorf("failed to retrieve status: %w", 
err) + } + + claims, err := game.GetAllClaims(ctx, rpcblock.Latest) + if err != nil { + return fmt.Errorf("failed to retrieve claims: %w", err) + } + + // The top game runs from depth 0 to split depth *inclusive*. + // The - 1 here accounts for the fact that the split depth is included in the top game. + bottomDepth := maxDepth - splitDepth - 1 + + resolved, err := game.IsResolved(ctx, rpcblock.Latest, claims...) + if err != nil { + return fmt.Errorf("failed to retrieve claim resolution: %w", err) + } + + gameState := types.NewGameState(claims, maxDepth) + valueFormat := "%-14v" + if verbose { + valueFormat = "%-66v" + } + now := time.Now() + lineFormat := "%3v %-7v %6v %5v %14v " + valueFormat + " %-42v %12v %-19v %10v %v\n" + info := fmt.Sprintf(lineFormat, "Idx", "Move", "Parent", "Depth", "Index", "Value", "Claimant", "Bond (ETH)", "Time", "Clock Used", "Resolution") + for i, claim := range claims { + pos := claim.Position + parent := strconv.Itoa(claim.ParentContractIndex) + var elapsed time.Duration // Root claim does not accumulate any time on its team's chess clock + if claim.IsRoot() { + parent = "" + } else { + parentClaim, err := gameState.GetParent(claim) + if err != nil { + return fmt.Errorf("failed to retrieve parent claim: %w", err) + } + // Get the total chess clock time accumulated by the team that posted this claim at the time of the claim. 
+ elapsed = gameState.ChessClock(claim.Clock.Timestamp, parentClaim) + } + var countered string + if !resolved[i] { + clock := gameState.ChessClock(now, claim) + resolvableAt := now.Add(maxClockDuration - clock).Format(time.DateTime) + countered = fmt.Sprintf("⏱️ %v", resolvableAt) + } else if claim.IsRoot() && metadata.L2BlockNumberChallenged { + countered = "❌ " + metadata.L2BlockNumberChallenger.Hex() + } else if claim.CounteredBy != (common.Address{}) { + countered = "❌ " + claim.CounteredBy.Hex() + } else { + countered = "✅" + } + move := "Attack" + if gameState.DefendsParent(claim) { + move = "Defend" + } + var traceIdx *big.Int + if claim.Depth() <= splitDepth { + traceIdx = claim.TraceIndex(splitDepth) + } else { + relativePos, err := claim.Position.RelativeToAncestorAtDepth(splitDepth + 1) + if err != nil { + fmt.Printf("Error calculating relative position for claim %v: %v", claim.ContractIndex, err) + traceIdx = big.NewInt(-1) + } else { + traceIdx = relativePos.TraceIndex(bottomDepth) + } + } + value := claim.Value.TerminalString() + if verbose { + value = claim.Value.Hex() + } + timestamp := claim.Clock.Timestamp.Format(time.DateTime) + bond := fmt.Sprintf("%12.8f", eth.WeiToEther(claim.Bond)) + if verbose { + bond = fmt.Sprintf("%f", eth.WeiToEther(claim.Bond)) + } + info = info + fmt.Sprintf(lineFormat, + i, move, parent, pos.Depth(), traceIdx, value, claim.Claimant, bond, timestamp, elapsed, countered) + } + blockNumChallenger := "L2 Block: Unchallenged" + if metadata.L2BlockNumberChallenged { + blockNumChallenger = "L2 Block: ❌ " + metadata.L2BlockNumberChallenger.Hex() + } + fmt.Printf("Status: %v • L2 Blocks: %v to %v • Split Depth: %v • Max Depth: %v • %v • Claim Count: %v\n%v\n", + status, l2StartBlockNum, l2BlockNum, splitDepth, maxDepth, blockNumChallenger, len(claims), info) + return nil +} + +func listClaimsFlags() []cli.Flag { + cliFlags := []cli.Flag{ + flags.L1EthRpcFlag, + GameAddressFlag, + VerboseFlag, + } + cliFlags = append(cliFlags, 
oplog.CLIFlags(flags.EnvVarPrefix)...) + return cliFlags +} + +var ListClaimsCommand = &cli.Command{ + Name: "list-claims", + Usage: "List the claims in a dispute game", + Description: "Lists the claims in a dispute game", + Action: ListClaims, + Flags: listClaimsFlags(), +} diff --git a/op-challenger2/cmd/list_games.go b/op-challenger2/cmd/list_games.go new file mode 100644 index 000000000000..642a891102d7 --- /dev/null +++ b/op-challenger2/cmd/list_games.go @@ -0,0 +1,187 @@ +package main + +import ( + "cmp" + "context" + "fmt" + "slices" + "sync" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/flags" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + opservice "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/dial" + openum "github.com/ethereum-optimism/optimism/op-service/enum" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum/go-ethereum/common" + "github.com/urfave/cli/v2" +) + +var ColumnTypes = []string{"time", "claimCount", "l2BlockNum"} + +var ( + SortByFlag = &cli.StringFlag{ + Name: "sort-by", + Usage: "Sort games by column. Valid options: " + openum.EnumString(ColumnTypes), + Value: "time", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "SORT_BY"), + } + SortOrderFlag = &cli.StringFlag{ + Name: "sort-order", + Usage: "Sort order for games. 
Valid options: 'asc' or 'desc'.", + Value: "asc", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "SORT_ORDER"), + } +) + +func ListGames(ctx *cli.Context) error { + logger, err := setupLogging(ctx) + if err != nil { + return err + } + rpcUrl := ctx.String(flags.L1EthRpcFlag.Name) + if rpcUrl == "" { + return fmt.Errorf("missing %v", flags.L1EthRpcFlag.Name) + } + factoryAddr, err := opservice.ParseAddress(ctx.String(flags.FactoryAddressFlag.Name)) + if err != nil { + return err + } + sortBy := ctx.String(SortByFlag.Name) + if sortBy != "" && !slices.Contains(ColumnTypes, sortBy) { + return fmt.Errorf("invalid sort-by value: %v", sortBy) + } + sortOrder := ctx.String(SortOrderFlag.Name) + if sortOrder != "" && sortOrder != "asc" && sortOrder != "desc" { + return fmt.Errorf("invalid sort-order value: %v", sortOrder) + } + + gameWindow := ctx.Duration(flags.GameWindowFlag.Name) + + l1Client, err := dial.DialEthClientWithTimeout(ctx.Context, dial.DefaultDialTimeout, logger, rpcUrl) + if err != nil { + return fmt.Errorf("failed to dial L1: %w", err) + } + defer l1Client.Close() + + caller := batching.NewMultiCaller(l1Client.Client(), batching.DefaultBatchSize) + contract := contracts.NewDisputeGameFactoryContract(metrics.NoopContractMetrics, factoryAddr, caller) + head, err := l1Client.HeaderByNumber(ctx.Context, nil) + if err != nil { + return fmt.Errorf("failed to retrieve current head block: %w", err) + } + return listGames(ctx.Context, caller, contract, head.Hash(), gameWindow, sortBy, sortOrder) +} + +type gameInfo struct { + types.GameMetadata + claimCount uint64 + l2BlockNum uint64 + rootClaim common.Hash + status types.GameStatus + err error +} + +func listGames(ctx context.Context, caller *batching.MultiCaller, factory *contracts.DisputeGameFactoryContract, block common.Hash, gameWindow time.Duration, sortBy, sortOrder string) error { + earliestTimestamp := clock.MinCheckedTimestamp(clock.SystemClock, gameWindow) + games, err := 
factory.GetGamesAtOrAfter(ctx, block, earliestTimestamp) + if err != nil { + return fmt.Errorf("failed to retrieve games: %w", err) + } + slices.Reverse(games) + + infos := make([]gameInfo, len(games)) + var wg sync.WaitGroup + for idx, game := range games { + gameContract, err := contracts.NewFaultDisputeGameContract(ctx, metrics.NoopContractMetrics, game.Proxy, caller) + if err != nil { + return fmt.Errorf("failed to create dispute game contract: %w", err) + } + info := gameInfo{GameMetadata: game} + infos[idx] = info + gameProxy := game.Proxy + currIndex := idx + wg.Add(1) + go func() { + defer wg.Done() + metadata, err := gameContract.GetGameMetadata(ctx, rpcblock.ByHash(block)) + if err != nil { + info.err = fmt.Errorf("failed to retrieve metadata for game %v: %w", gameProxy, err) + return + } + infos[currIndex].status = metadata.Status + infos[currIndex].l2BlockNum = metadata.L2BlockNum + infos[currIndex].rootClaim = metadata.RootClaim + claimCount, err := gameContract.GetClaimCount(ctx) + if err != nil { + info.err = fmt.Errorf("failed to retrieve claim count for game %v: %w", gameProxy, err) + return + } + infos[currIndex].claimCount = claimCount + }() + } + wg.Wait() + lineFormat := "%3v %-42v %4v %-21v %14v %-66v %6v %-14v\n" + fmt.Printf(lineFormat, "Idx", "Game", "Type", "Created (Local)", "L2 Block", "Output Root", "Claims", "Status") + + // Sort infos by the specified column + switch sortBy { + case "time": + slices.SortFunc(infos, func(i, j gameInfo) int { + if sortOrder == "desc" { + return cmp.Compare(j.Timestamp, i.Timestamp) + } + return cmp.Compare(i.Timestamp, j.Timestamp) + }) + case "claimCount": + slices.SortFunc(infos, func(i, j gameInfo) int { + if sortOrder == "desc" { + return cmp.Compare(j.claimCount, i.claimCount) + } + return cmp.Compare(i.claimCount, j.claimCount) + }) + case "l2BlockNum": + slices.SortFunc(infos, func(i, j gameInfo) int { + if sortOrder == "desc" { + return cmp.Compare(j.l2BlockNum, i.l2BlockNum) + } + return 
cmp.Compare(i.l2BlockNum, j.l2BlockNum) + }) + } + + for _, game := range infos { + if game.err != nil { + return game.err + } + created := time.Unix(int64(game.Timestamp), 0).Format(time.DateTime) + fmt.Printf(lineFormat, + game.Index, game.Proxy, game.GameType, created, game.l2BlockNum, game.rootClaim, game.claimCount, game.status) + } + return nil +} + +func listGamesFlags() []cli.Flag { + cliFlags := []cli.Flag{ + SortByFlag, + SortOrderFlag, + flags.L1EthRpcFlag, + flags.FactoryAddressFlag, + flags.GameWindowFlag, + } + cliFlags = append(cliFlags, oplog.CLIFlags(flags.EnvVarPrefix)...) + return cliFlags +} + +var ListGamesCommand = &cli.Command{ + Name: "list-games", + Usage: "List the games created by a dispute game factory", + Description: "Lists the games created by a dispute game factory", + Action: ListGames, + Flags: listGamesFlags(), +} diff --git a/op-challenger2/cmd/main.go b/op-challenger2/cmd/main.go new file mode 100644 index 000000000000..ff51cc9c6d47 --- /dev/null +++ b/op-challenger2/cmd/main.go @@ -0,0 +1,80 @@ +package main + +import ( + "context" + "os" + + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/urfave/cli/v2" + + "github.com/ethereum/go-ethereum/log" + + challenger "github.com/ethereum-optimism/optimism/op-challenger2" + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/flags" + "github.com/ethereum-optimism/optimism/op-challenger2/version" + opservice "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/cliapp" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/opio" +) + +var ( + GitCommit = "" + GitDate = "" +) + +// VersionWithMeta holds the textual version string including the metadata. 
+var VersionWithMeta = opservice.FormatVersion(version.Version, GitCommit, GitDate, version.Meta) + +func main() { + args := os.Args + ctx := opio.WithInterruptBlocker(context.Background()) + if err := run(ctx, args, func(ctx context.Context, l log.Logger, config *config.Config) (cliapp.Lifecycle, error) { + return challenger.Main(ctx, l, config, metrics.NewMetrics()) + }); err != nil { + log.Crit("Application failed", "err", err) + } +} + +type ConfiguredLifecycle func(ctx context.Context, log log.Logger, config *config.Config) (cliapp.Lifecycle, error) + +func run(ctx context.Context, args []string, action ConfiguredLifecycle) error { + oplog.SetupDefaults() + + app := cli.NewApp() + app.Version = VersionWithMeta + app.Flags = cliapp.ProtectFlags(flags.Flags) + app.Name = "op-challenger2" + app.Usage = "Challenge outputs" + app.Description = "Ensures that on chain outputs are correct." + app.Commands = []*cli.Command{ + ListGamesCommand, + ListClaimsCommand, + CreateGameCommand, + MoveCommand, + ResolveCommand, + ResolveClaimCommand, + } + app.Action = cliapp.LifecycleCmd(func(ctx *cli.Context, close context.CancelCauseFunc) (cliapp.Lifecycle, error) { + logger, err := setupLogging(ctx) + if err != nil { + return nil, err + } + logger.Info("Starting op-challenger2", "version", VersionWithMeta) + + cfg, err := flags.NewConfigFromCLI(ctx, logger) + if err != nil { + return nil, err + } + return action(ctx.Context, logger, cfg) + }) + return app.RunContext(ctx, args) +} + +func setupLogging(ctx *cli.Context) (log.Logger, error) { + logCfg := oplog.ReadCLIConfig(ctx) + logger := oplog.NewLogger(oplog.AppOut(ctx), logCfg) + oplog.SetGlobalLogHandler(logger.Handler()) + return logger, nil +} diff --git a/op-challenger2/cmd/main_test.go b/op-challenger2/cmd/main_test.go new file mode 100644 index 000000000000..0bf249043283 --- /dev/null +++ b/op-challenger2/cmd/main_test.go @@ -0,0 +1,808 @@ +package main + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + 
+ "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/txmgr" +) + +var ( + l1EthRpc = "http://example.com:8545" + l1Beacon = "http://example.com:9000" + gameFactoryAddressValue = "0xbb00000000000000000000000000000000000000" + cannonNetwork = "op-mainnet" + testNetwork = "op-sepolia" + l2EthRpc = "http://example.com:9545" + cannonBin = "./bin/cannon" + cannonServer = "./bin/op-program" + cannonPreState = "./pre.json" + datadir = "./test_data" + rollupRpc = "http://example.com:8555" + asteriscNetwork = "op-mainnet" + asteriscBin = "./bin/asterisc" + asteriscServer = "./bin/op-program" + asteriscPreState = "./pre.json" +) + +func TestLogLevel(t *testing.T) { + t.Run("RejectInvalid", func(t *testing.T) { + verifyArgsInvalid(t, "unknown level: foo", addRequiredArgs(config.TraceTypeAlphabet, "--log.level=foo")) + }) + + for _, lvl := range []string{"trace", "debug", "info", "error", "crit"} { + lvl := lvl + t.Run("AcceptValid_"+lvl, func(t *testing.T) { + logger, _, err := dryRunWithArgs(addRequiredArgs(config.TraceTypeAlphabet, "--log.level", lvl)) + require.NoError(t, err) + require.NotNil(t, logger) + }) + } +} + +func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet)) + defaultCfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, l1Beacon, rollupRpc, l2EthRpc, datadir, config.TraceTypeAlphabet) + require.Equal(t, defaultCfg, cfg) +} + +func TestDefaultConfigIsValid(t *testing.T) { + cfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, l1Beacon, rollupRpc, l2EthRpc, datadir, config.TraceTypeAlphabet) + require.NoError(t, cfg.Check()) +} + +func TestL1ETHRPCAddress(t *testing.T) { + t.Run("Required", 
func(t *testing.T) { + verifyArgsInvalid(t, "flag l1-eth-rpc is required", addRequiredArgsExcept(config.TraceTypeAlphabet, "--l1-eth-rpc")) + }) + + t.Run("Valid", func(t *testing.T) { + url := "http://example.com:8888" + cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--l1-eth-rpc", "--l1-eth-rpc="+url)) + require.Equal(t, url, cfg.L1EthRpc) + require.Equal(t, url, cfg.TxMgrConfig.L1RPCURL) + }) +} + +func TestL1Beacon(t *testing.T) { + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag l1-beacon is required", addRequiredArgsExcept(config.TraceTypeAlphabet, "--l1-beacon")) + }) + + t.Run("Valid", func(t *testing.T) { + url := "http://example.com:8888" + cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--l1-beacon", "--l1-beacon="+url)) + require.Equal(t, url, cfg.L1Beacon) + }) +} + +func TestTraceType(t *testing.T) { + t.Run("Default", func(t *testing.T) { + expectedDefault := config.TraceTypeCannon + cfg := configForArgs(t, addRequiredArgsExcept(expectedDefault, "--trace-type")) + require.Equal(t, []config.TraceType{expectedDefault}, cfg.TraceTypes) + }) + + for _, traceType := range config.TraceTypes { + traceType := traceType + t.Run("Valid_"+traceType.String(), func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) + require.Equal(t, []config.TraceType{traceType}, cfg.TraceTypes) + }) + } + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid(t, "unknown trace type: \"foo\"", addRequiredArgsExcept(config.TraceTypeAlphabet, "--trace-type", "--trace-type=foo")) + }) +} + +func TestMultipleTraceTypes(t *testing.T) { + t.Run("WithAllOptions", func(t *testing.T) { + argsMap := requiredArgs(config.TraceTypeCannon) + // Add Asterisc required flags + addRequiredAsteriscArgs(argsMap) + args := toArgList(argsMap) + // Add extra trace types (cannon is already specified) + args = append(args, + "--trace-type", config.TraceTypeAlphabet.String()) + args = append(args, + 
"--trace-type", config.TraceTypePermissioned.String()) + args = append(args, + "--trace-type", config.TraceTypeAsterisc.String()) + cfg := configForArgs(t, args) + require.Equal(t, []config.TraceType{config.TraceTypeCannon, config.TraceTypeAlphabet, config.TraceTypePermissioned, config.TraceTypeAsterisc}, cfg.TraceTypes) + }) + t.Run("WithSomeOptions", func(t *testing.T) { + argsMap := requiredArgs(config.TraceTypeCannon) + args := toArgList(argsMap) + // Add extra trace types (cannon is already specified) + args = append(args, + "--trace-type", config.TraceTypeAlphabet.String()) + cfg := configForArgs(t, args) + require.Equal(t, []config.TraceType{config.TraceTypeCannon, config.TraceTypeAlphabet}, cfg.TraceTypes) + }) + + t.Run("SpecifySameOptionMultipleTimes", func(t *testing.T) { + argsMap := requiredArgs(config.TraceTypeCannon) + args := toArgList(argsMap) + // Add cannon trace type again + args = append(args, "--trace-type", config.TraceTypeCannon.String()) + // We're fine with the same option being listed multiple times, just deduplicate them. 
+ cfg := configForArgs(t, args) + require.Equal(t, []config.TraceType{config.TraceTypeCannon}, cfg.TraceTypes) + }) +} + +func TestGameFactoryAddress(t *testing.T) { + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag game-factory-address is required", addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-factory-address")) + }) + + t.Run("Valid", func(t *testing.T) { + addr := common.Address{0xbb, 0xcc, 0xdd} + cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-factory-address", "--game-factory-address="+addr.Hex())) + require.Equal(t, addr, cfg.GameFactoryAddress) + }) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid(t, "invalid address: foo", addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-factory-address", "--game-factory-address=foo")) + }) +} + +func TestGameAllowlist(t *testing.T) { + t.Run("Optional", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-allowlist")) + require.NoError(t, cfg.Check()) + }) + + t.Run("Valid", func(t *testing.T) { + addr := common.Address{0xbb, 0xcc, 0xdd} + cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-allowlist", "--game-allowlist="+addr.Hex())) + require.Contains(t, cfg.GameAllowlist, addr) + }) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid(t, "invalid address: foo", addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-allowlist", "--game-allowlist=foo")) + }) +} + +func TestTxManagerFlagsSupported(t *testing.T) { + // Not a comprehensive list of flags, just enough to sanity check the txmgr.CLIFlags were defined + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--"+txmgr.NumConfirmationsFlagName, "7")) + require.Equal(t, uint64(7), cfg.TxMgrConfig.NumConfirmations) +} + +func TestMaxConcurrency(t *testing.T) { + t.Run("Valid", func(t *testing.T) { + expected := uint(345) + cfg := configForArgs(t, 
addRequiredArgs(config.TraceTypeAlphabet, "--max-concurrency", "345")) + require.Equal(t, expected, cfg.MaxConcurrency) + }) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid( + t, + "invalid value \"abc\" for flag -max-concurrency", + addRequiredArgs(config.TraceTypeAlphabet, "--max-concurrency", "abc")) + }) + + t.Run("Zero", func(t *testing.T) { + verifyArgsInvalid( + t, + "max-concurrency must not be 0", + addRequiredArgs(config.TraceTypeAlphabet, "--max-concurrency", "0")) + }) +} + +func TestMaxPendingTx(t *testing.T) { + t.Run("Valid", func(t *testing.T) { + expected := uint64(345) + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--max-pending-tx", "345")) + require.Equal(t, expected, cfg.MaxPendingTx) + }) + + t.Run("Zero", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--max-pending-tx", "0")) + require.Equal(t, uint64(0), cfg.MaxPendingTx) + }) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid( + t, + "invalid value \"abc\" for flag -max-pending-tx", + addRequiredArgs(config.TraceTypeAlphabet, "--max-pending-tx", "abc")) + }) +} + +func TestPollInterval(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon)) + require.Equal(t, config.DefaultPollInterval, cfg.PollInterval) + }) + + t.Run("Valid", func(t *testing.T) { + expected := 100 * time.Second + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--http-poll-interval", "100s")) + require.Equal(t, expected, cfg.PollInterval) + }) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid( + t, + "invalid value \"abc\" for flag -http-poll-interval", + addRequiredArgs(config.TraceTypeAlphabet, "--http-poll-interval", "abc")) + }) +} + +func TestAsteriscRequiredArgs(t *testing.T) { + for _, traceType := range []config.TraceType{config.TraceTypeAsterisc} { + traceType := traceType + t.Run(fmt.Sprintf("TestAsteriscBin-%v", traceType), 
func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--asterisc-bin")) + }) + + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag asterisc-bin is required", addRequiredArgsExcept(traceType, "--asterisc-bin")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-bin", "--asterisc-bin=./asterisc")) + require.Equal(t, "./asterisc", cfg.AsteriscBin) + }) + }) + + t.Run(fmt.Sprintf("TestAsteriscServer-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--asterisc-server")) + }) + + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag asterisc-server is required", addRequiredArgsExcept(traceType, "--asterisc-server")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-server", "--asterisc-server=./op-program")) + require.Equal(t, "./op-program", cfg.AsteriscServer) + }) + }) + + t.Run(fmt.Sprintf("TestAsteriscAbsolutePrestate-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--asterisc-prestate")) + }) + + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag asterisc-prestates-url or asterisc-prestate is required", addRequiredArgsExcept(traceType, "--asterisc-prestate")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-prestate", "--asterisc-prestate=./pre.json")) + require.Equal(t, "./pre.json", cfg.AsteriscAbsolutePreState) + }) + }) + + t.Run(fmt.Sprintf("TestAsteriscAbsolutePrestateBaseURL-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, 
addRequiredArgsExcept(config.TraceTypeAlphabet, "--asterisc-prestates-url")) + }) + + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag asterisc-prestates-url or asterisc-prestate is required", addRequiredArgsExcept(traceType, "--asterisc-prestate")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-prestates-url", "--asterisc-prestates-url=http://localhost/bar")) + require.Equal(t, "http://localhost/bar", cfg.AsteriscAbsolutePreStateBaseURL.String()) + }) + }) + + t.Run(fmt.Sprintf("TestL2Rpc-%v", traceType), func(t *testing.T) { + t.Run("RequiredForAsteriscTrace", func(t *testing.T) { + verifyArgsInvalid(t, "flag l2-eth-rpc is required", addRequiredArgsExcept(traceType, "--l2-eth-rpc")) + }) + + t.Run("ValidLegacy", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--l2-eth-rpc", fmt.Sprintf("--cannon-l2=%s", l2EthRpc))) + require.Equal(t, l2EthRpc, cfg.L2Rpc) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) + require.Equal(t, l2EthRpc, cfg.L2Rpc) + }) + + t.Run("InvalidUsingBothFlags", func(t *testing.T) { + verifyArgsInvalid(t, "flag cannon-l2 and l2-eth-rpc must not be both set", addRequiredArgsExcept(traceType, "", fmt.Sprintf("--cannon-l2=%s", l2EthRpc))) + }) + }) + + t.Run(fmt.Sprintf("TestAsteriscSnapshotFreq-%v", traceType), func(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) + require.Equal(t, config.DefaultAsteriscSnapshotFreq, cfg.AsteriscSnapshotFreq) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType, "--asterisc-snapshot-freq=1234")) + require.Equal(t, uint(1234), cfg.AsteriscSnapshotFreq) + }) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid(t, "invalid value \"abc\" for flag -asterisc-snapshot-freq", + addRequiredArgs(traceType, "--asterisc-snapshot-freq=abc")) 
+ }) + }) + + t.Run(fmt.Sprintf("TestAsteriscInfoFreq-%v", traceType), func(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) + require.Equal(t, config.DefaultAsteriscInfoFreq, cfg.AsteriscInfoFreq) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType, "--asterisc-info-freq=1234")) + require.Equal(t, uint(1234), cfg.AsteriscInfoFreq) + }) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid(t, "invalid value \"abc\" for flag -asterisc-info-freq", + addRequiredArgs(traceType, "--asterisc-info-freq=abc")) + }) + }) + + t.Run(fmt.Sprintf("TestRequireEitherAsteriscNetworkOrRollupAndGenesis-%v", traceType), func(t *testing.T) { + verifyArgsInvalid( + t, + "flag asterisc-network or asterisc-rollup-config and asterisc-l2-genesis is required", + addRequiredArgsExcept(traceType, "--asterisc-network")) + verifyArgsInvalid( + t, + "flag asterisc-network or asterisc-rollup-config and asterisc-l2-genesis is required", + addRequiredArgsExcept(traceType, "--asterisc-network", "--asterisc-rollup-config=rollup.json")) + verifyArgsInvalid( + t, + "flag asterisc-network or asterisc-rollup-config and asterisc-l2-genesis is required", + addRequiredArgsExcept(traceType, "--asterisc-network", "--asterisc-l2-genesis=gensis.json")) + }) + + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { + verifyArgsInvalid( + t, + "flag asterisc-network can not be used with asterisc-rollup-config and asterisc-l2-genesis", + addRequiredArgsExcept(traceType, "--asterisc-network", + "--asterisc-network", asteriscNetwork, "--asterisc-rollup-config=rollup.json")) + }) + + t.Run(fmt.Sprintf("TestAsteriscNetwork-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--asterisc-network")) + }) + + t.Run("NotRequiredWhenRollupAndGenesIsSpecified", 
func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-network", + "--asterisc-rollup-config=rollup.json", "--asterisc-l2-genesis=genesis.json")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-network", "--asterisc-network", testNetwork)) + require.Equal(t, testNetwork, cfg.AsteriscNetwork) + }) + }) + + t.Run(fmt.Sprintf("TestAsteriscRollupConfig-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--asterisc-rollup-config")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-network", "--asterisc-rollup-config=rollup.json", "--asterisc-l2-genesis=genesis.json")) + require.Equal(t, "rollup.json", cfg.AsteriscRollupConfigPath) + }) + }) + + t.Run(fmt.Sprintf("TestAsteriscL2Genesis-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--asterisc-l2-genesis")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-network", "--asterisc-rollup-config=rollup.json", "--asterisc-l2-genesis=genesis.json")) + require.Equal(t, "genesis.json", cfg.AsteriscL2GenesisPath) + }) + }) + } +} + +func TestAlphabetRequiredArgs(t *testing.T) { + t.Run(fmt.Sprintf("TestL2Rpc-%v", config.TraceTypeAlphabet), func(t *testing.T) { + t.Run("RequiredForAlphabetTrace", func(t *testing.T) { + verifyArgsInvalid(t, "flag l2-eth-rpc is required", addRequiredArgsExcept(config.TraceTypeAlphabet, "--l2-eth-rpc")) + }) + + t.Run("ValidLegacy", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--l2-eth-rpc", fmt.Sprintf("--cannon-l2=%s", l2EthRpc))) + require.Equal(t, l2EthRpc, cfg.L2Rpc) + }) + + t.Run("Valid", 
func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet)) + require.Equal(t, l2EthRpc, cfg.L2Rpc) + }) + }) +} + +func TestCannonRequiredArgs(t *testing.T) { + for _, traceType := range []config.TraceType{config.TraceTypeCannon, config.TraceTypePermissioned} { + traceType := traceType + t.Run(fmt.Sprintf("TestCannonBin-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-bin")) + }) + + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag cannon-bin is required", addRequiredArgsExcept(traceType, "--cannon-bin")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-bin", "--cannon-bin=./cannon")) + require.Equal(t, "./cannon", cfg.CannonBin) + }) + }) + + t.Run(fmt.Sprintf("TestCannonServer-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-server")) + }) + + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag cannon-server is required", addRequiredArgsExcept(traceType, "--cannon-server")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-server", "--cannon-server=./op-program")) + require.Equal(t, "./op-program", cfg.CannonServer) + }) + }) + + t.Run(fmt.Sprintf("TestCannonAbsolutePrestate-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-prestate")) + }) + + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag cannon-prestates-url or cannon-prestate is required", addRequiredArgsExcept(traceType, "--cannon-prestate")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, 
addRequiredArgsExcept(traceType, "--cannon-prestate", "--cannon-prestate=./pre.json")) + require.Equal(t, "./pre.json", cfg.CannonAbsolutePreState) + }) + }) + + t.Run(fmt.Sprintf("TestCannonAbsolutePrestateBaseURL-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-prestates-url")) + }) + + t.Run("Required", func(t *testing.T) { + verifyArgsInvalid(t, "flag cannon-prestates-url or cannon-prestate is required", addRequiredArgsExcept(traceType, "--cannon-prestate")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-prestates-url", "--cannon-prestates-url=http://localhost/foo")) + require.Equal(t, "http://localhost/foo", cfg.CannonAbsolutePreStateBaseURL.String()) + }) + }) + + t.Run(fmt.Sprintf("TestL2Rpc-%v", traceType), func(t *testing.T) { + t.Run("RequiredForCannonTrace", func(t *testing.T) { + verifyArgsInvalid(t, "flag l2-eth-rpc is required", addRequiredArgsExcept(traceType, "--l2-eth-rpc")) + }) + + t.Run("ValidLegacy", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--l2-eth-rpc", fmt.Sprintf("--cannon-l2=%s", l2EthRpc))) + require.Equal(t, l2EthRpc, cfg.L2Rpc) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) + require.Equal(t, l2EthRpc, cfg.L2Rpc) + }) + }) + + t.Run(fmt.Sprintf("TestCannonSnapshotFreq-%v", traceType), func(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) + require.Equal(t, config.DefaultCannonSnapshotFreq, cfg.CannonSnapshotFreq) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType, "--cannon-snapshot-freq=1234")) + require.Equal(t, uint(1234), cfg.CannonSnapshotFreq) + }) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid(t, "invalid value \"abc\" for 
flag -cannon-snapshot-freq", + addRequiredArgs(traceType, "--cannon-snapshot-freq=abc")) + }) + }) + + t.Run(fmt.Sprintf("TestCannonInfoFreq-%v", traceType), func(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) + require.Equal(t, config.DefaultCannonInfoFreq, cfg.CannonInfoFreq) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType, "--cannon-info-freq=1234")) + require.Equal(t, uint(1234), cfg.CannonInfoFreq) + }) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid(t, "invalid value \"abc\" for flag -cannon-info-freq", + addRequiredArgs(traceType, "--cannon-info-freq=abc")) + }) + }) + + t.Run(fmt.Sprintf("TestRequireEitherCannonNetworkOrRollupAndGenesis-%v", traceType), func(t *testing.T) { + verifyArgsInvalid( + t, + "flag cannon-network or cannon-rollup-config and cannon-l2-genesis is required", + addRequiredArgsExcept(traceType, "--cannon-network")) + verifyArgsInvalid( + t, + "flag cannon-network or cannon-rollup-config and cannon-l2-genesis is required", + addRequiredArgsExcept(traceType, "--cannon-network", "--cannon-rollup-config=rollup.json")) + verifyArgsInvalid( + t, + "flag cannon-network or cannon-rollup-config and cannon-l2-genesis is required", + addRequiredArgsExcept(traceType, "--cannon-network", "--cannon-l2-genesis=gensis.json")) + }) + + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { + verifyArgsInvalid( + t, + "flag cannon-network can not be used with cannon-rollup-config and cannon-l2-genesis", + addRequiredArgsExcept(traceType, "--cannon-network", + "--cannon-network", cannonNetwork, "--cannon-rollup-config=rollup.json")) + }) + + t.Run(fmt.Sprintf("TestCannonNetwork-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-network")) + }) + + 
t.Run("NotRequiredWhenRollupAndGenesIsSpecified", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-network", + "--cannon-rollup-config=rollup.json", "--cannon-l2-genesis=genesis.json")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-network", "--cannon-network", testNetwork)) + require.Equal(t, testNetwork, cfg.CannonNetwork) + }) + }) + + t.Run(fmt.Sprintf("TestCannonRollupConfig-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-rollup-config")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-network", "--cannon-rollup-config=rollup.json", "--cannon-l2-genesis=genesis.json")) + require.Equal(t, "rollup.json", cfg.CannonRollupConfigPath) + }) + }) + + t.Run(fmt.Sprintf("TestCannonL2qGenesis-%v", traceType), func(t *testing.T) { + t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-l2-genesis")) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-network", "--cannon-rollup-config=rollup.json", "--cannon-l2-genesis=genesis.json")) + require.Equal(t, "genesis.json", cfg.CannonL2GenesisPath) + }) + }) + } +} + +func TestDataDir(t *testing.T) { + for _, traceType := range config.TraceTypes { + traceType := traceType + + t.Run(fmt.Sprintf("RequiredFor-%v", traceType), func(t *testing.T) { + verifyArgsInvalid(t, "flag datadir is required", addRequiredArgsExcept(traceType, "--datadir")) + }) + } + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeCannon, "--datadir", "--datadir=/foo/bar/cannon")) + require.Equal(t, "/foo/bar/cannon", cfg.Datadir) + }) +} + +func TestRollupRpc(t *testing.T) { + 
for _, traceType := range config.TraceTypes { + traceType := traceType + + t.Run(fmt.Sprintf("RequiredFor-%v", traceType), func(t *testing.T) { + verifyArgsInvalid(t, "flag rollup-rpc is required", addRequiredArgsExcept(traceType, "--rollup-rpc")) + }) + } + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon)) + require.Equal(t, rollupRpc, cfg.RollupRpc) + }) +} + +func TestGameWindow(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet)) + require.Equal(t, config.DefaultGameWindow, cfg.GameWindow) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--game-window=1m")) + require.Equal(t, time.Duration(time.Minute), cfg.GameWindow) + }) + + t.Run("ParsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--game-window=672h")) + require.Equal(t, config.DefaultGameWindow, cfg.GameWindow) + }) +} + +func TestUnsafeAllowInvalidPrestate(t *testing.T) { + t.Run("DefaultsToFalse", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--unsafe-allow-invalid-prestate")) + require.False(t, cfg.AllowInvalidPrestate) + }) + + t.Run("EnabledWithNoValue", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon, "--unsafe-allow-invalid-prestate")) + require.True(t, cfg.AllowInvalidPrestate) + }) + + t.Run("EnabledWithTrue", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon, "--unsafe-allow-invalid-prestate=true")) + require.True(t, cfg.AllowInvalidPrestate) + }) + + t.Run("DisabledWithFalse", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon, "--unsafe-allow-invalid-prestate=false")) + require.False(t, cfg.AllowInvalidPrestate) + }) +} + +func TestAdditionalBondClaimants(t *testing.T) { + 
t.Run("DefaultsToEmpty", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--additional-bond-claimants")) + require.Empty(t, cfg.AdditionalBondClaimants) + }) + + t.Run("Valid-Single", func(t *testing.T) { + claimant := common.Address{0xaa} + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--additional-bond-claimants", claimant.Hex())) + require.Contains(t, cfg.AdditionalBondClaimants, claimant) + require.Len(t, cfg.AdditionalBondClaimants, 1) + }) + + t.Run("Valid-Multiple", func(t *testing.T) { + claimant1 := common.Address{0xaa} + claimant2 := common.Address{0xbb} + claimant3 := common.Address{0xcc} + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, + "--additional-bond-claimants", fmt.Sprintf("%v,%v,%v", claimant1.Hex(), claimant2.Hex(), claimant3.Hex()))) + require.Contains(t, cfg.AdditionalBondClaimants, claimant1) + require.Contains(t, cfg.AdditionalBondClaimants, claimant2) + require.Contains(t, cfg.AdditionalBondClaimants, claimant3) + require.Len(t, cfg.AdditionalBondClaimants, 3) + }) + + t.Run("Invalid-Single", func(t *testing.T) { + verifyArgsInvalid(t, "invalid additional claimant", + addRequiredArgs(config.TraceTypeAlphabet, "--additional-bond-claimants", "nope")) + }) + + t.Run("Invalid-Multiple", func(t *testing.T) { + claimant1 := common.Address{0xaa} + claimant2 := common.Address{0xbb} + verifyArgsInvalid(t, "invalid additional claimant", + addRequiredArgs(config.TraceTypeAlphabet, "--additional-bond-claimants", fmt.Sprintf("%v,nope,%v", claimant1.Hex(), claimant2.Hex()))) + }) +} + +func verifyArgsInvalid(t *testing.T, messageContains string, cliArgs []string) { + _, _, err := dryRunWithArgs(cliArgs) + require.ErrorContains(t, err, messageContains) +} + +func configForArgs(t *testing.T, cliArgs []string) config.Config { + _, cfg, err := dryRunWithArgs(cliArgs) + require.NoError(t, err) + return cfg +} + +func dryRunWithArgs(cliArgs []string) (log.Logger, 
config.Config, error) { + cfg := new(config.Config) + var logger log.Logger + fullArgs := append([]string{"op-challenger2"}, cliArgs...) + testErr := errors.New("dry-run") + err := run(context.Background(), fullArgs, func(ctx context.Context, log log.Logger, config *config.Config) (cliapp.Lifecycle, error) { + logger = log + cfg = config + return nil, testErr + }) + if errors.Is(err, testErr) { // expected error + err = nil + } + return logger, *cfg, err +} + +func addRequiredArgs(traceType config.TraceType, args ...string) []string { + req := requiredArgs(traceType) + combined := toArgList(req) + return append(combined, args...) +} + +func addRequiredArgsExcept(traceType config.TraceType, name string, optionalArgs ...string) []string { + req := requiredArgs(traceType) + delete(req, name) + return append(toArgList(req), optionalArgs...) +} + +func requiredArgs(traceType config.TraceType) map[string]string { + args := map[string]string{ + "--l1-eth-rpc": l1EthRpc, + "--l1-beacon": l1Beacon, + "--rollup-rpc": rollupRpc, + "--l2-eth-rpc": l2EthRpc, + "--game-factory-address": gameFactoryAddressValue, + "--trace-type": traceType.String(), + "--datadir": datadir, + } + switch traceType { + case config.TraceTypeCannon, config.TraceTypePermissioned: + addRequiredCannonArgs(args) + case config.TraceTypeAsterisc: + addRequiredAsteriscArgs(args) + } + return args +} + +func addRequiredCannonArgs(args map[string]string) { + args["--cannon-network"] = cannonNetwork + args["--cannon-bin"] = cannonBin + args["--cannon-server"] = cannonServer + args["--cannon-prestate"] = cannonPreState + args["--l2-eth-rpc"] = l2EthRpc +} + +func addRequiredAsteriscArgs(args map[string]string) { + args["--asterisc-network"] = asteriscNetwork + args["--asterisc-bin"] = asteriscBin + args["--asterisc-server"] = asteriscServer + args["--asterisc-prestate"] = asteriscPreState + args["--l2-eth-rpc"] = l2EthRpc +} + +func toArgList(req map[string]string) []string { + var combined []string + for name, 
value := range req { + combined = append(combined, fmt.Sprintf("%s=%s", name, value)) + } + return combined +} diff --git a/op-challenger2/cmd/move.go b/op-challenger2/cmd/move.go new file mode 100644 index 000000000000..1b8040126752 --- /dev/null +++ b/op-challenger2/cmd/move.go @@ -0,0 +1,102 @@ +package main + +import ( + "context" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/flags" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + opservice "github.com/ethereum-optimism/optimism/op-service" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/urfave/cli/v2" +) + +var ( + AttackFlag = &cli.BoolFlag{ + Name: "attack", + Usage: "An attack move. If true, the defend flag must not be set.", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "ATTACK"), + } + DefendFlag = &cli.BoolFlag{ + Name: "defend", + Usage: "A defending move. 
If true, the attack flag must not be set.", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "DEFEND"), + } + ParentIndexFlag = &cli.StringFlag{ + Name: "parent-index", + Usage: "The index of the claim to move on.", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "PARENT_INDEX"), + } + ClaimFlag = &cli.StringFlag{ + Name: "claim", + Usage: "The claim hash.", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "CLAIM"), + } +) + +func Move(ctx *cli.Context) error { + attack := ctx.Bool(AttackFlag.Name) + defend := ctx.Bool(DefendFlag.Name) + parentIndex := ctx.Uint64(ParentIndexFlag.Name) + claim := common.HexToHash(ctx.String(ClaimFlag.Name)) + + if attack && defend { + return fmt.Errorf("both attack and defense flags cannot be set") + } + + contract, txMgr, err := NewContractWithTxMgr[contracts.FaultDisputeGameContract](ctx, GameAddressFlag.Name, contracts.NewFaultDisputeGameContract) + if err != nil { + return fmt.Errorf("failed to create dispute game bindings: %w", err) + } + + parentClaim, err := contract.GetClaim(ctx.Context, parentIndex) + if err != nil { + return fmt.Errorf("failed to get parent claim: %w", err) + } + var tx txmgr.TxCandidate + if attack { + tx, err = contract.AttackTx(ctx.Context, parentClaim, claim) + if err != nil { + return fmt.Errorf("failed to create attack tx: %w", err) + } + } else if defend { + tx, err = contract.DefendTx(ctx.Context, parentClaim, claim) + if err != nil { + return fmt.Errorf("failed to create defense tx: %w", err) + } + } else { + return fmt.Errorf("either attack or defense flag must be set") + } + + rct, err := txMgr.Send(context.Background(), tx) + if err != nil { + return fmt.Errorf("failed to send tx: %w", err) + } + fmt.Printf("Sent tx with status: %v, hash: %s\n", rct.Status, rct.TxHash.String()) + + return nil +} + +func moveFlags() []cli.Flag { + cliFlags := []cli.Flag{ + flags.L1EthRpcFlag, + GameAddressFlag, + AttackFlag, + DefendFlag, + ParentIndexFlag, + ClaimFlag, + } + cliFlags = 
append(cliFlags, txmgr.CLIFlagsWithDefaults(flags.EnvVarPrefix, txmgr.DefaultChallengerFlagValues)...) + cliFlags = append(cliFlags, oplog.CLIFlags(flags.EnvVarPrefix)...) + return cliFlags +} + +var MoveCommand = &cli.Command{ + Name: "move", + Usage: "Creates and sends a move transaction to the dispute game", + Description: "Creates and sends a move transaction to the dispute game", + Action: Move, + Flags: moveFlags(), +} diff --git a/op-challenger2/cmd/resolve.go b/op-challenger2/cmd/resolve.go new file mode 100644 index 000000000000..46c484654a36 --- /dev/null +++ b/op-challenger2/cmd/resolve.go @@ -0,0 +1,51 @@ +package main + +import ( + "context" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/flags" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/urfave/cli/v2" +) + +func Resolve(ctx *cli.Context) error { + contract, txMgr, err := NewContractWithTxMgr[contracts.FaultDisputeGameContract](ctx, GameAddressFlag.Name, contracts.NewFaultDisputeGameContract) + if err != nil { + return fmt.Errorf("failed to create dispute game bindings: %w", err) + } + + tx, err := contract.ResolveTx() + if err != nil { + return fmt.Errorf("failed to create resolve tx: %w", err) + } + + rct, err := txMgr.Send(context.Background(), tx) + if err != nil { + return fmt.Errorf("failed to send tx: %w", err) + } + + fmt.Printf("Sent resolve tx with status: %v, hash: %s\n", rct.Status, rct.TxHash.String()) + + return nil +} + +func resolveFlags() []cli.Flag { + cliFlags := []cli.Flag{ + flags.L1EthRpcFlag, + GameAddressFlag, + } + cliFlags = append(cliFlags, txmgr.CLIFlagsWithDefaults(flags.EnvVarPrefix, txmgr.DefaultChallengerFlagValues)...) + cliFlags = append(cliFlags, oplog.CLIFlags(flags.EnvVarPrefix)...) 
+ return cliFlags +} + +var ResolveCommand = &cli.Command{ + Name: "resolve", + Usage: "Resolves the specified dispute game if possible", + Description: "Resolves the specified dispute game if possible", + Action: Resolve, + Flags: resolveFlags(), +} diff --git a/op-challenger2/cmd/resolve_claim.go b/op-challenger2/cmd/resolve_claim.go new file mode 100644 index 000000000000..f273584f10cd --- /dev/null +++ b/op-challenger2/cmd/resolve_claim.go @@ -0,0 +1,71 @@ +package main + +import ( + "context" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/flags" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + opservice "github.com/ethereum-optimism/optimism/op-service" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/urfave/cli/v2" +) + +var ( + ClaimIdxFlag = &cli.Uint64Flag{ + Name: "claim", + Usage: "Index of the claim to resolve.", + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "CLAIM"), + } +) + +func ResolveClaim(ctx *cli.Context) error { + if !ctx.IsSet(ClaimIdxFlag.Name) { + return fmt.Errorf("must specify %v flag", ClaimIdxFlag.Name) + } + idx := ctx.Uint64(ClaimIdxFlag.Name) + + contract, txMgr, err := NewContractWithTxMgr[contracts.FaultDisputeGameContract](ctx, GameAddressFlag.Name, contracts.NewFaultDisputeGameContract) + if err != nil { + return fmt.Errorf("failed to create dispute game bindings: %w", err) + } + + err = contract.CallResolveClaim(ctx.Context, idx) + if err != nil { + return fmt.Errorf("claim is not resolvable: %w", err) + } + + tx, err := contract.ResolveClaimTx(idx) + if err != nil { + return fmt.Errorf("failed to create resolve claim tx: %w", err) + } + + rct, err := txMgr.Send(context.Background(), tx) + if err != nil { + return fmt.Errorf("failed to send tx: %w", err) + } + + fmt.Printf("Sent resolve claim tx with status: %v, hash: %s\n", rct.Status, rct.TxHash.String()) + + return nil +} + +func 
resolveClaimFlags() []cli.Flag { + cliFlags := []cli.Flag{ + flags.L1EthRpcFlag, + GameAddressFlag, + ClaimIdxFlag, + } + cliFlags = append(cliFlags, txmgr.CLIFlagsWithDefaults(flags.EnvVarPrefix, txmgr.DefaultChallengerFlagValues)...) + cliFlags = append(cliFlags, oplog.CLIFlags(flags.EnvVarPrefix)...) + return cliFlags +} + +var ResolveClaimCommand = &cli.Command{ + Name: "resolve-claim", + Usage: "Resolves the specified claim if possible", + Description: "Resolves the specified claim if possible", + Action: ResolveClaim, + Flags: resolveClaimFlags(), +} diff --git a/op-challenger2/cmd/utils.go b/op-challenger2/cmd/utils.go new file mode 100644 index 000000000000..fdeb2877bcca --- /dev/null +++ b/op-challenger2/cmd/utils.go @@ -0,0 +1,78 @@ +package main + +import ( + "context" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/flags" + contractMetrics "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + opservice "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/dial" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics" + "github.com/ethereum/go-ethereum/common" + "github.com/urfave/cli/v2" +) + +type ContractCreator[T any] func(context.Context, contractMetrics.ContractMetricer, common.Address, *batching.MultiCaller) (T, error) + +// NewContractWithTxMgr creates a new contract and a transaction manager. 
+func NewContractWithTxMgr[T any](ctx *cli.Context, flagName string, creator ContractCreator[T]) (T, txmgr.TxManager, error) { + var contract T + caller, txMgr, err := newClientsFromCLI(ctx) + if err != nil { + return contract, nil, err + } + + created, err := newContractFromCLI(ctx, flagName, caller, creator) + if err != nil { + return contract, nil, err + } + + return created, txMgr, nil +} + +// newContractFromCLI creates a new contract from the CLI context. +func newContractFromCLI[T any](ctx *cli.Context, flagName string, caller *batching.MultiCaller, creator ContractCreator[T]) (T, error) { + var contract T + gameAddr, err := opservice.ParseAddress(ctx.String(flagName)) + if err != nil { + return contract, err + } + + created, err := creator(ctx.Context, contractMetrics.NoopContractMetrics, gameAddr, caller) + if err != nil { + return contract, fmt.Errorf("failed to create contract bindings: %w", err) + } + + return created, nil +} + +// newClientsFromCLI creates a new caller and transaction manager from the CLI context. 
+func newClientsFromCLI(ctx *cli.Context) (*batching.MultiCaller, txmgr.TxManager, error) { + logger, err := setupLogging(ctx) + if err != nil { + return nil, nil, err + } + + rpcUrl := ctx.String(flags.L1EthRpcFlag.Name) + if rpcUrl == "" { + return nil, nil, fmt.Errorf("missing %v", flags.L1EthRpcFlag.Name) + } + + l1Client, err := dial.DialEthClientWithTimeout(ctx.Context, dial.DefaultDialTimeout, logger, rpcUrl) + if err != nil { + return nil, nil, fmt.Errorf("failed to dial L1: %w", err) + } + defer l1Client.Close() + + caller := batching.NewMultiCaller(l1Client.Client(), batching.DefaultBatchSize) + txMgrConfig := txmgr.ReadCLIConfig(ctx) + txMgr, err := txmgr.NewSimpleTxManager("challenger", logger, &metrics.NoopTxMetrics{}, txMgrConfig) + if err != nil { + return nil, nil, fmt.Errorf("failed to create the transaction manager: %w", err) + } + + return caller, txMgr, nil +} diff --git a/op-challenger2/config/config.go b/op-challenger2/config/config.go new file mode 100644 index 000000000000..6128e33328a6 --- /dev/null +++ b/op-challenger2/config/config.go @@ -0,0 +1,310 @@ +package config + +import ( + "errors" + "fmt" + "net/url" + "runtime" + "slices" + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-node/chaincfg" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + "github.com/ethereum-optimism/optimism/op-service/txmgr" +) + +var ( + ErrMissingTraceType = errors.New("no supported trace types specified") + ErrMissingDatadir = errors.New("missing datadir") + ErrMaxConcurrencyZero = errors.New("max concurrency must not be 0") + ErrMissingL2Rpc = errors.New("missing L2 rpc url") + ErrMissingCannonBin = errors.New("missing cannon bin") + ErrMissingCannonServer = errors.New("missing cannon server") + ErrMissingCannonAbsolutePreState = errors.New("missing cannon absolute pre-state") + ErrCannonAbsolutePreStateAndBaseURL = 
errors.New("only specify one of cannon absolute pre-state and cannon absolute pre-state base URL") + ErrMissingL1EthRPC = errors.New("missing l1 eth rpc url") + ErrMissingL1Beacon = errors.New("missing l1 beacon url") + ErrMissingGameFactoryAddress = errors.New("missing game factory address") + ErrMissingCannonSnapshotFreq = errors.New("missing cannon snapshot freq") + ErrMissingCannonInfoFreq = errors.New("missing cannon info freq") + ErrMissingCannonRollupConfig = errors.New("missing cannon network or rollup config path") + ErrMissingCannonL2Genesis = errors.New("missing cannon network or l2 genesis path") + ErrCannonNetworkAndRollupConfig = errors.New("only specify one of network or rollup config path") + ErrCannonNetworkAndL2Genesis = errors.New("only specify one of network or l2 genesis path") + ErrCannonNetworkUnknown = errors.New("unknown cannon network") + ErrMissingRollupRpc = errors.New("missing rollup rpc url") + + ErrMissingAsteriscBin = errors.New("missing asterisc bin") + ErrMissingAsteriscServer = errors.New("missing asterisc server") + ErrMissingAsteriscAbsolutePreState = errors.New("missing asterisc absolute pre-state") + ErrAsteriscAbsolutePreStateAndBaseURL = errors.New("only specify one of asterisc absolute pre-state and asterisc absolute pre-state base URL") + ErrMissingAsteriscSnapshotFreq = errors.New("missing asterisc snapshot freq") + ErrMissingAsteriscInfoFreq = errors.New("missing asterisc info freq") + ErrMissingAsteriscRollupConfig = errors.New("missing asterisc network or rollup config path") + ErrMissingAsteriscL2Genesis = errors.New("missing asterisc network or l2 genesis path") + ErrAsteriscNetworkAndRollupConfig = errors.New("only specify one of network or rollup config path") + ErrAsteriscNetworkAndL2Genesis = errors.New("only specify one of network or l2 genesis path") + ErrAsteriscNetworkUnknown = errors.New("unknown asterisc network") +) + +type TraceType string + +const ( + TraceTypeAlphabet TraceType = "alphabet" + 
TraceTypeCannon TraceType = "cannon" + TraceTypeAsterisc TraceType = "asterisc" + TraceTypePermissioned TraceType = "permissioned" +) + +var TraceTypes = []TraceType{TraceTypeAlphabet, TraceTypeCannon, TraceTypePermissioned, TraceTypeAsterisc} + +func (t TraceType) String() string { + return string(t) +} + +// Set implements the Set method required by the [cli.Generic] interface. +func (t *TraceType) Set(value string) error { + if !ValidTraceType(TraceType(value)) { + return fmt.Errorf("unknown trace type: %q", value) + } + *t = TraceType(value) + return nil +} + +func (t *TraceType) Clone() any { + cpy := *t + return &cpy +} + +func ValidTraceType(value TraceType) bool { + for _, t := range TraceTypes { + if t == value { + return true + } + } + return false +} + +const ( + DefaultPollInterval = time.Second * 12 + DefaultCannonSnapshotFreq = uint(1_000_000_000) + DefaultCannonInfoFreq = uint(10_000_000) + DefaultAsteriscSnapshotFreq = uint(1_000_000_000) + DefaultAsteriscInfoFreq = uint(10_000_000) + // DefaultGameWindow is the default maximum time duration in the past + // that the challenger will look for games to progress. + // The default value is 28 days. The worst case duration for a game is 16 days + // (due to clock extension), plus 7 days WETH withdrawal delay leaving a 5 day + // buffer to monitor games to ensure bonds are claimed. + DefaultGameWindow = time.Duration(28 * 24 * time.Hour) + DefaultMaxPendingTx = 10 +) + +// Config is a well typed config that is parsed from the CLI params. +// This also contains config options for auxiliary services. +// It is used to initialize the challenger. 
+type Config struct { + L1EthRpc string // L1 RPC Url + L1Beacon string // L1 Beacon API Url + GameFactoryAddress common.Address // Address of the dispute game factory + GameAllowlist []common.Address // Allowlist of fault game addresses + GameWindow time.Duration // Maximum time duration to look for games to progress + Datadir string // Data Directory + MaxConcurrency uint // Maximum number of threads to use when progressing games + PollInterval time.Duration // Polling interval for latest-block subscription when using an HTTP RPC provider + AllowInvalidPrestate bool // Whether to allow responding to games where the prestate does not match + + AdditionalBondClaimants []common.Address // List of addresses to claim bonds for in addition to the tx manager sender + + SelectiveClaimResolution bool // Whether to only resolve claims for the claimants in AdditionalBondClaimants union [TxSender.From()] + + TraceTypes []TraceType // Type of traces supported + + RollupRpc string // L2 Rollup RPC Url + + L2Rpc string // L2 RPC Url + + // Specific to the cannon trace provider + CannonBin string // Path to the cannon executable to run when generating trace data + CannonServer string // Path to the op-program executable that provides the pre-image oracle server + CannonAbsolutePreState string // File to load the absolute pre-state for Cannon traces from + CannonAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for Cannon traces from + CannonNetwork string + CannonRollupConfigPath string + CannonL2GenesisPath string + CannonSnapshotFreq uint // Frequency of snapshots to create when executing cannon (in VM instructions) + CannonInfoFreq uint // Frequency of cannon progress log messages (in VM instructions) + + // Specific to the asterisc trace provider + AsteriscBin string // Path to the asterisc executable to run when generating trace data + AsteriscServer string // Path to the op-program executable that provides the pre-image oracle server + 
AsteriscAbsolutePreState string // File to load the absolute pre-state for Asterisc traces from + AsteriscAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for Asterisc traces from + AsteriscNetwork string + AsteriscRollupConfigPath string + AsteriscL2GenesisPath string + AsteriscSnapshotFreq uint // Frequency of snapshots to create when executing asterisc (in VM instructions) + AsteriscInfoFreq uint // Frequency of asterisc progress log messages (in VM instructions) + + MaxPendingTx uint64 // Maximum number of pending transactions (0 == no limit) + + TxMgrConfig txmgr.CLIConfig + MetricsConfig opmetrics.CLIConfig + PprofConfig oppprof.CLIConfig +} + +func NewConfig( + gameFactoryAddress common.Address, + l1EthRpc string, + l1BeaconApi string, + l2RollupRpc string, + l2EthRpc string, + datadir string, + supportedTraceTypes ...TraceType, +) Config { + return Config{ + L1EthRpc: l1EthRpc, + L1Beacon: l1BeaconApi, + RollupRpc: l2RollupRpc, + L2Rpc: l2EthRpc, + GameFactoryAddress: gameFactoryAddress, + MaxConcurrency: uint(runtime.NumCPU()), + PollInterval: DefaultPollInterval, + + TraceTypes: supportedTraceTypes, + + MaxPendingTx: DefaultMaxPendingTx, + + TxMgrConfig: txmgr.NewCLIConfig(l1EthRpc, txmgr.DefaultChallengerFlagValues), + MetricsConfig: opmetrics.DefaultCLIConfig(), + PprofConfig: oppprof.DefaultCLIConfig(), + + Datadir: datadir, + + CannonSnapshotFreq: DefaultCannonSnapshotFreq, + CannonInfoFreq: DefaultCannonInfoFreq, + AsteriscSnapshotFreq: DefaultAsteriscSnapshotFreq, + AsteriscInfoFreq: DefaultAsteriscInfoFreq, + GameWindow: DefaultGameWindow, + } +} + +func (c Config) TraceTypeEnabled(t TraceType) bool { + return slices.Contains(c.TraceTypes, t) +} + +func (c Config) Check() error { + if c.L1EthRpc == "" { + return ErrMissingL1EthRPC + } + if c.L1Beacon == "" { + return ErrMissingL1Beacon + } + if c.RollupRpc == "" { + return ErrMissingRollupRpc + } + if c.L2Rpc == "" { + return ErrMissingL2Rpc + } + if c.GameFactoryAddress 
== (common.Address{}) { + return ErrMissingGameFactoryAddress + } + if len(c.TraceTypes) == 0 { + return ErrMissingTraceType + } + if c.Datadir == "" { + return ErrMissingDatadir + } + if c.MaxConcurrency == 0 { + return ErrMaxConcurrencyZero + } + if c.TraceTypeEnabled(TraceTypeCannon) || c.TraceTypeEnabled(TraceTypePermissioned) { + if c.CannonBin == "" { + return ErrMissingCannonBin + } + if c.CannonServer == "" { + return ErrMissingCannonServer + } + if c.CannonNetwork == "" { + if c.CannonRollupConfigPath == "" { + return ErrMissingCannonRollupConfig + } + if c.CannonL2GenesisPath == "" { + return ErrMissingCannonL2Genesis + } + } else { + if c.CannonRollupConfigPath != "" { + return ErrCannonNetworkAndRollupConfig + } + if c.CannonL2GenesisPath != "" { + return ErrCannonNetworkAndL2Genesis + } + if ch := chaincfg.ChainByName(c.CannonNetwork); ch == nil { + return fmt.Errorf("%w: %v", ErrCannonNetworkUnknown, c.CannonNetwork) + } + } + if c.CannonAbsolutePreState == "" && c.CannonAbsolutePreStateBaseURL == nil { + return ErrMissingCannonAbsolutePreState + } + if c.CannonAbsolutePreState != "" && c.CannonAbsolutePreStateBaseURL != nil { + return ErrCannonAbsolutePreStateAndBaseURL + } + if c.CannonSnapshotFreq == 0 { + return ErrMissingCannonSnapshotFreq + } + if c.CannonInfoFreq == 0 { + return ErrMissingCannonInfoFreq + } + } + if c.TraceTypeEnabled(TraceTypeAsterisc) { + if c.AsteriscBin == "" { + return ErrMissingAsteriscBin + } + if c.AsteriscServer == "" { + return ErrMissingAsteriscServer + } + if c.AsteriscNetwork == "" { + if c.AsteriscRollupConfigPath == "" { + return ErrMissingAsteriscRollupConfig + } + if c.AsteriscL2GenesisPath == "" { + return ErrMissingAsteriscL2Genesis + } + } else { + if c.AsteriscRollupConfigPath != "" { + return ErrAsteriscNetworkAndRollupConfig + } + if c.AsteriscL2GenesisPath != "" { + return ErrAsteriscNetworkAndL2Genesis + } + if ch := chaincfg.ChainByName(c.AsteriscNetwork); ch == nil { + return fmt.Errorf("%w: %v", 
ErrAsteriscNetworkUnknown, c.AsteriscNetwork) + } + } + if c.AsteriscAbsolutePreState == "" && c.AsteriscAbsolutePreStateBaseURL == nil { + return ErrMissingAsteriscAbsolutePreState + } + if c.AsteriscAbsolutePreState != "" && c.AsteriscAbsolutePreStateBaseURL != nil { + return ErrAsteriscAbsolutePreStateAndBaseURL + } + if c.AsteriscSnapshotFreq == 0 { + return ErrMissingAsteriscSnapshotFreq + } + if c.AsteriscInfoFreq == 0 { + return ErrMissingAsteriscInfoFreq + } + } + if err := c.TxMgrConfig.Check(); err != nil { + return err + } + if err := c.MetricsConfig.Check(); err != nil { + return err + } + if err := c.PprofConfig.Check(); err != nil { + return err + } + return nil +} diff --git a/op-challenger2/config/config_test.go b/op-challenger2/config/config_test.go new file mode 100644 index 000000000000..dc2949e41d20 --- /dev/null +++ b/op-challenger2/config/config_test.go @@ -0,0 +1,424 @@ +package config + +import ( + "fmt" + "net/url" + "runtime" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-service/txmgr" +) + +var ( + validL1EthRpc = "http://localhost:8545" + validL1BeaconUrl = "http://localhost:9000" + validGameFactoryAddress = common.Address{0x23} + validCannonBin = "./bin/cannon" + validCannonOpProgramBin = "./bin/op-program" + validCannonNetwork = "mainnet" + validCannonAbsolutPreState = "pre.json" + validCannonAbsolutPreStateBaseURL, _ = url.Parse("http://localhost/foo/") + validDatadir = "/tmp/data" + validL2Rpc = "http://localhost:9545" + validRollupRpc = "http://localhost:8555" + + validAsteriscBin = "./bin/asterisc" + validAsteriscOpProgramBin = "./bin/op-program" + validAsteriscNetwork = "mainnet" + validAsteriscAbsolutPreState = "pre.json" + validAsteriscAbsolutPreStateBaseURL, _ = url.Parse("http://localhost/bar/") +) + +var cannonTraceTypes = []TraceType{TraceTypeCannon, TraceTypePermissioned} +var asteriscTraceTypes = []TraceType{TraceTypeAsterisc} 
+ +func applyValidConfigForCannon(cfg *Config) { + cfg.CannonBin = validCannonBin + cfg.CannonServer = validCannonOpProgramBin + cfg.CannonAbsolutePreStateBaseURL = validCannonAbsolutPreStateBaseURL + cfg.CannonNetwork = validCannonNetwork +} + +func applyValidConfigForAsterisc(cfg *Config) { + cfg.AsteriscBin = validAsteriscBin + cfg.AsteriscServer = validAsteriscOpProgramBin + cfg.AsteriscAbsolutePreStateBaseURL = validAsteriscAbsolutPreStateBaseURL + cfg.AsteriscNetwork = validAsteriscNetwork +} + +func validConfig(traceType TraceType) Config { + cfg := NewConfig(validGameFactoryAddress, validL1EthRpc, validL1BeaconUrl, validRollupRpc, validL2Rpc, validDatadir, traceType) + if traceType == TraceTypeCannon || traceType == TraceTypePermissioned { + applyValidConfigForCannon(&cfg) + } + if traceType == TraceTypeAsterisc { + applyValidConfigForAsterisc(&cfg) + } + return cfg +} + +// TestValidConfigIsValid checks that the config provided by validConfig is actually valid +func TestValidConfigIsValid(t *testing.T) { + for _, traceType := range TraceTypes { + traceType := traceType + t.Run(traceType.String(), func(t *testing.T) { + err := validConfig(traceType).Check() + require.NoError(t, err) + }) + } +} + +func TestTxMgrConfig(t *testing.T) { + t.Run("Invalid", func(t *testing.T) { + config := validConfig(TraceTypeCannon) + config.TxMgrConfig = txmgr.CLIConfig{} + require.Equal(t, config.Check().Error(), "must provide a L1 RPC url") + }) +} + +func TestL1EthRpcRequired(t *testing.T) { + config := validConfig(TraceTypeCannon) + config.L1EthRpc = "" + require.ErrorIs(t, config.Check(), ErrMissingL1EthRPC) +} + +func TestL1BeaconRequired(t *testing.T) { + config := validConfig(TraceTypeCannon) + config.L1Beacon = "" + require.ErrorIs(t, config.Check(), ErrMissingL1Beacon) +} + +func TestGameFactoryAddressRequired(t *testing.T) { + config := validConfig(TraceTypeCannon) + config.GameFactoryAddress = common.Address{} + require.ErrorIs(t, config.Check(), 
ErrMissingGameFactoryAddress) +} + +func TestSelectiveClaimResolutionNotRequired(t *testing.T) { + config := validConfig(TraceTypeCannon) + require.Equal(t, false, config.SelectiveClaimResolution) + require.NoError(t, config.Check()) +} + +func TestGameAllowlistNotRequired(t *testing.T) { + config := validConfig(TraceTypeCannon) + config.GameAllowlist = []common.Address{} + require.NoError(t, config.Check()) +} + +func TestCannonRequiredArgs(t *testing.T) { + for _, traceType := range cannonTraceTypes { + traceType := traceType + + t.Run(fmt.Sprintf("TestCannonBinRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.CannonBin = "" + require.ErrorIs(t, config.Check(), ErrMissingCannonBin) + }) + + t.Run(fmt.Sprintf("TestCannonServerRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.CannonServer = "" + require.ErrorIs(t, config.Check(), ErrMissingCannonServer) + }) + + t.Run(fmt.Sprintf("TestCannonAbsolutePreStateOrBaseURLRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.CannonAbsolutePreState = "" + config.CannonAbsolutePreStateBaseURL = nil + require.ErrorIs(t, config.Check(), ErrMissingCannonAbsolutePreState) + }) + + t.Run(fmt.Sprintf("TestCannonAbsolutePreState-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.CannonAbsolutePreState = validCannonAbsolutPreState + config.CannonAbsolutePreStateBaseURL = nil + require.NoError(t, config.Check()) + }) + + t.Run(fmt.Sprintf("TestCannonAbsolutePreStateBaseURL-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.CannonAbsolutePreState = "" + config.CannonAbsolutePreStateBaseURL = validCannonAbsolutPreStateBaseURL + require.NoError(t, config.Check()) + }) + + t.Run(fmt.Sprintf("TestMustNotSupplyBothCannonAbsolutePreStateAndBaseURL-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.CannonAbsolutePreState = 
validCannonAbsolutPreState + config.CannonAbsolutePreStateBaseURL = validCannonAbsolutPreStateBaseURL + require.ErrorIs(t, config.Check(), ErrCannonAbsolutePreStateAndBaseURL) + }) + + t.Run(fmt.Sprintf("TestL2RpcRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.L2Rpc = "" + require.ErrorIs(t, config.Check(), ErrMissingL2Rpc) + }) + + t.Run(fmt.Sprintf("TestCannonSnapshotFreq-%v", traceType), func(t *testing.T) { + t.Run("MustNotBeZero", func(t *testing.T) { + cfg := validConfig(traceType) + cfg.CannonSnapshotFreq = 0 + require.ErrorIs(t, cfg.Check(), ErrMissingCannonSnapshotFreq) + }) + }) + + t.Run(fmt.Sprintf("TestCannonInfoFreq-%v", traceType), func(t *testing.T) { + t.Run("MustNotBeZero", func(t *testing.T) { + cfg := validConfig(traceType) + cfg.CannonInfoFreq = 0 + require.ErrorIs(t, cfg.Check(), ErrMissingCannonInfoFreq) + }) + }) + + t.Run(fmt.Sprintf("TestCannonNetworkOrRollupConfigRequired-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.CannonNetwork = "" + cfg.CannonRollupConfigPath = "" + cfg.CannonL2GenesisPath = "genesis.json" + require.ErrorIs(t, cfg.Check(), ErrMissingCannonRollupConfig) + }) + + t.Run(fmt.Sprintf("TestCannonNetworkOrL2GenesisRequired-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.CannonNetwork = "" + cfg.CannonRollupConfigPath = "foo.json" + cfg.CannonL2GenesisPath = "" + require.ErrorIs(t, cfg.Check(), ErrMissingCannonL2Genesis) + }) + + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.CannonNetwork = validCannonNetwork + cfg.CannonRollupConfigPath = "foo.json" + cfg.CannonL2GenesisPath = "" + require.ErrorIs(t, cfg.Check(), ErrCannonNetworkAndRollupConfig) + }) + + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndL2Genesis-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.CannonNetwork = validCannonNetwork + 
cfg.CannonRollupConfigPath = "" + cfg.CannonL2GenesisPath = "foo.json" + require.ErrorIs(t, cfg.Check(), ErrCannonNetworkAndL2Genesis) + }) + + t.Run(fmt.Sprintf("TestNetworkMustBeValid-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.CannonNetwork = "unknown" + require.ErrorIs(t, cfg.Check(), ErrCannonNetworkUnknown) + }) + } +} + +func TestAsteriscRequiredArgs(t *testing.T) { + for _, traceType := range asteriscTraceTypes { + traceType := traceType + + t.Run(fmt.Sprintf("TestAsteriscBinRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.AsteriscBin = "" + require.ErrorIs(t, config.Check(), ErrMissingAsteriscBin) + }) + + t.Run(fmt.Sprintf("TestAsteriscServerRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.AsteriscServer = "" + require.ErrorIs(t, config.Check(), ErrMissingAsteriscServer) + }) + + t.Run(fmt.Sprintf("TestAsteriscAbsolutePreStateOrBaseURLRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.AsteriscAbsolutePreState = "" + config.AsteriscAbsolutePreStateBaseURL = nil + require.ErrorIs(t, config.Check(), ErrMissingAsteriscAbsolutePreState) + }) + + t.Run(fmt.Sprintf("TestAsteriscAbsolutePreState-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.AsteriscAbsolutePreState = validAsteriscAbsolutPreState + config.AsteriscAbsolutePreStateBaseURL = nil + require.NoError(t, config.Check()) + }) + + t.Run(fmt.Sprintf("TestAsteriscAbsolutePreStateBaseURL-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.AsteriscAbsolutePreState = "" + config.AsteriscAbsolutePreStateBaseURL = validAsteriscAbsolutPreStateBaseURL + require.NoError(t, config.Check()) + }) + + t.Run(fmt.Sprintf("TestMustNotSupplyBothAsteriscAbsolutePreStateAndBaseURL-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.AsteriscAbsolutePreState = 
validAsteriscAbsolutPreState + config.AsteriscAbsolutePreStateBaseURL = validAsteriscAbsolutPreStateBaseURL + require.ErrorIs(t, config.Check(), ErrAsteriscAbsolutePreStateAndBaseURL) + }) + + t.Run(fmt.Sprintf("TestL2RpcRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) + config.L2Rpc = "" + require.ErrorIs(t, config.Check(), ErrMissingL2Rpc) + }) + + t.Run(fmt.Sprintf("TestAsteriscSnapshotFreq-%v", traceType), func(t *testing.T) { + t.Run("MustNotBeZero", func(t *testing.T) { + cfg := validConfig(traceType) + cfg.AsteriscSnapshotFreq = 0 + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscSnapshotFreq) + }) + }) + + t.Run(fmt.Sprintf("TestAsteriscInfoFreq-%v", traceType), func(t *testing.T) { + t.Run("MustNotBeZero", func(t *testing.T) { + cfg := validConfig(traceType) + cfg.AsteriscInfoFreq = 0 + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscInfoFreq) + }) + }) + + t.Run(fmt.Sprintf("TestAsteriscNetworkOrRollupConfigRequired-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.AsteriscNetwork = "" + cfg.AsteriscRollupConfigPath = "" + cfg.AsteriscL2GenesisPath = "genesis.json" + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscRollupConfig) + }) + + t.Run(fmt.Sprintf("TestAsteriscNetworkOrL2GenesisRequired-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.AsteriscNetwork = "" + cfg.AsteriscRollupConfigPath = "foo.json" + cfg.AsteriscL2GenesisPath = "" + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscL2Genesis) + }) + + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.AsteriscNetwork = validAsteriscNetwork + cfg.AsteriscRollupConfigPath = "foo.json" + cfg.AsteriscL2GenesisPath = "" + require.ErrorIs(t, cfg.Check(), ErrAsteriscNetworkAndRollupConfig) + }) + + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndL2Genesis-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + 
cfg.AsteriscNetwork = validAsteriscNetwork + cfg.AsteriscRollupConfigPath = "" + cfg.AsteriscL2GenesisPath = "foo.json" + require.ErrorIs(t, cfg.Check(), ErrAsteriscNetworkAndL2Genesis) + }) + + t.Run(fmt.Sprintf("TestNetworkMustBeValid-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) + cfg.AsteriscNetwork = "unknown" + require.ErrorIs(t, cfg.Check(), ErrAsteriscNetworkUnknown) + }) + } +} + +func TestDatadirRequired(t *testing.T) { + config := validConfig(TraceTypeAlphabet) + config.Datadir = "" + require.ErrorIs(t, config.Check(), ErrMissingDatadir) +} + +func TestMaxConcurrency(t *testing.T) { + t.Run("Required", func(t *testing.T) { + config := validConfig(TraceTypeAlphabet) + config.MaxConcurrency = 0 + require.ErrorIs(t, config.Check(), ErrMaxConcurrencyZero) + }) + + t.Run("DefaultToNumberOfCPUs", func(t *testing.T) { + config := validConfig(TraceTypeAlphabet) + require.EqualValues(t, runtime.NumCPU(), config.MaxConcurrency) + }) +} + +func TestHttpPollInterval(t *testing.T) { + t.Run("Default", func(t *testing.T) { + config := validConfig(TraceTypeAlphabet) + require.EqualValues(t, DefaultPollInterval, config.PollInterval) + }) +} + +func TestRollupRpcRequired(t *testing.T) { + for _, traceType := range TraceTypes { + traceType := traceType + t.Run(traceType.String(), func(t *testing.T) { + config := validConfig(traceType) + config.RollupRpc = "" + require.ErrorIs(t, config.Check(), ErrMissingRollupRpc) + }) + } +} + +func TestRequireConfigForMultipleTraceTypesForCannon(t *testing.T) { + cfg := validConfig(TraceTypeCannon) + cfg.TraceTypes = []TraceType{TraceTypeCannon, TraceTypeAlphabet} + // Set all required options and check its valid + cfg.RollupRpc = validRollupRpc + require.NoError(t, cfg.Check()) + + // Require cannon specific args + cfg.CannonAbsolutePreState = "" + cfg.CannonAbsolutePreStateBaseURL = nil + require.ErrorIs(t, cfg.Check(), ErrMissingCannonAbsolutePreState) + cfg.CannonAbsolutePreState = validCannonAbsolutPreState 
+ + // Require output cannon specific args + cfg.RollupRpc = "" + require.ErrorIs(t, cfg.Check(), ErrMissingRollupRpc) +} + +func TestRequireConfigForMultipleTraceTypesForAsterisc(t *testing.T) { + cfg := validConfig(TraceTypeAsterisc) + cfg.TraceTypes = []TraceType{TraceTypeAsterisc, TraceTypeAlphabet} + // Set all required options and check its valid + cfg.RollupRpc = validRollupRpc + require.NoError(t, cfg.Check()) + + // Require asterisc specific args + cfg.AsteriscAbsolutePreState = "" + cfg.AsteriscAbsolutePreStateBaseURL = nil + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscAbsolutePreState) + cfg.AsteriscAbsolutePreState = validAsteriscAbsolutPreState + + // Require output asterisc specific args + cfg.RollupRpc = "" + require.ErrorIs(t, cfg.Check(), ErrMissingRollupRpc) +} + +func TestRequireConfigForMultipleTraceTypesForCannonAndAsterisc(t *testing.T) { + cfg := validConfig(TraceTypeCannon) + applyValidConfigForAsterisc(&cfg) + + cfg.TraceTypes = []TraceType{TraceTypeCannon, TraceTypeAsterisc, TraceTypeAlphabet} + // Set all required options and check its valid + cfg.RollupRpc = validRollupRpc + require.NoError(t, cfg.Check()) + + // Require cannon specific args + cfg.CannonBin = "" + require.ErrorIs(t, cfg.Check(), ErrMissingCannonBin) + cfg.CannonBin = validCannonBin + + // Require asterisc specific args + cfg.AsteriscAbsolutePreState = "" + cfg.AsteriscAbsolutePreStateBaseURL = nil + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscAbsolutePreState) + cfg.AsteriscAbsolutePreState = validAsteriscAbsolutPreState + + // Require cannon specific args + cfg.AsteriscServer = "" + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscServer) + cfg.AsteriscServer = validAsteriscOpProgramBin + + // Check final config is valid + require.NoError(t, cfg.Check()) +} diff --git a/op-challenger2/flags/flags.go b/op-challenger2/flags/flags.go new file mode 100644 index 000000000000..e088810516db --- /dev/null +++ b/op-challenger2/flags/flags.go @@ -0,0 +1,483 @@ 
+package flags
+
+import (
+	"fmt"
+	"net/url"
+	"runtime"
+	"slices"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/urfave/cli/v2"
+
+	"github.com/ethereum-optimism/optimism/op-challenger2/config"
+	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
+	opservice "github.com/ethereum-optimism/optimism/op-service"
+	openum "github.com/ethereum-optimism/optimism/op-service/enum"
+	oplog "github.com/ethereum-optimism/optimism/op-service/log"
+	opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
+	"github.com/ethereum-optimism/optimism/op-service/oppprof"
+	"github.com/ethereum-optimism/optimism/op-service/txmgr"
+)
+
+const EnvVarPrefix = "OP_CHALLENGER"
+
+// prefixEnvVars expands a bare env var name to its OP_CHALLENGER_ prefixed form.
+func prefixEnvVars(name string) []string {
+	return opservice.PrefixEnvVar(EnvVarPrefix, name)
+}
+
+var (
+	// Required Flags
+	L1EthRpcFlag = &cli.StringFlag{
+		Name:    "l1-eth-rpc",
+		Usage:   "HTTP provider URL for L1.",
+		EnvVars: prefixEnvVars("L1_ETH_RPC"),
+	}
+	L1BeaconFlag = &cli.StringFlag{
+		Name:    "l1-beacon",
+		Usage:   "Address of L1 Beacon API endpoint to use",
+		EnvVars: prefixEnvVars("L1_BEACON"),
+	}
+	RollupRpcFlag = &cli.StringFlag{
+		Name:    "rollup-rpc",
+		Usage:   "HTTP provider URL for the rollup node",
+		EnvVars: prefixEnvVars("ROLLUP_RPC"),
+	}
+	FactoryAddressFlag = &cli.StringFlag{
+		Name:    "game-factory-address",
+		Usage:   "Address of the fault game factory contract.",
+		EnvVars: prefixEnvVars("GAME_FACTORY_ADDRESS"),
+	}
+	GameAllowlistFlag = &cli.StringSliceFlag{
+		Name: "game-allowlist",
+		Usage: "List of Fault Game contract addresses the challenger is allowed to play. " +
+			"If empty, the challenger will play all games.",
+		EnvVars: prefixEnvVars("GAME_ALLOWLIST"),
+	}
+	TraceTypeFlag = &cli.StringSliceFlag{
+		Name:    "trace-type",
+		Usage:   "The trace types to support. Valid options: " + openum.EnumString(config.TraceTypes),
+		EnvVars: prefixEnvVars("TRACE_TYPE"),
+		Value:   cli.NewStringSlice(config.TraceTypeCannon.String()),
+	}
+	DatadirFlag = &cli.StringFlag{
+		Name:    "datadir",
+		Usage:   "Directory to store data generated as part of responding to games",
+		EnvVars: prefixEnvVars("DATADIR"),
+	}
+	// Optional Flags
+	MaxConcurrencyFlag = &cli.UintFlag{
+		Name:    "max-concurrency",
+		Usage:   "Maximum number of threads to use when progressing games",
+		EnvVars: prefixEnvVars("MAX_CONCURRENCY"),
+		Value:   uint(runtime.NumCPU()),
+	}
+	L2EthRpcFlag = &cli.StringFlag{
+		Name: "l2-eth-rpc",
+		// Fixed duplicated "L2" at the start of the usage text.
+		Usage:   "Address of L2 JSON-RPC endpoint to use (eth and debug namespace required) (cannon/asterisc trace type only)",
+		EnvVars: prefixEnvVars("L2_ETH_RPC"),
+	}
+	MaxPendingTransactionsFlag = &cli.Uint64Flag{
+		Name:    "max-pending-tx",
+		Usage:   "The maximum number of pending transactions. 0 for no limit.",
+		Value:   config.DefaultMaxPendingTx,
+		EnvVars: prefixEnvVars("MAX_PENDING_TX"),
+	}
+	HTTPPollInterval = &cli.DurationFlag{
+		Name:    "http-poll-interval",
+		Usage:   "Polling interval for latest-block subscription when using an HTTP RPC provider.",
+		EnvVars: prefixEnvVars("HTTP_POLL_INTERVAL"),
+		Value:   config.DefaultPollInterval,
+	}
+	AdditionalBondClaimants = &cli.StringSliceFlag{
+		Name:    "additional-bond-claimants",
+		Usage:   "List of addresses to claim bonds for, in addition to the configured transaction sender",
+		EnvVars: prefixEnvVars("ADDITIONAL_BOND_CLAIMANTS"),
+	}
+	CannonNetworkFlag = &cli.StringFlag{
+		Name: "cannon-network",
+		Usage: fmt.Sprintf(
+			"Predefined network selection. Available networks: %s (cannon trace type only)",
+			strings.Join(chaincfg.AvailableNetworks(), ", "),
+		),
+		EnvVars: prefixEnvVars("CANNON_NETWORK"),
+	}
+	CannonRollupConfigFlag = &cli.StringFlag{
+		Name:    "cannon-rollup-config",
+		Usage:   "Rollup chain parameters (cannon trace type only)",
+		EnvVars: prefixEnvVars("CANNON_ROLLUP_CONFIG"),
+	}
+	CannonL2GenesisFlag = &cli.StringFlag{
+		Name:    "cannon-l2-genesis",
+		Usage:   "Path to the op-geth genesis file (cannon trace type only)",
+		EnvVars: prefixEnvVars("CANNON_L2_GENESIS"),
+	}
+	CannonBinFlag = &cli.StringFlag{
+		Name:    "cannon-bin",
+		Usage:   "Path to cannon executable to use when generating trace data (cannon trace type only)",
+		EnvVars: prefixEnvVars("CANNON_BIN"),
+	}
+	CannonServerFlag = &cli.StringFlag{
+		Name:    "cannon-server",
+		Usage:   "Path to executable to use as pre-image oracle server when generating trace data (cannon trace type only)",
+		EnvVars: prefixEnvVars("CANNON_SERVER"),
+	}
+	CannonPreStateFlag = &cli.StringFlag{
+		Name:    "cannon-prestate",
+		Usage:   "Path to absolute prestate to use when generating trace data (cannon trace type only)",
+		EnvVars: prefixEnvVars("CANNON_PRESTATE"),
+	}
+	CannonPreStatesURLFlag = &cli.StringFlag{
+		Name:  "cannon-prestates-url",
+		Usage: "Base URL to absolute prestates to use when generating trace data. 
" + + "Prestates in this directory should be name as .json (cannon trace type only)", + EnvVars: prefixEnvVars("CANNON_PRESTATES_URL"), + } + CannonL2Flag = &cli.StringFlag{ + Name: "cannon-l2", + Usage: fmt.Sprintf("Deprecated: Use %v instead", L2EthRpcFlag.Name), + EnvVars: prefixEnvVars("CANNON_L2"), + } + CannonSnapshotFreqFlag = &cli.UintFlag{ + Name: "cannon-snapshot-freq", + Usage: "Frequency of cannon snapshots to generate in VM steps (cannon trace type only)", + EnvVars: prefixEnvVars("CANNON_SNAPSHOT_FREQ"), + Value: config.DefaultCannonSnapshotFreq, + } + CannonInfoFreqFlag = &cli.UintFlag{ + Name: "cannon-info-freq", + Usage: "Frequency of cannon info log messages to generate in VM steps (cannon trace type only)", + EnvVars: prefixEnvVars("CANNON_INFO_FREQ"), + Value: config.DefaultCannonInfoFreq, + } + AsteriscNetworkFlag = &cli.StringFlag{ + Name: "asterisc-network", + Usage: fmt.Sprintf( + "Predefined network selection. Available networks: %s (asterisc trace type only)", + strings.Join(chaincfg.AvailableNetworks(), ", "), + ), + EnvVars: prefixEnvVars("ASTERISC_NETWORK"), + } + AsteriscRollupConfigFlag = &cli.StringFlag{ + Name: "asterisc-rollup-config", + Usage: "Rollup chain parameters (asterisc trace type only)", + EnvVars: prefixEnvVars("ASTERISC_ROLLUP_CONFIG"), + } + AsteriscL2GenesisFlag = &cli.StringFlag{ + Name: "asterisc-l2-genesis", + Usage: "Path to the op-geth genesis file (asterisc trace type only)", + EnvVars: prefixEnvVars("ASTERISC_L2_GENESIS"), + } + AsteriscBinFlag = &cli.StringFlag{ + Name: "asterisc-bin", + Usage: "Path to asterisc executable to use when generating trace data (asterisc trace type only)", + EnvVars: prefixEnvVars("ASTERISC_BIN"), + } + AsteriscServerFlag = &cli.StringFlag{ + Name: "asterisc-server", + Usage: "Path to executable to use as pre-image oracle server when generating trace data (asterisc trace type only)", + EnvVars: prefixEnvVars("ASTERISC_SERVER"), + } + AsteriscPreStateFlag = &cli.StringFlag{ + Name: 
"asterisc-prestate", + Usage: "Path to absolute prestate to use when generating trace data (asterisc trace type only)", + EnvVars: prefixEnvVars("ASTERISC_PRESTATE"), + } + AsteriscPreStatesURLFlag = &cli.StringFlag{ + Name: "asterisc-prestates-url", + Usage: "Base URL to absolute prestates to use when generating trace data. " + + "Prestates in this directory should be name as .json (asterisc trace type only)", + EnvVars: prefixEnvVars("ASTERISC_PRESTATES_URL"), + } + AsteriscSnapshotFreqFlag = &cli.UintFlag{ + Name: "asterisc-snapshot-freq", + Usage: "Frequency of asterisc snapshots to generate in VM steps (asterisc trace type only)", + EnvVars: prefixEnvVars("ASTERISC_SNAPSHOT_FREQ"), + Value: config.DefaultAsteriscSnapshotFreq, + } + AsteriscInfoFreqFlag = &cli.UintFlag{ + Name: "asterisc-info-freq", + Usage: "Frequency of asterisc info log messages to generate in VM steps (asterisc trace type only)", + EnvVars: prefixEnvVars("ASTERISC_INFO_FREQ"), + Value: config.DefaultAsteriscInfoFreq, + } + GameWindowFlag = &cli.DurationFlag{ + Name: "game-window", + Usage: "The time window which the challenger will look for games to progress and claim bonds. " + + "This should include a buffer for the challenger to claim bonds for games outside the maximum game duration.", + EnvVars: prefixEnvVars("GAME_WINDOW"), + Value: config.DefaultGameWindow, + } + SelectiveClaimResolutionFlag = &cli.BoolFlag{ + Name: "selective-claim-resolution", + Usage: "Only resolve claims for the configured claimants", + EnvVars: prefixEnvVars("SELECTIVE_CLAIM_RESOLUTION"), + } + UnsafeAllowInvalidPrestate = &cli.BoolFlag{ + Name: "unsafe-allow-invalid-prestate", + Usage: "Allow responding to games where the absolute prestate is configured incorrectly. 
THIS IS UNSAFE!", + EnvVars: prefixEnvVars("UNSAFE_ALLOW_INVALID_PRESTATE"), + Hidden: true, // Hidden as this is an unsafe flag added only for testing purposes + } +) + +// requiredFlags are checked by [CheckRequired] +var requiredFlags = []cli.Flag{ + L1EthRpcFlag, + FactoryAddressFlag, + DatadirFlag, + RollupRpcFlag, + L1BeaconFlag, +} + +// optionalFlags is a list of unchecked cli flags +var optionalFlags = []cli.Flag{ + TraceTypeFlag, + MaxConcurrencyFlag, + L2EthRpcFlag, + MaxPendingTransactionsFlag, + HTTPPollInterval, + AdditionalBondClaimants, + GameAllowlistFlag, + CannonNetworkFlag, + CannonRollupConfigFlag, + CannonL2GenesisFlag, + CannonBinFlag, + CannonServerFlag, + CannonPreStateFlag, + CannonPreStatesURLFlag, + CannonL2Flag, + CannonSnapshotFreqFlag, + CannonInfoFreqFlag, + AsteriscNetworkFlag, + AsteriscRollupConfigFlag, + AsteriscL2GenesisFlag, + AsteriscBinFlag, + AsteriscServerFlag, + AsteriscPreStateFlag, + AsteriscPreStatesURLFlag, + AsteriscSnapshotFreqFlag, + AsteriscInfoFreqFlag, + GameWindowFlag, + SelectiveClaimResolutionFlag, + UnsafeAllowInvalidPrestate, +} + +func init() { + optionalFlags = append(optionalFlags, oplog.CLIFlags(EnvVarPrefix)...) + optionalFlags = append(optionalFlags, txmgr.CLIFlagsWithDefaults(EnvVarPrefix, txmgr.DefaultChallengerFlagValues)...) + optionalFlags = append(optionalFlags, opmetrics.CLIFlags(EnvVarPrefix)...) + optionalFlags = append(optionalFlags, oppprof.CLIFlags(EnvVarPrefix)...) + + Flags = append(requiredFlags, optionalFlags...) +} + +// Flags contains the list of configuration options available to the binary. 
+var Flags []cli.Flag
+
+// checkVMFlags validates the flag combination shared by the VM-backed trace
+// types (cannon and asterisc). All parameters are flag names. Either the
+// network flag, or both the rollup-config and l2-genesis flags, must be set
+// (never a mix of the two styles); the VM binary and pre-image server
+// executables are required; and at least one prestate source (local path or
+// base URL) must be provided.
+func checkVMFlags(ctx *cli.Context, network, rollupConfig, l2Genesis, bin, server, preState, preStatesURL string) error {
+	if !ctx.IsSet(network) &&
+		!(ctx.IsSet(rollupConfig) && ctx.IsSet(l2Genesis)) {
+		return fmt.Errorf("flag %v or %v and %v is required",
+			network, rollupConfig, l2Genesis)
+	}
+	if ctx.IsSet(network) &&
+		(ctx.IsSet(rollupConfig) || ctx.IsSet(l2Genesis)) {
+		return fmt.Errorf("flag %v can not be used with %v and %v",
+			network, rollupConfig, l2Genesis)
+	}
+	if !ctx.IsSet(bin) {
+		return fmt.Errorf("flag %s is required", bin)
+	}
+	if !ctx.IsSet(server) {
+		return fmt.Errorf("flag %s is required", server)
+	}
+	if !ctx.IsSet(preState) && !ctx.IsSet(preStatesURL) {
+		return fmt.Errorf("flag %s or %s is required", preStatesURL, preState)
+	}
+	return nil
+}
+
+// CheckCannonFlags validates the flags required by the cannon trace type.
+func CheckCannonFlags(ctx *cli.Context) error {
+	return checkVMFlags(ctx,
+		CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name,
+		CannonBinFlag.Name, CannonServerFlag.Name, CannonPreStateFlag.Name, CannonPreStatesURLFlag.Name)
+}
+
+// CheckAsteriscFlags validates the flags required by the asterisc trace type.
+func CheckAsteriscFlags(ctx *cli.Context) error {
+	return checkVMFlags(ctx,
+		AsteriscNetworkFlag.Name, AsteriscRollupConfigFlag.Name, AsteriscL2GenesisFlag.Name,
+		AsteriscBinFlag.Name, AsteriscServerFlag.Name, AsteriscPreStateFlag.Name, AsteriscPreStatesURLFlag.Name)
+}
+
+// CheckRequired ensures the globally required flags and the flags needed by
+// each requested trace type are all set.
+func CheckRequired(ctx *cli.Context, traceTypes []config.TraceType) error {
+	for _, f := range requiredFlags {
+		if !ctx.IsSet(f.Names()[0]) {
+			return fmt.Errorf("flag %s is required", f.Names()[0])
+		}
+	}
+	// CannonL2Flag is checked because it is an alias with L2EthRpcFlag
+	if !ctx.IsSet(CannonL2Flag.Name) && !ctx.IsSet(L2EthRpcFlag.Name) {
+		return fmt.Errorf("flag %s is required", L2EthRpcFlag.Name)
+	}
+	for _, traceType := range traceTypes {
+		switch traceType {
+		case config.TraceTypeCannon, config.TraceTypePermissioned:
+			if err := CheckCannonFlags(ctx); err != nil {
+				return err
+			}
+		case config.TraceTypeAsterisc:
+			if err := CheckAsteriscFlags(ctx); err != nil {
+				return err
+			}
+		case config.TraceTypeAlphabet:
+			// The alphabet trace type needs no extra flags.
+		default:
+			return fmt.Errorf("invalid trace type. must be one of %v", config.TraceTypes)
+		}
+	}
+	return nil
+}
+
+// parseTraceTypes parses the trace-type flag values, de-duplicating repeats.
+func parseTraceTypes(ctx *cli.Context) ([]config.TraceType, error) {
+	var traceTypes []config.TraceType
+	for _, typeName := range ctx.StringSlice(TraceTypeFlag.Name) {
+		traceType := new(config.TraceType)
+		if err := traceType.Set(typeName); err != nil {
+			return nil, err
+		}
+		if !slices.Contains(traceTypes, *traceType) {
+			traceTypes = append(traceTypes, *traceType)
+		}
+	}
+	return traceTypes, nil
+}
+
+// getL2Rpc resolves the L2 RPC endpoint, honouring the deprecated cannon-l2
+// flag as an alias for l2-eth-rpc. Setting both flags is an error; when only
+// the deprecated flag is set a warning is logged.
+func getL2Rpc(ctx *cli.Context, logger log.Logger) (string, error) {
+	if ctx.IsSet(CannonL2Flag.Name) && ctx.IsSet(L2EthRpcFlag.Name) {
+		return "", fmt.Errorf("flag %v and %v must not be both set", CannonL2Flag.Name, L2EthRpcFlag.Name)
+	}
+	l2Rpc := ""
+	if ctx.IsSet(CannonL2Flag.Name) {
+		logger.Warn(fmt.Sprintf("flag %v is deprecated, please use %v", CannonL2Flag.Name, L2EthRpcFlag.Name))
+		l2Rpc = ctx.String(CannonL2Flag.Name)
+	}
+	if ctx.IsSet(L2EthRpcFlag.Name) {
+		l2Rpc = ctx.String(L2EthRpcFlag.Name)
+	}
+	return l2Rpc, nil
+}
+
+// NewConfigFromCLI parses the Config from the provided flags or environment
variables.
+// It validates required flags, parses addresses/URLs and assembles the full
+// challenger configuration; any invalid value aborts with an error.
+func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, error) {
+	traceTypes, err := parseTraceTypes(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if err := CheckRequired(ctx, traceTypes); err != nil {
+		return nil, err
+	}
+	gameFactoryAddress, err := opservice.ParseAddress(ctx.String(FactoryAddressFlag.Name))
+	if err != nil {
+		return nil, err
+	}
+	// An empty allowlist means the challenger plays all games (see flag usage).
+	var allowedGames []common.Address
+	if ctx.StringSlice(GameAllowlistFlag.Name) != nil {
+		for _, addr := range ctx.StringSlice(GameAllowlistFlag.Name) {
+			gameAddress, err := opservice.ParseAddress(addr)
+			if err != nil {
+				return nil, err
+			}
+			allowedGames = append(allowedGames, gameAddress)
+		}
+	}
+
+	txMgrConfig := txmgr.ReadCLIConfig(ctx)
+	metricsConfig := opmetrics.ReadCLIConfig(ctx)
+	pprofConfig := oppprof.ReadCLIConfig(ctx)
+
+	maxConcurrency := ctx.Uint(MaxConcurrencyFlag.Name)
+	if maxConcurrency == 0 {
+		return nil, fmt.Errorf("%v must not be 0", MaxConcurrencyFlag.Name)
+	}
+	var claimants []common.Address
+	if ctx.IsSet(AdditionalBondClaimants.Name) {
+		for _, addrStr := range ctx.StringSlice(AdditionalBondClaimants.Name) {
+			claimant, err := opservice.ParseAddress(addrStr)
+			if err != nil {
+				return nil, fmt.Errorf("invalid additional claimant: %w", err)
+			}
+			claimants = append(claimants, claimant)
+		}
+	}
+	var cannonPrestatesURL *url.URL
+	if ctx.IsSet(CannonPreStatesURLFlag.Name) {
+		parsed, err := url.Parse(ctx.String(CannonPreStatesURLFlag.Name))
+		if err != nil {
+			return nil, fmt.Errorf("invalid cannon pre states url (%v): %w", ctx.String(CannonPreStatesURLFlag.Name), err)
+		}
+		cannonPrestatesURL = parsed
+	}
+	var asteriscPreStatesURL *url.URL
+	if ctx.IsSet(AsteriscPreStatesURLFlag.Name) {
+		parsed, err := url.Parse(ctx.String(AsteriscPreStatesURLFlag.Name))
+		if err != nil {
+			return nil, fmt.Errorf("invalid asterisc pre states url (%v): %w", ctx.String(AsteriscPreStatesURLFlag.Name), err)
+		}
+		asteriscPreStatesURL = parsed
+	}
+	l2Rpc, err := getL2Rpc(ctx, logger)
+	if err != nil {
+		return nil, err
+	}
+	return &config.Config{
+		// Required Flags
+		L1EthRpc:                        ctx.String(L1EthRpcFlag.Name),
+		L1Beacon:                        ctx.String(L1BeaconFlag.Name),
+		TraceTypes:                      traceTypes,
+		GameFactoryAddress:              gameFactoryAddress,
+		GameAllowlist:                   allowedGames,
+		GameWindow:                      ctx.Duration(GameWindowFlag.Name),
+		MaxConcurrency:                  maxConcurrency,
+		L2Rpc:                           l2Rpc,
+		MaxPendingTx:                    ctx.Uint64(MaxPendingTransactionsFlag.Name),
+		PollInterval:                    ctx.Duration(HTTPPollInterval.Name),
+		AdditionalBondClaimants:         claimants,
+		RollupRpc:                       ctx.String(RollupRpcFlag.Name),
+		CannonNetwork:                   ctx.String(CannonNetworkFlag.Name),
+		CannonRollupConfigPath:          ctx.String(CannonRollupConfigFlag.Name),
+		CannonL2GenesisPath:             ctx.String(CannonL2GenesisFlag.Name),
+		CannonBin:                       ctx.String(CannonBinFlag.Name),
+		CannonServer:                    ctx.String(CannonServerFlag.Name),
+		CannonAbsolutePreState:          ctx.String(CannonPreStateFlag.Name),
+		CannonAbsolutePreStateBaseURL:   cannonPrestatesURL,
+		Datadir:                         ctx.String(DatadirFlag.Name),
+		CannonSnapshotFreq:              ctx.Uint(CannonSnapshotFreqFlag.Name),
+		CannonInfoFreq:                  ctx.Uint(CannonInfoFreqFlag.Name),
+		AsteriscNetwork:                 ctx.String(AsteriscNetworkFlag.Name),
+		AsteriscRollupConfigPath:        ctx.String(AsteriscRollupConfigFlag.Name),
+		AsteriscL2GenesisPath:           ctx.String(AsteriscL2GenesisFlag.Name),
+		AsteriscBin:                     ctx.String(AsteriscBinFlag.Name),
+		AsteriscServer:                  ctx.String(AsteriscServerFlag.Name),
+		AsteriscAbsolutePreState:        ctx.String(AsteriscPreStateFlag.Name),
+		AsteriscAbsolutePreStateBaseURL: asteriscPreStatesURL,
+		AsteriscSnapshotFreq:            ctx.Uint(AsteriscSnapshotFreqFlag.Name),
+		AsteriscInfoFreq:                ctx.Uint(AsteriscInfoFreqFlag.Name),
+		TxMgrConfig:                     txMgrConfig,
+		MetricsConfig:                   metricsConfig,
+		PprofConfig:                     pprofConfig,
+		SelectiveClaimResolution:        ctx.Bool(SelectiveClaimResolutionFlag.Name),
+		AllowInvalidPrestate:            ctx.Bool(UnsafeAllowInvalidPrestate.Name),
+	}, nil
+}
diff --git a/op-challenger2/flags/flags_test.go b/op-challenger2/flags/flags_test.go
new file mode 100644
index 000000000000..27a361ca900a
--- /dev/null
+++ b/op-challenger2/flags/flags_test.go
@@ -0,0 +1,92 @@
+package flags
+
+import (
+	"reflect"
+	"slices"
+	"strings"
+	"testing"
+
+	opservice "github.com/ethereum-optimism/optimism/op-service"
+	"github.com/ethereum-optimism/optimism/op-service/txmgr"
+
+	"github.com/stretchr/testify/require"
+	"github.com/urfave/cli/v2"
+)
+
+// TestUniqueFlags asserts that all flag names are unique, to avoid accidental conflicts between the many flags.
+func TestUniqueFlags(t *testing.T) {
+	seenCLI := make(map[string]struct{})
+	for _, flag := range Flags {
+		for _, name := range flag.Names() {
+			if _, ok := seenCLI[name]; ok {
+				t.Errorf("duplicate flag %s", name)
+				continue
+			}
+			seenCLI[name] = struct{}{}
+		}
+	}
+}
+
+// TestUniqueEnvVars asserts that all flag env vars are unique, to avoid accidental conflicts between the many flags.
+func TestUniqueEnvVars(t *testing.T) {
+	seenCLI := make(map[string]struct{})
+	for _, flag := range Flags {
+		envVar := envVarForFlag(flag)
+		// Flags without an env var (empty string) are not treated as duplicates.
+		if _, ok := seenCLI[envVar]; envVar != "" && ok {
+			t.Errorf("duplicate flag env var %s", envVar)
+			continue
+		}
+		seenCLI[envVar] = struct{}{}
+	}
+}
+
+// TestCorrectEnvVarPrefix asserts every flag's env var carries the
+// OP_CHALLENGER_ prefix and is well-formed.
+func TestCorrectEnvVarPrefix(t *testing.T) {
+	for _, flag := range Flags {
+		envVar := envVarForFlag(flag)
+		if envVar == "" {
+			t.Errorf("Failed to find EnvVar for flag %v", flag.Names()[0])
+		}
+		if !strings.HasPrefix(envVar, "OP_CHALLENGER_") {
+			t.Errorf("Flag %v env var (%v) does not start with OP_CHALLENGER_", flag.Names()[0], envVar)
+		}
+		if strings.Contains(envVar, "__") {
+			t.Errorf("Flag %v env var (%v) has duplicate underscores", flag.Names()[0], envVar)
+		}
+	}
+}
+
+// envVarForFlag extracts the first env var of a flag via reflection, or ""
+// when the flag type has no EnvVars field or the list is empty.
+func envVarForFlag(flag cli.Flag) string {
+	values := reflect.ValueOf(flag)
+	envVarValue := values.Elem().FieldByName("EnvVars")
+	if envVarValue == (reflect.Value{}) || envVarValue.Len() == 0 {
+		return ""
+	}
+	return envVarValue.Index(0).String()
+}
+
+func 
TestEnvVarFormat(t *testing.T) {
+	for _, flag := range Flags {
+		flag := flag
+		flagName := flag.Names()[0]
+
+		skippedFlags := []string{
+			txmgr.FeeLimitMultiplierFlagName,
+			txmgr.TxSendTimeoutFlagName,
+			txmgr.TxNotInMempoolTimeoutFlagName,
+		}
+
+		t.Run(flagName, func(t *testing.T) {
+			if slices.Contains(skippedFlags, flagName) {
+				t.Skipf("Skipping flag %v which is known to not have a standard flag name <-> env var conversion", flagName)
+			}
+			envFlagGetter, ok := flag.(interface {
+				GetEnvVars() []string
+			})
+			// Assert the type assertion succeeded BEFORE calling GetEnvVars:
+			// previously GetEnvVars was invoked first, so a flag type without
+			// the method panicked on a nil interface instead of failing cleanly.
+			require.True(t, ok, "must be able to cast the flag to an EnvVar interface")
+			envFlags := envFlagGetter.GetEnvVars()
+			require.Equal(t, 1, len(envFlags), "flags should have exactly one env var")
+			expectedEnvVar := opservice.FlagNameToEnvVarName(flagName, "OP_CHALLENGER")
+			require.Equal(t, expectedEnvVar, envFlags[0])
+		})
+	}
+}
diff --git a/op-challenger2/game/disk.go b/op-challenger2/game/disk.go
new file mode 100644
index 000000000000..7385400860b4
--- /dev/null
+++ b/op-challenger2/game/disk.go
@@ -0,0 +1,55 @@
+package game
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"slices"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/common"
+)
+
+const gameDirPrefix = "game-"
+
+// diskManager coordinates the storage of game data on disk.
+type diskManager struct {
+	datadir string
+}
+
+// newDiskManager creates a diskManager rooted at dir.
+func newDiskManager(dir string) *diskManager {
+	return &diskManager{datadir: dir}
+}
+
+// DirForGame returns the directory used to store data for the game at addr.
+func (d *diskManager) DirForGame(addr common.Address) string {
+	return filepath.Join(d.datadir, gameDirPrefix+addr.Hex())
+}
+
+// RemoveAllExcept deletes the data directories of all games except those in
+// keep, ignoring non-game files and directories in the datadir.
+func (d *diskManager) RemoveAllExcept(keep []common.Address) error {
+	entries, err := os.ReadDir(d.datadir)
+	if err != nil {
+		return fmt.Errorf("failed to list directory: %w", err)
+	}
+	var errs []error
+	for _, entry := range entries {
+		if !entry.IsDir() || !strings.HasPrefix(entry.Name(), gameDirPrefix) {
+			// Skip files and directories that don't have the game directory prefix.
+ // While random content shouldn't be in our datadir, we want to avoid + // deleting things like OS generated files. + continue + } + name := entry.Name()[len(gameDirPrefix):] + addr := common.HexToAddress(name) + if addr == (common.Address{}) { + // Ignore directories with non-address names. + continue + } + if slices.Contains(keep, addr) { + // Preserve data for games we should keep. + continue + } + errs = append(errs, os.RemoveAll(filepath.Join(d.datadir, entry.Name()))) + } + return errors.Join(errs...) +} diff --git a/op-challenger2/game/disk_test.go b/op-challenger2/game/disk_test.go new file mode 100644 index 000000000000..dc5fccfd62d5 --- /dev/null +++ b/op-challenger2/game/disk_test.go @@ -0,0 +1,57 @@ +package game + +import ( + "os" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestDiskManager_DirForGame(t *testing.T) { + baseDir := t.TempDir() + addr := common.Address{0x53} + disk := newDiskManager(baseDir) + result := disk.DirForGame(addr) + require.Equal(t, filepath.Join(baseDir, gameDirPrefix+addr.Hex()), result) +} + +func TestDiskManager_RemoveAllExcept(t *testing.T) { + baseDir := t.TempDir() + keep := common.Address{0x53} + delete := common.Address{0xaa} + disk := newDiskManager(baseDir) + keepDir := disk.DirForGame(keep) + deleteDir := disk.DirForGame(delete) + + unexpectedFile := filepath.Join(baseDir, "file.txt") + require.NoError(t, os.WriteFile(unexpectedFile, []byte("test"), 0644)) + unexpectedDir := filepath.Join(baseDir, "notagame") + require.NoError(t, os.MkdirAll(unexpectedDir, 0777)) + invalidHexDir := filepath.Join(baseDir, gameDirPrefix+"0xNOPE") + require.NoError(t, os.MkdirAll(invalidHexDir, 0777)) + + populateDir := func(dir string) []string { + require.NoError(t, os.MkdirAll(dir, 0777)) + file1 := filepath.Join(dir, "test.txt") + require.NoError(t, os.WriteFile(file1, []byte("foo"), 0644)) + nestedDirs := filepath.Join(dir, "subdir", "deep") + 
require.NoError(t, os.MkdirAll(nestedDirs, 0777))
+		file2 := filepath.Join(nestedDirs, ".foo.txt")
+		require.NoError(t, os.WriteFile(file2, []byte("foo"), 0644))
+		return []string{file1, file2}
+	}
+
+	keepFiles := populateDir(keepDir)
+	populateDir(deleteDir)
+
+	require.NoError(t, disk.RemoveAllExcept([]common.Address{keep}))
+	require.NoDirExists(t, deleteDir, "should have deleted directory")
+	for _, file := range keepFiles {
+		require.FileExists(t, file, "should have kept file for active game")
+	}
+	require.FileExists(t, unexpectedFile, "should not delete unexpected file")
+	require.DirExists(t, unexpectedDir, "should not delete unexpected dir")
+	require.DirExists(t, invalidHexDir, "should not delete dir with invalid address")
+}
diff --git a/op-challenger2/game/fault/agent.go b/op-challenger2/game/fault/agent.go
new file mode 100644
index 000000000000..18ec37555d39
--- /dev/null
+++ b/op-challenger2/game/fault/agent.go
@@ -0,0 +1,244 @@
+package fault
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"slices"
+	"sync"
+	"time"
+
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/solver"
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types"
+	gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types"
+	"github.com/ethereum-optimism/optimism/op-challenger2/metrics"
+	"github.com/ethereum-optimism/optimism/op-service/clock"
+	"github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+// Responder takes a response action & executes.
+// For full op-challenger2 this means executing the transaction on chain.
+type Responder interface {
+	// CallResolve queries the game's resolution status without sending a transaction.
+	CallResolve(ctx context.Context) (gameTypes.GameStatus, error)
+	// Resolve resolves the game.
+	Resolve() error
+	// CallResolveClaim checks (without sending a transaction) whether the claim
+	// at claimIdx can currently be resolved; a non-nil error means it cannot.
+	CallResolveClaim(ctx context.Context, claimIdx uint64) error
+	// ResolveClaims resolves the claims at the given indices.
+	ResolveClaims(claimIdx ...uint64) error
+	// PerformAction executes a single game action (move, step or L2 block challenge).
+	PerformAction(ctx context.Context, action types.Action) error
+}
+
+// ClaimLoader provides read access to the on-chain claim state of a game.
+type ClaimLoader interface {
+	GetAllClaims(ctx context.Context, block rpcblock.Block) ([]types.Claim, error)
+	IsL2BlockNumberChallenged(ctx context.Context, block rpcblock.Block) (bool, error)
+}
+
+// Agent plays a single fault dispute game: it loads claims, solves for the
+// next required actions, performs them and resolves claims/the game.
+type Agent struct {
+	metrics          metrics.Metricer
+	systemClock      clock.Clock      // wall clock used for metrics timing
+	l1Clock          types.ClockReader // L1 time source used for chess clock checks
+	solver           *solver.GameSolver
+	loader           ClaimLoader
+	responder        Responder
+	selective        bool             // only resolve claims involving the configured claimants
+	claimants        []common.Address // addresses this challenger acts on behalf of
+	maxDepth         types.Depth
+	maxClockDuration time.Duration
+	log              log.Logger
+}
+
+// NewAgent creates an Agent for a single game, wiring up a game solver for
+// the supplied trace accessor and max depth.
+func NewAgent(
+	m metrics.Metricer,
+	systemClock clock.Clock,
+	l1Clock types.ClockReader,
+	loader ClaimLoader,
+	maxDepth types.Depth,
+	maxClockDuration time.Duration,
+	trace types.TraceAccessor,
+	responder Responder,
+	log log.Logger,
+	selective bool,
+	claimants []common.Address,
+) *Agent {
+	return &Agent{
+		metrics:          m,
+		systemClock:      systemClock,
+		l1Clock:          l1Clock,
+		solver:           solver.NewGameSolver(maxDepth, trace),
+		loader:           loader,
+		responder:        responder,
+		selective:        selective,
+		claimants:        claimants,
+		maxDepth:         maxDepth,
+		maxClockDuration: maxClockDuration,
+		log:              log,
+	}
+}
+
+// Act iterates the game & performs all of the next actions.
+func (a *Agent) Act(ctx context.Context) error {
+	// A resolvable game needs no further moves.
+	if a.tryResolve(ctx) {
+		return nil
+	}
+
+	start := a.systemClock.Now()
+	defer func() {
+		a.metrics.RecordGameActTime(a.systemClock.Since(start).Seconds())
+	}()
+
+	if challenged, err := a.loader.IsL2BlockNumberChallenged(ctx, rpcblock.Latest); err != nil {
+		return fmt.Errorf("failed to check if L2 block number already challenged: %w", err)
+	} else if challenged {
+		a.log.Debug("Skipping game with already challenged L2 block number")
+		return nil
+	}
+
+	game, err := a.newGameFromContracts(ctx)
+	if err != nil {
+		return fmt.Errorf("create game from contracts: %w", err)
+	}
+
+	actions, err := a.solver.CalculateNextActions(ctx, game)
+	if err != nil {
+		// Deliberately not returned: any actions that were calculated before
+		// the error are still performed below.
+		a.log.Error("Failed to calculate all required moves", "err", err)
+	}
+
+	// Perform all actions concurrently and wait for them to complete.
+	var wg sync.WaitGroup
+	wg.Add(len(actions))
+	for _, action := range actions {
+		go a.performAction(ctx, &wg, action)
+	}
+	wg.Wait()
+	return nil
+}
+
+// performAction executes a single action via the responder, logging the
+// action details. Failures are logged rather than propagated.
+func (a *Agent) performAction(ctx context.Context, wg *sync.WaitGroup, action types.Action) {
+	defer wg.Done()
+	actionLog := a.log.New("action", action.Type)
+	if action.Type == types.ActionTypeStep {
+		containsOracleData := action.OracleData != nil
+		isLocal := containsOracleData && action.OracleData.IsLocal
+		actionLog = actionLog.New(
+			"is_attack", action.IsAttack,
+			"parent", action.ParentClaim.ContractIndex,
+			"prestate", common.Bytes2Hex(action.PreState),
+			"proof", common.Bytes2Hex(action.ProofData),
+			"containsOracleData", containsOracleData,
+			"isLocalPreimage", isLocal,
+		)
+		if action.OracleData != nil {
+			actionLog = actionLog.New("oracleKey", common.Bytes2Hex(action.OracleData.OracleKey))
+		}
+	} else if action.Type == types.ActionTypeMove {
+		actionLog = actionLog.New("is_attack", action.IsAttack, "parent", action.ParentClaim.ContractIndex, "value", action.Value)
+	}
+
+	switch action.Type {
+	case types.ActionTypeMove:
+		a.metrics.RecordGameMove()
+	case types.ActionTypeStep:
+		a.metrics.RecordGameStep()
+	case types.ActionTypeChallengeL2BlockNumber:
+		a.metrics.RecordGameL2Challenge()
+	}
+	actionLog.Info("Performing action")
+	err := a.responder.PerformAction(ctx, action)
+	if err != nil {
+		actionLog.Error("Action failed", "err", err)
+	}
+}
+
+// tryResolve resolves the game if it is in a winning state
+// Returns true if the game is resolvable (regardless of whether it was actually resolved)
+func (a *Agent) tryResolve(ctx context.Context) bool {
+	if err := a.resolveClaims(ctx); err != nil {
+		a.log.Error("Failed to resolve claims", "err", err)
+		return false
+	}
+	status, err := a.responder.CallResolve(ctx)
+	if err != nil || status == gameTypes.GameStatusInProgress {
+		return false
+	}
+	a.log.Info("Resolving game")
+	if err := a.responder.Resolve(); err != nil {
+		a.log.Error("Failed to resolve the game", "err", err)
+	}
+	return true
+}
+
+// errNoResolvableClaims signals that a resolution pass found nothing to do.
+var errNoResolvableClaims = errors.New("no resolvable claims")
+
+// tryResolveClaims performs one pass over all claims, resolving every claim
+// whose chess clock has expired (and, in selective mode, which involves one
+// of the configured claimants). Returns errNoResolvableClaims when the pass
+// resolved nothing.
+func (a *Agent) tryResolveClaims(ctx context.Context) error {
+	claims, err := a.loader.GetAllClaims(ctx, rpcblock.Latest)
+	if err != nil {
+		return fmt.Errorf("failed to fetch claims: %w", err)
+	}
+	if len(claims) == 0 {
+		return errNoResolvableClaims
+	}
+
+	var resolvableClaims []uint64
+	for _, claim := range claims {
+		var parent types.Claim
+		if !claim.IsRootPosition() {
+			parent = claims[claim.ParentContractIndex]
+		}
+		// Claims whose chess clock has not yet expired cannot be resolved.
+		if types.ChessClock(a.l1Clock.Now(), claim, parent) <= a.maxClockDuration {
+			continue
+		}
+		if a.selective {
+			a.log.Trace("Selective claim resolution, checking if claim is incentivized", "claimIdx", claim.ContractIndex)
+			isUncounteredClaim := slices.Contains(a.claimants, claim.Claimant) && claim.CounteredBy == common.Address{}
+			ourCounter := slices.Contains(a.claimants, claim.CounteredBy)
+			if !isUncounteredClaim && !ourCounter {
+				a.log.Debug("Skipping claim to check resolution", "claimIdx", claim.ContractIndex)
+				continue
+			}
+		}
+		a.log.Trace("Checking if claim is resolvable", "claimIdx", claim.ContractIndex)
+		if err := 
a.responder.CallResolveClaim(ctx, uint64(claim.ContractIndex)); err == nil {
+			a.log.Info("Resolving claim", "claimIdx", claim.ContractIndex)
+			resolvableClaims = append(resolvableClaims, uint64(claim.ContractIndex))
+		}
+	}
+	if len(resolvableClaims) == 0 {
+		return errNoResolvableClaims
+	}
+	a.log.Info("Resolving claims", "numClaims", len(resolvableClaims))
+
+	if err := a.responder.ResolveClaims(resolvableClaims...); err != nil {
+		// Submission failure is logged rather than returned; the next pass
+		// will attempt the claims again.
+		a.log.Error("Failed to resolve claims", "err", err)
+	}
+	return nil
+}
+
+// resolveClaims repeatedly runs resolution passes until a pass finds no more
+// resolvable claims, recording the total time spent in metrics.
+func (a *Agent) resolveClaims(ctx context.Context) error {
+	start := a.systemClock.Now()
+	defer func() {
+		a.metrics.RecordClaimResolutionTime(a.systemClock.Since(start).Seconds())
+	}()
+	for {
+		err := a.tryResolveClaims(ctx)
+		switch err {
+		case errNoResolvableClaims:
+			return nil
+		case nil:
+			continue
+		default:
+			return err
+		}
+	}
+}
+
+// newGameFromContracts initializes a new game state from the state in the contract
+func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) {
+	claims, err := a.loader.GetAllClaims(ctx, rpcblock.Latest)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch claims: %w", err)
+	}
+	if len(claims) == 0 {
+		return nil, errors.New("no claims")
+	}
+	game := types.NewGameState(claims, a.maxDepth)
+	return game, nil
+}
diff --git a/op-challenger2/game/fault/agent_test.go b/op-challenger2/game/fault/agent_test.go
new file mode 100644
index 000000000000..1d15b76f42e2
--- /dev/null
+++ b/op-challenger2/game/fault/agent_test.go
@@ -0,0 +1,265 @@
+package fault
+
+import (
+	"context"
+	"errors"
+	"math/big"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace"
+	"github.com/ethereum-optimism/optimism/op-service/clock"
+	"github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+
+	
"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/test" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/alphabet" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum-optimism/optimism/op-service/testlog" +) + +var l1Time = time.UnixMilli(100) + +func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) { + ctx := context.Background() + + tests := []struct { + name string + callResolveStatus gameTypes.GameStatus + }{ + { + name: "DefenderWon", + callResolveStatus: gameTypes.GameStatusDefenderWon, + }, + { + name: "ChallengerWon", + callResolveStatus: gameTypes.GameStatusChallengerWon, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + agent, claimLoader, responder := setupTestAgent(t) + responder.callResolveStatus = test.callResolveStatus + + require.NoError(t, agent.Act(ctx)) + + require.Equal(t, 1, responder.callResolveCount, "should check if game is resolvable") + require.Equal(t, 1, claimLoader.callCount, "should fetch claims once for resolveClaim") + + require.EqualValues(t, 1, responder.resolveCount, "should resolve winning game") + }) + } +} + +func TestDoNotMakeMovesWhenL2BlockNumberChallenged(t *testing.T) { + ctx := context.Background() + + agent, claimLoader, responder := setupTestAgent(t) + claimLoader.blockNumChallenged = true + + require.NoError(t, agent.Act(ctx)) + + require.Equal(t, 1, responder.callResolveCount, "should check if game is resolvable") + require.Equal(t, 1, claimLoader.callCount, "should fetch claims only once for resolveClaim") +} + +func createClaimsWithClaimants(t *testing.T, d types.Depth) []types.Claim { + claimBuilder := test.NewClaimBuilder(t, d, alphabet.NewTraceProvider(big.NewInt(0), d)) + rootClaim := claimBuilder.CreateRootClaim() + claim1 := rootClaim 
+ claim1.Claimant = common.BigToAddress(big.NewInt(1)) + claim2 := claimBuilder.AttackClaim(claim1) + claim2.Claimant = common.BigToAddress(big.NewInt(2)) + claim3 := claimBuilder.AttackClaim(claim2) + claim3.Claimant = common.BigToAddress(big.NewInt(3)) + return []types.Claim{claim1, claim2, claim3} +} + +func TestAgent_SelectiveClaimResolution(t *testing.T) { + ctx := context.Background() + + tests := []struct { + name string + callResolveStatus gameTypes.GameStatus + selective bool + claimants []common.Address + claims []types.Claim + expectedResolveCount int + }{ + { + name: "NonSelectiveEmptyClaimants", + callResolveStatus: gameTypes.GameStatusDefenderWon, + selective: false, + claimants: []common.Address{}, + claims: createClaimsWithClaimants(t, types.Depth(4)), + expectedResolveCount: 3, + }, + { + name: "NonSelectiveWithClaimants", + callResolveStatus: gameTypes.GameStatusDefenderWon, + selective: false, + claimants: []common.Address{common.BigToAddress(big.NewInt(1))}, + claims: createClaimsWithClaimants(t, types.Depth(4)), + expectedResolveCount: 3, + }, + { + name: "SelectiveEmptyClaimants", + callResolveStatus: gameTypes.GameStatusDefenderWon, + selective: true, + claimants: []common.Address{}, + claims: createClaimsWithClaimants(t, types.Depth(4)), + }, + { + name: "SelectiveWithClaimants", + callResolveStatus: gameTypes.GameStatusDefenderWon, + selective: true, + claimants: []common.Address{common.BigToAddress(big.NewInt(1))}, + claims: createClaimsWithClaimants(t, types.Depth(4)), + expectedResolveCount: 1, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + agent, claimLoader, responder := setupTestAgent(t) + agent.selective = test.selective + agent.claimants = test.claimants + claimLoader.maxLoads = 1 + claimLoader.claims = test.claims + responder.callResolveStatus = test.callResolveStatus + + require.NoError(t, agent.Act(ctx)) + + require.Equal(t, test.expectedResolveCount, 
responder.callResolveClaimCount, "should check if game is resolvable") + require.Equal(t, test.expectedResolveCount, responder.resolveClaimCount, "should check if game is resolvable") + }) + } +} + +func TestSkipAttemptingToResolveClaimsWhenClockNotExpired(t *testing.T) { + agent, claimLoader, responder := setupTestAgent(t) + responder.callResolveErr = errors.New("game is not resolvable") + responder.callResolveClaimErr = errors.New("claim is not resolvable") + depth := types.Depth(4) + claimBuilder := test.NewClaimBuilder(t, depth, alphabet.NewTraceProvider(big.NewInt(0), depth)) + + rootTime := l1Time.Add(-agent.maxClockDuration - 5*time.Minute) + gameBuilder := claimBuilder.GameBuilder(test.WithClock(rootTime, 0)) + gameBuilder.Seq(). + Attack(test.WithClock(rootTime.Add(5*time.Minute), 5*time.Minute)). + Defend(test.WithClock(rootTime.Add(7*time.Minute), 2*time.Minute)). + Attack(test.WithClock(rootTime.Add(11*time.Minute), 4*time.Minute)) + claimLoader.claims = gameBuilder.Game.Claims() + + require.NoError(t, agent.Act(context.Background())) + + // Currently tries to resolve the first two claims because their clocks have expired, but doesn't detect that + // they have unresolvable children. 
+ require.Equal(t, 2, responder.callResolveClaimCount) +} + +func TestLoadClaimsWhenGameNotResolvable(t *testing.T) { + // Checks that if the game isn't resolvable, the agent continues on to start checking claims + agent, claimLoader, responder := setupTestAgent(t) + responder.callResolveErr = errors.New("game is not resolvable") + responder.callResolveClaimErr = errors.New("claim is not resolvable") + depth := types.Depth(4) + claimBuilder := test.NewClaimBuilder(t, depth, alphabet.NewTraceProvider(big.NewInt(0), depth)) + + claimLoader.claims = []types.Claim{ + claimBuilder.CreateRootClaim(), + } + + require.NoError(t, agent.Act(context.Background())) + + require.EqualValues(t, 2, claimLoader.callCount, "should load claims for unresolvable game") + require.EqualValues(t, responder.callResolveClaimCount, 1, "should check if claim is resolvable") + require.Zero(t, responder.resolveClaimCount, "should not send resolveClaim") +} + +func setupTestAgent(t *testing.T) (*Agent, *stubClaimLoader, *stubResponder) { + logger := testlog.Logger(t, log.LevelInfo) + claimLoader := &stubClaimLoader{} + depth := types.Depth(4) + gameDuration := 3 * time.Minute + provider := alphabet.NewTraceProvider(big.NewInt(0), depth) + responder := &stubResponder{} + systemClock := clock.NewDeterministicClock(time.UnixMilli(120200)) + l1Clock := clock.NewDeterministicClock(l1Time) + agent := NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, gameDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}) + return agent, claimLoader, responder +} + +type stubClaimLoader struct { + callCount int + maxLoads int + claims []types.Claim + blockNumChallenged bool +} + +func (s *stubClaimLoader) IsL2BlockNumberChallenged(_ context.Context, _ rpcblock.Block) (bool, error) { + return s.blockNumChallenged, nil +} + +func (s *stubClaimLoader) GetAllClaims(_ context.Context, _ rpcblock.Block) ([]types.Claim, error) { + s.callCount++ + if 
s.callCount > s.maxLoads && s.maxLoads != 0 { + return []types.Claim{}, nil + } + return s.claims, nil +} + +type stubResponder struct { + l sync.Mutex + callResolveCount int + callResolveStatus gameTypes.GameStatus + callResolveErr error + + resolveCount int + resolveErr error + + callResolveClaimCount int + callResolveClaimErr error + resolveClaimCount int +} + +func (s *stubResponder) CallResolve(_ context.Context) (gameTypes.GameStatus, error) { + s.l.Lock() + defer s.l.Unlock() + s.callResolveCount++ + return s.callResolveStatus, s.callResolveErr +} + +func (s *stubResponder) Resolve() error { + s.l.Lock() + defer s.l.Unlock() + s.resolveCount++ + return s.resolveErr +} + +func (s *stubResponder) CallResolveClaim(_ context.Context, _ uint64) error { + s.l.Lock() + defer s.l.Unlock() + s.callResolveClaimCount++ + return s.callResolveClaimErr +} + +func (s *stubResponder) ResolveClaims(claims ...uint64) error { + s.l.Lock() + defer s.l.Unlock() + s.resolveClaimCount += len(claims) + return nil +} + +func (s *stubResponder) PerformAction(_ context.Context, _ types.Action) error { + return nil +} diff --git a/op-challenger2/game/fault/claims/claimer.go b/op-challenger2/game/fault/claims/claimer.go new file mode 100644 index 000000000000..06972cf675fc --- /dev/null +++ b/op-challenger2/game/fault/claims/claimer.go @@ -0,0 +1,96 @@ +package claims + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +type TxSender interface { + SendAndWaitSimple(txPurpose string, txs ...txmgr.TxCandidate) error +} + +type BondClaimMetrics interface { + RecordBondClaimed(amount uint64) +} + +type BondContract interface { + GetCredit(ctx context.Context, recipient common.Address) (*big.Int, 
types.GameStatus, error) + ClaimCreditTx(ctx context.Context, recipient common.Address) (txmgr.TxCandidate, error) +} + +type BondContractCreator func(game types.GameMetadata) (BondContract, error) + +type Claimer struct { + logger log.Logger + metrics BondClaimMetrics + contractCreator BondContractCreator + txSender TxSender + claimants []common.Address +} + +var _ BondClaimer = (*Claimer)(nil) + +func NewBondClaimer(l log.Logger, m BondClaimMetrics, contractCreator BondContractCreator, txSender TxSender, claimants ...common.Address) *Claimer { + return &Claimer{ + logger: l, + metrics: m, + contractCreator: contractCreator, + txSender: txSender, + claimants: claimants, + } +} + +func (c *Claimer) ClaimBonds(ctx context.Context, games []types.GameMetadata) (err error) { + for _, game := range games { + for _, claimant := range c.claimants { + err = errors.Join(err, c.claimBond(ctx, game, claimant)) + } + } + return err +} + +func (c *Claimer) claimBond(ctx context.Context, game types.GameMetadata, addr common.Address) error { + c.logger.Debug("Attempting to claim bonds for", "game", game.Proxy, "addr", addr) + + contract, err := c.contractCreator(game) + if err != nil { + return fmt.Errorf("failed to create bond contract: %w", err) + } + + credit, status, err := contract.GetCredit(ctx, addr) + if err != nil { + return fmt.Errorf("failed to get credit: %w", err) + } + + if status == types.GameStatusInProgress { + c.logger.Debug("Not claiming credit from in progress game", "game", game.Proxy, "addr", addr, "status", status) + return nil + } + if credit.Cmp(big.NewInt(0)) == 0 { + c.logger.Debug("No credit to claim", "game", game.Proxy, "addr", addr) + return nil + } + + candidate, err := contract.ClaimCreditTx(ctx, addr) + if errors.Is(err, contracts.ErrSimulationFailed) { + c.logger.Debug("Credit still locked", "game", game.Proxy, "addr", addr) + return nil + } else if err != nil { + return fmt.Errorf("failed to create credit claim tx: %w", err) + } + + if err = 
c.txSender.SendAndWaitSimple("claim credit", candidate); err != nil { + return fmt.Errorf("failed to claim credit: %w", err) + } + + c.metrics.RecordBondClaimed(credit.Uint64()) + return nil +} diff --git a/op-challenger2/game/fault/claims/claimer_test.go b/op-challenger2/game/fault/claims/claimer_test.go new file mode 100644 index 000000000000..b9f9c9c44eb5 --- /dev/null +++ b/op-challenger2/game/fault/claims/claimer_test.go @@ -0,0 +1,173 @@ +package claims + +import ( + "context" + "errors" + "fmt" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +var ( + mockTxMgrSendError = errors.New("mock tx mgr send error") +) + +func TestClaimer_ClaimBonds(t *testing.T) { + t.Run("MultipleBondClaimsSucceed", func(t *testing.T) { + gameAddr := common.HexToAddress("0x1234") + c, m, contract, txSender := newTestClaimer(t) + contract.credit[txSender.From()] = 1 + err := c.ClaimBonds(context.Background(), []types.GameMetadata{{Proxy: gameAddr}, {Proxy: gameAddr}, {Proxy: gameAddr}}) + require.NoError(t, err) + require.Equal(t, 3, txSender.sends) + require.Equal(t, 3, m.RecordBondClaimedCalls) + }) + + t.Run("BondClaimSucceeds", func(t *testing.T) { + gameAddr := common.HexToAddress("0x1234") + c, m, contract, txSender := newTestClaimer(t) + contract.credit[txSender.From()] = 1 + err := c.ClaimBonds(context.Background(), []types.GameMetadata{{Proxy: gameAddr}}) + require.NoError(t, err) + require.Equal(t, 1, txSender.sends) + require.Equal(t, 1, m.RecordBondClaimedCalls) + }) + + t.Run("BondClaimSucceedsForMultipleAddresses", func(t *testing.T) { + claimant1 := common.Address{0xaa} + claimant2 := 
common.Address{0xbb} + claimant3 := common.Address{0xcc} + gameAddr := common.HexToAddress("0x1234") + c, m, contract, txSender := newTestClaimer(t, claimant1, claimant2, claimant3) + contract.credit[claimant1] = 1 + contract.credit[claimant2] = 2 + contract.credit[claimant3] = 0 + err := c.ClaimBonds(context.Background(), []types.GameMetadata{{Proxy: gameAddr}}) + require.NoError(t, err) + require.Equal(t, 2, txSender.sends) + require.Equal(t, 2, m.RecordBondClaimedCalls) + }) + + t.Run("BondClaimSkippedForInProgressGame", func(t *testing.T) { + gameAddr := common.HexToAddress("0x1234") + c, m, contract, txSender := newTestClaimer(t) + contract.credit[txSender.From()] = 1 + contract.status = types.GameStatusInProgress + err := c.ClaimBonds(context.Background(), []types.GameMetadata{{Proxy: gameAddr}}) + require.NoError(t, err) + require.Equal(t, 0, txSender.sends) + require.Equal(t, 0, m.RecordBondClaimedCalls) + }) + + t.Run("BondClaimFails", func(t *testing.T) { + gameAddr := common.HexToAddress("0x1234") + c, m, contract, txSender := newTestClaimer(t) + txSender.sendFails = true + contract.credit[txSender.From()] = 1 + err := c.ClaimBonds(context.Background(), []types.GameMetadata{{Proxy: gameAddr}}) + require.ErrorIs(t, err, mockTxMgrSendError) + require.Equal(t, 1, txSender.sends) + require.Equal(t, 0, m.RecordBondClaimedCalls) + }) + + t.Run("BondStillLocked", func(t *testing.T) { + gameAddr := common.HexToAddress("0x1234") + c, m, contract, txSender := newTestClaimer(t) + contract.claimSimulationFails = true + contract.credit[txSender.From()] = 1 + err := c.ClaimBonds(context.Background(), []types.GameMetadata{{Proxy: gameAddr}}) + require.NoError(t, err) + require.Equal(t, 0, txSender.sends) + require.Equal(t, 0, m.RecordBondClaimedCalls) + }) + + t.Run("ZeroCreditReturnsNil", func(t *testing.T) { + gameAddr := common.HexToAddress("0x1234") + c, m, contract, txSender := newTestClaimer(t) + contract.credit[txSender.From()] = 0 + err := 
c.ClaimBonds(context.Background(), []types.GameMetadata{{Proxy: gameAddr}}) + require.NoError(t, err) + require.Equal(t, 0, txSender.sends) + require.Equal(t, 0, m.RecordBondClaimedCalls) + }) + + t.Run("MultipleBondClaimFails", func(t *testing.T) { + gameAddr := common.HexToAddress("0x1234") + c, m, contract, txSender := newTestClaimer(t) + contract.credit[txSender.From()] = 1 + txSender.sendFails = true + err := c.ClaimBonds(context.Background(), []types.GameMetadata{{Proxy: gameAddr}, {Proxy: gameAddr}, {Proxy: gameAddr}}) + require.ErrorIs(t, err, mockTxMgrSendError) + require.Equal(t, 3, txSender.sends) + require.Equal(t, 0, m.RecordBondClaimedCalls) + }) +} + +func newTestClaimer(t *testing.T, claimants ...common.Address) (*Claimer, *mockClaimMetrics, *stubBondContract, *mockTxSender) { + logger := testlog.Logger(t, log.LvlDebug) + m := &mockClaimMetrics{} + txSender := &mockTxSender{} + bondContract := &stubBondContract{status: types.GameStatusChallengerWon, credit: make(map[common.Address]int64)} + contractCreator := func(game types.GameMetadata) (BondContract, error) { + return bondContract, nil + } + if len(claimants) == 0 { + claimants = []common.Address{txSender.From()} + } + c := NewBondClaimer(logger, m, contractCreator, txSender, claimants...) 
+ return c, m, bondContract, txSender +} + +type mockClaimMetrics struct { + RecordBondClaimedCalls int +} + +func (m *mockClaimMetrics) RecordBondClaimed(amount uint64) { + m.RecordBondClaimedCalls++ +} + +type mockTxSender struct { + sends int + sendFails bool + statusFail bool +} + +func (s *mockTxSender) From() common.Address { + return common.HexToAddress("0x33333") +} + +func (s *mockTxSender) SendAndWaitSimple(_ string, _ ...txmgr.TxCandidate) error { + s.sends++ + if s.sendFails { + return mockTxMgrSendError + } + if s.statusFail { + return errors.New("transaction reverted") + } + return nil +} + +type stubBondContract struct { + credit map[common.Address]int64 + status types.GameStatus + claimSimulationFails bool +} + +func (s *stubBondContract) GetCredit(_ context.Context, addr common.Address) (*big.Int, types.GameStatus, error) { + return big.NewInt(s.credit[addr]), s.status, nil +} + +func (s *stubBondContract) ClaimCreditTx(_ context.Context, _ common.Address) (txmgr.TxCandidate, error) { + if s.claimSimulationFails { + return txmgr.TxCandidate{}, fmt.Errorf("failed: %w", contracts.ErrSimulationFailed) + } + return txmgr.TxCandidate{}, nil +} diff --git a/op-challenger2/game/fault/claims/scheduler.go b/op-challenger2/game/fault/claims/scheduler.go new file mode 100644 index 000000000000..9bf1e1c1ef98 --- /dev/null +++ b/op-challenger2/game/fault/claims/scheduler.go @@ -0,0 +1,77 @@ +package claims + +import ( + "context" + "sync" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum/go-ethereum/log" +) + +type BondClaimer interface { + ClaimBonds(ctx context.Context, games []types.GameMetadata) error +} + +type BondClaimScheduler struct { + log log.Logger + metrics BondClaimSchedulerMetrics + ch chan schedulerMessage + claimer BondClaimer + cancel func() + wg sync.WaitGroup +} + +type BondClaimSchedulerMetrics interface { + RecordBondClaimFailed() +} + +type schedulerMessage struct { + blockNumber uint64 + games 
[]types.GameMetadata +} + +func NewBondClaimScheduler(logger log.Logger, metrics BondClaimSchedulerMetrics, claimer BondClaimer) *BondClaimScheduler { + return &BondClaimScheduler{ + log: logger, + metrics: metrics, + ch: make(chan schedulerMessage, 1), + claimer: claimer, + } +} + +func (s *BondClaimScheduler) Start(ctx context.Context) { + ctx, cancel := context.WithCancel(ctx) + s.cancel = cancel + s.wg.Add(1) + go s.run(ctx) +} + +func (s *BondClaimScheduler) Close() error { + s.cancel() + s.wg.Wait() + return nil +} + +func (s *BondClaimScheduler) run(ctx context.Context) { + defer s.wg.Done() + for { + select { + case <-ctx.Done(): + return + case msg := <-s.ch: + if err := s.claimer.ClaimBonds(ctx, msg.games); err != nil { + s.metrics.RecordBondClaimFailed() + s.log.Error("Failed to claim bonds", "blockNumber", msg.blockNumber, "err", err) + } + } + } +} + +func (s *BondClaimScheduler) Schedule(blockNumber uint64, games []types.GameMetadata) error { + select { + case s.ch <- schedulerMessage{blockNumber, games}: + default: + s.log.Trace("Skipping game bond claim while claiming in progress") + } + return nil +} diff --git a/op-challenger2/game/fault/claims/scheduler_test.go b/op-challenger2/game/fault/claims/scheduler_test.go new file mode 100644 index 000000000000..1d4f82f5df62 --- /dev/null +++ b/op-challenger2/game/fault/claims/scheduler_test.go @@ -0,0 +1,99 @@ +package claims + +import ( + "context" + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +var mockClaimError = errors.New("mock claim error") + +func TestBondClaimScheduler_Schedule(t *testing.T) { + tests := []struct { + name string + claimErr error + games []types.GameMetadata + expectedMetricCalls int + expectedClaimCalls int + }{ + { + name: "SingleGame_Succeeds", + games: 
[]types.GameMetadata{{}}, + expectedMetricCalls: 0, + expectedClaimCalls: 1, + }, + { + name: "SingleGame_Fails", + claimErr: mockClaimError, + games: []types.GameMetadata{{}}, + expectedMetricCalls: 1, + expectedClaimCalls: 1, + }, + { + name: "MultipleGames_Succeed", + games: []types.GameMetadata{{}, {}, {}}, + expectedMetricCalls: 0, + expectedClaimCalls: 1, + }, + { + name: "MultipleGames_Fails", + claimErr: mockClaimError, + games: []types.GameMetadata{{}, {}, {}}, + expectedMetricCalls: 1, + expectedClaimCalls: 1, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + scheduler, metrics, claimer := setupTestBondClaimScheduler(t) + claimer.claimErr = test.claimErr + scheduler.Start(ctx) + defer scheduler.Close() + + err := scheduler.Schedule(1, test.games) + require.NoError(t, err) + require.Eventually(t, func() bool { + return int(claimer.claimCalls.Load()) == test.expectedClaimCalls + }, 10*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { + return int(metrics.failedCalls.Load()) == test.expectedMetricCalls + }, 10*time.Second, 10*time.Millisecond) + }) + } +} + +func setupTestBondClaimScheduler(t *testing.T) (*BondClaimScheduler, *stubMetrics, *stubClaimer) { + logger := testlog.Logger(t, log.LvlInfo) + metrics := &stubMetrics{} + claimer := &stubClaimer{} + scheduler := NewBondClaimScheduler(logger, metrics, claimer) + return scheduler, metrics, claimer +} + +type stubMetrics struct { + failedCalls atomic.Int64 +} + +func (s *stubMetrics) RecordBondClaimFailed() { + s.failedCalls.Add(1) +} + +type stubClaimer struct { + claimCalls atomic.Int64 + claimErr error +} + +func (s *stubClaimer) ClaimBonds(ctx context.Context, games []types.GameMetadata) error { + s.claimCalls.Add(1) + return s.claimErr +} diff --git a/op-challenger2/game/fault/contracts/abis/FaultDisputeGame-0.18.1.json b/op-challenger2/game/fault/contracts/abis/FaultDisputeGame-0.18.1.json new file 
mode 100644 index 000000000000..e8c4133bb317 --- /dev/null +++ b/op-challenger2/game/fault/contracts/abis/FaultDisputeGame-0.18.1.json @@ -0,0 +1,926 @@ +[ + { + "inputs": [ + { + "internalType": "GameType", + "name": "_gameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "_absolutePrestate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_maxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_splitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "_clockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + "name": "_maxClockDuration", + "type": "uint64" + }, + { + "internalType": "contract IBigStepper", + "name": "_vm", + "type": "address" + }, + { + "internalType": "contract IDelayedWETH", + "name": "_weth", + "type": "address" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "_anchorStateRegistry", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_l2ChainId", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "absolutePrestate", + "outputs": [ + { + "internalType": "Claim", + "name": "absolutePrestate_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_ident", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_execLeafIdx", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_partOffset", + "type": "uint256" + } + ], + "name": "addLocalData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "anchorStateRegistry", + "outputs": [ + { + "internalType": "contract IAnchorStateRegistry", + "name": "registry_", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + 
"name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], + "name": "attack", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_recipient", + "type": "address" + } + ], + "name": "claimCredit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "claimData", + "outputs": [ + { + "internalType": "uint32", + "name": "parentIndex", + "type": "uint32" + }, + { + "internalType": "address", + "name": "counteredBy", + "type": "address" + }, + { + "internalType": "address", + "name": "claimant", + "type": "address" + }, + { + "internalType": "uint128", + "name": "bond", + "type": "uint128" + }, + { + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "internalType": "Position", + "name": "position", + "type": "uint128" + }, + { + "internalType": "Clock", + "name": "clock", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "claimDataLen", + "outputs": [ + { + "internalType": "uint256", + "name": "len_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Hash", + "name": "", + "type": "bytes32" + } + ], + "name": "claims", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "clockExtension", + "outputs": [ + { + "internalType": "Duration", + "name": "clockExtension_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "createdAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + 
"type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "credit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], + "name": "defend", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "extraData", + "outputs": [ + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "gameCreator", + "outputs": [ + { + "internalType": "address", + "name": "creator_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "gameData", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "gameType", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + } + ], + "name": "getChallengerDuration", + "outputs": [ + { + "internalType": "Duration", + "name": "duration_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + } + ], + "name": "getNumToResolve", + "outputs": [ + { + "internalType": "uint256", + "name": 
"numRemainingChildren_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Position", + "name": "_position", + "type": "uint128" + } + ], + "name": "getRequiredBond", + "outputs": [ + { + "internalType": "uint256", + "name": "requiredBond_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "initialize", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "l1Head", + "outputs": [ + { + "internalType": "Hash", + "name": "l1Head_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "l2BlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2ChainId", + "outputs": [ + { + "internalType": "uint256", + "name": "l2ChainId_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxClockDuration", + "outputs": [ + { + "internalType": "Duration", + "name": "maxClockDuration_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxGameDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "maxGameDepth_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_challengeIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + } + ], + "name": "move", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": 
"resolutionCheckpoints", + "outputs": [ + { + "internalType": "bool", + "name": "initialCheckpointComplete", + "type": "bool" + }, + { + "internalType": "uint32", + "name": "subgameIndex", + "type": "uint32" + }, + { + "internalType": "Position", + "name": "leftmostPosition", + "type": "uint128" + }, + { + "internalType": "address", + "name": "counteredBy", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "resolve", + "outputs": [ + { + "internalType": "enum GameStatus", + "name": "status_", + "type": "uint8" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_numToResolve", + "type": "uint256" + } + ], + "name": "resolveClaim", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "resolvedAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "resolvedSubgames", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "rootClaim", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "splitDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "splitDepth_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingBlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "startingBlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": 
"function" + }, + { + "inputs": [], + "name": "startingOutputRoot", + "outputs": [ + { + "internalType": "Hash", + "name": "root", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "l2BlockNumber", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingRootHash", + "outputs": [ + { + "internalType": "Hash", + "name": "startingRootHash_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "status", + "outputs": [ + { + "internalType": "enum GameStatus", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + }, + { + "internalType": "bytes", + "name": "_stateData", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "_proof", + "type": "bytes" + } + ], + "name": "step", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "subgames", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "vm", + "outputs": [ + { + "internalType": "contract IBigStepper", + "name": "vm_", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "weth", + "outputs": [ + { + "internalType": "contract IDelayedWETH", + "name": "weth_", + "type": "address" + } + ], + 
"stateMutability": "view", + "type": "function" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "parentIndex", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "claimant", + "type": "address" + } + ], + "name": "Move", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "enum GameStatus", + "name": "status", + "type": "uint8" + } + ], + "name": "Resolved", + "type": "event" + }, + { + "inputs": [], + "name": "AlreadyInitialized", + "type": "error" + }, + { + "inputs": [], + "name": "AnchorRootNotFound", + "type": "error" + }, + { + "inputs": [], + "name": "BondTransferFailed", + "type": "error" + }, + { + "inputs": [], + "name": "CannotDefendRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAboveSplit", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyExists", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyResolved", + "type": "error" + }, + { + "inputs": [], + "name": "ClockNotExpired", + "type": "error" + }, + { + "inputs": [], + "name": "ClockTimeExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "DuplicateStep", + "type": "error" + }, + { + "inputs": [], + "name": "GameDepthExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "GameNotInProgress", + "type": "error" + }, + { + "inputs": [], + "name": "IncorrectBondAmount", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidClockExtension", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidLocalIdent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidParent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidPrestate", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidSplitDepth", + "type": "error" + }, + { + "inputs": [], + "name": 
"MaxDepthTooLarge", + "type": "error" + }, + { + "inputs": [], + "name": "NoCreditToClaim", + "type": "error" + }, + { + "inputs": [], + "name": "OutOfOrderResolution", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "Claim", + "name": "rootClaim", + "type": "bytes32" + } + ], + "name": "UnexpectedRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "ValidStep", + "type": "error" + } +] \ No newline at end of file diff --git a/op-challenger2/game/fault/contracts/abis/FaultDisputeGame-0.8.0.json b/op-challenger2/game/fault/contracts/abis/FaultDisputeGame-0.8.0.json new file mode 100644 index 000000000000..8bd94969b07c --- /dev/null +++ b/op-challenger2/game/fault/contracts/abis/FaultDisputeGame-0.8.0.json @@ -0,0 +1,741 @@ +[ + { + "inputs": [ + { + "internalType": "GameType", + "name": "_gameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "_absolutePrestate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_maxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_splitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "_gameDuration", + "type": "uint64" + }, + { + "internalType": "contract IBigStepper", + "name": "_vm", + "type": "address" + }, + { + "internalType": "contract IDelayedWETH", + "name": "_weth", + "type": "address" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "_anchorStateRegistry", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_l2ChainId", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "stateMutability": "payable", + "type": "fallback" + }, + { + "stateMutability": "payable", + "type": "receive" + }, + { + "inputs": [], + "name": "absolutePrestate", + "outputs": [ + { + "internalType": "Claim", + "name": "absolutePrestate_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { 
+ "internalType": "uint256", + "name": "_ident", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_execLeafIdx", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_partOffset", + "type": "uint256" + } + ], + "name": "addLocalData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], + "name": "attack", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_recipient", + "type": "address" + } + ], + "name": "claimCredit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "claimData", + "outputs": [ + { + "internalType": "uint32", + "name": "parentIndex", + "type": "uint32" + }, + { + "internalType": "address", + "name": "counteredBy", + "type": "address" + }, + { + "internalType": "address", + "name": "claimant", + "type": "address" + }, + { + "internalType": "uint128", + "name": "bond", + "type": "uint128" + }, + { + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "internalType": "Position", + "name": "position", + "type": "uint128" + }, + { + "internalType": "Clock", + "name": "clock", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "claimDataLen", + "outputs": [ + { + "internalType": "uint256", + "name": "len_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "createdAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + 
"internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "credit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], + "name": "defend", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "extraData", + "outputs": [ + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "gameData", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "gameDuration", + "outputs": [ + { + "internalType": "Duration", + "name": "gameDuration_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "gameType", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Position", + "name": "_position", + "type": "uint128" + } + ], + "name": "getRequiredBond", + "outputs": [ + { + "internalType": "uint256", + "name": "requiredBond_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "initialize", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "l1Head", + "outputs": [ + { + "internalType": "Hash", + "name": "l1Head_", + "type": 
"bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "l2BlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2ChainId", + "outputs": [ + { + "internalType": "uint256", + "name": "l2ChainId_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxGameDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "maxGameDepth_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_challengeIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + } + ], + "name": "move", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "resolve", + "outputs": [ + { + "internalType": "enum GameStatus", + "name": "status_", + "type": "uint8" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + } + ], + "name": "resolveClaim", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "resolvedAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "rootClaim", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "splitDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "splitDepth_", + "type": "uint256" + } + ], + 
"stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingBlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "startingBlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingOutputRoot", + "outputs": [ + { + "internalType": "Hash", + "name": "root", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "l2BlockNumber", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingRootHash", + "outputs": [ + { + "internalType": "Hash", + "name": "startingRootHash_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "status", + "outputs": [ + { + "internalType": "enum GameStatus", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + }, + { + "internalType": "bytes", + "name": "_stateData", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "_proof", + "type": "bytes" + } + ], + "name": "step", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "vm", + "outputs": [ + { + "internalType": "contract IBigStepper", + "name": "vm_", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "weth", + "outputs": [ + { + "internalType": "contract IDelayedWETH", + "name": "weth_", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "anonymous": false, + 
"inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "parentIndex", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "claimant", + "type": "address" + } + ], + "name": "Move", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "enum GameStatus", + "name": "status", + "type": "uint8" + } + ], + "name": "Resolved", + "type": "event" + }, + { + "inputs": [], + "name": "AlreadyInitialized", + "type": "error" + }, + { + "inputs": [], + "name": "AnchorRootNotFound", + "type": "error" + }, + { + "inputs": [], + "name": "BondTransferFailed", + "type": "error" + }, + { + "inputs": [], + "name": "CannotDefendRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAboveSplit", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyExists", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyResolved", + "type": "error" + }, + { + "inputs": [], + "name": "ClockNotExpired", + "type": "error" + }, + { + "inputs": [], + "name": "ClockTimeExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "DuplicateStep", + "type": "error" + }, + { + "inputs": [], + "name": "GameDepthExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "GameNotInProgress", + "type": "error" + }, + { + "inputs": [], + "name": "IncorrectBondAmount", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidLocalIdent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidParent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidPrestate", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidSplitDepth", + "type": "error" + }, + { + "inputs": [], + "name": "NoCreditToClaim", + "type": "error" + }, + { + "inputs": [], + "name": "OutOfOrderResolution", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "Claim", 
+ "name": "rootClaim", + "type": "bytes32" + } + ], + "name": "UnexpectedRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "ValidStep", + "type": "error" + } +] \ No newline at end of file diff --git a/op-challenger2/game/fault/contracts/abis/FaultDisputeGame-1.1.1.json b/op-challenger2/game/fault/contracts/abis/FaultDisputeGame-1.1.1.json new file mode 100644 index 000000000000..f0558ad5961b --- /dev/null +++ b/op-challenger2/game/fault/contracts/abis/FaultDisputeGame-1.1.1.json @@ -0,0 +1,1042 @@ +[ + { + "inputs": [ + { + "internalType": "GameType", + "name": "_gameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "_absolutePrestate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_maxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_splitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "_clockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + "name": "_maxClockDuration", + "type": "uint64" + }, + { + "internalType": "contract IBigStepper", + "name": "_vm", + "type": "address" + }, + { + "internalType": "contract IDelayedWETH", + "name": "_weth", + "type": "address" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "_anchorStateRegistry", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_l2ChainId", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "absolutePrestate", + "outputs": [ + { + "internalType": "Claim", + "name": "absolutePrestate_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_ident", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_execLeafIdx", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_partOffset", + "type": "uint256" + } + ], + "name": "addLocalData", 
+ "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "anchorStateRegistry", + "outputs": [ + { + "internalType": "contract IAnchorStateRegistry", + "name": "registry_", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], + "name": "attack", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "bytes32", + "name": "version", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "stateRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "messagePasserStorageRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "latestBlockhash", + "type": "bytes32" + } + ], + "internalType": "struct Types.OutputRootProof", + "name": "_outputRootProof", + "type": "tuple" + }, + { + "internalType": "bytes", + "name": "_headerRLP", + "type": "bytes" + } + ], + "name": "challengeRootL2Block", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_recipient", + "type": "address" + } + ], + "name": "claimCredit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "claimData", + "outputs": [ + { + "internalType": "uint32", + "name": "parentIndex", + "type": "uint32" + }, + { + "internalType": "address", + "name": "counteredBy", + "type": "address" + }, + { + "internalType": "address", + "name": "claimant", + "type": "address" + }, + { + "internalType": "uint128", + "name": "bond", + "type": "uint128" + }, + { + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + 
}, + { + "internalType": "Position", + "name": "position", + "type": "uint128" + }, + { + "internalType": "Clock", + "name": "clock", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "claimDataLen", + "outputs": [ + { + "internalType": "uint256", + "name": "len_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Hash", + "name": "", + "type": "bytes32" + } + ], + "name": "claims", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "clockExtension", + "outputs": [ + { + "internalType": "Duration", + "name": "clockExtension_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "createdAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "credit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], + "name": "defend", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "extraData", + "outputs": [ + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "gameCreator", + "outputs": [ + { + "internalType": "address", + "name": "creator_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { 
+ "inputs": [], + "name": "gameData", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "gameType", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + } + ], + "name": "getChallengerDuration", + "outputs": [ + { + "internalType": "Duration", + "name": "duration_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + } + ], + "name": "getNumToResolve", + "outputs": [ + { + "internalType": "uint256", + "name": "numRemainingChildren_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Position", + "name": "_position", + "type": "uint128" + } + ], + "name": "getRequiredBond", + "outputs": [ + { + "internalType": "uint256", + "name": "requiredBond_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "initialize", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "l1Head", + "outputs": [ + { + "internalType": "Hash", + "name": "l1Head_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "l2BlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": 
"l2BlockNumberChallenged", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumberChallenger", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "l2ChainId", + "outputs": [ + { + "internalType": "uint256", + "name": "l2ChainId_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxClockDuration", + "outputs": [ + { + "internalType": "Duration", + "name": "maxClockDuration_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxGameDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "maxGameDepth_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_challengeIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + } + ], + "name": "move", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "resolutionCheckpoints", + "outputs": [ + { + "internalType": "bool", + "name": "initialCheckpointComplete", + "type": "bool" + }, + { + "internalType": "uint32", + "name": "subgameIndex", + "type": "uint32" + }, + { + "internalType": "Position", + "name": "leftmostPosition", + "type": "uint128" + }, + { + "internalType": "address", + "name": "counteredBy", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "resolve", + "outputs": [ + { + "internalType": "enum GameStatus", + "name": 
"status_", + "type": "uint8" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_numToResolve", + "type": "uint256" + } + ], + "name": "resolveClaim", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "resolvedAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "resolvedSubgames", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "rootClaim", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "splitDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "splitDepth_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingBlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "startingBlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingOutputRoot", + "outputs": [ + { + "internalType": "Hash", + "name": "root", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "l2BlockNumber", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingRootHash", + "outputs": [ + { + "internalType": "Hash", + "name": "startingRootHash_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "status", 
+ "outputs": [ + { + "internalType": "enum GameStatus", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + }, + { + "internalType": "bytes", + "name": "_stateData", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "_proof", + "type": "bytes" + } + ], + "name": "step", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "subgames", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "vm", + "outputs": [ + { + "internalType": "contract IBigStepper", + "name": "vm_", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "weth", + "outputs": [ + { + "internalType": "contract IDelayedWETH", + "name": "weth_", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "parentIndex", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "claimant", + "type": "address" + } + ], + "name": "Move", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "enum GameStatus", + "name": "status", 
+ "type": "uint8" + } + ], + "name": "Resolved", + "type": "event" + }, + { + "inputs": [], + "name": "AlreadyInitialized", + "type": "error" + }, + { + "inputs": [], + "name": "AnchorRootNotFound", + "type": "error" + }, + { + "inputs": [], + "name": "BlockNumberMatches", + "type": "error" + }, + { + "inputs": [], + "name": "BondTransferFailed", + "type": "error" + }, + { + "inputs": [], + "name": "CannotDefendRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAboveSplit", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyExists", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyResolved", + "type": "error" + }, + { + "inputs": [], + "name": "ClockNotExpired", + "type": "error" + }, + { + "inputs": [], + "name": "ClockTimeExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "ContentLengthMismatch", + "type": "error" + }, + { + "inputs": [], + "name": "DuplicateStep", + "type": "error" + }, + { + "inputs": [], + "name": "EmptyItem", + "type": "error" + }, + { + "inputs": [], + "name": "GameDepthExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "GameNotInProgress", + "type": "error" + }, + { + "inputs": [], + "name": "IncorrectBondAmount", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidClockExtension", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidDataRemainder", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidHeader", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidHeaderRLP", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidLocalIdent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidOutputRootProof", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidParent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidPrestate", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidSplitDepth", + "type": "error" + }, + { + "inputs": [], + "name": "L2BlockNumberChallenged", + "type": "error" 
+ }, + { + "inputs": [], + "name": "MaxDepthTooLarge", + "type": "error" + }, + { + "inputs": [], + "name": "NoCreditToClaim", + "type": "error" + }, + { + "inputs": [], + "name": "OutOfOrderResolution", + "type": "error" + }, + { + "inputs": [], + "name": "UnexpectedList", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "Claim", + "name": "rootClaim", + "type": "bytes32" + } + ], + "name": "UnexpectedRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "UnexpectedString", + "type": "error" + }, + { + "inputs": [], + "name": "ValidStep", + "type": "error" + } +] \ No newline at end of file diff --git a/op-challenger2/game/fault/contracts/delayed_weth.go b/op-challenger2/game/fault/contracts/delayed_weth.go new file mode 100644 index 000000000000..b4cf4f100f0b --- /dev/null +++ b/op-challenger2/game/fault/contracts/delayed_weth.go @@ -0,0 +1,58 @@ +package contracts + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" + "github.com/ethereum/go-ethereum/common" +) + +var ( + methodWithdrawals = "withdrawals" +) + +type DelayedWETHContract struct { + metrics metrics.ContractMetricer + multiCaller *batching.MultiCaller + contract *batching.BoundContract +} + +type WithdrawalRequest struct { + Amount *big.Int + Timestamp *big.Int +} + +func NewDelayedWETHContract(metrics metrics.ContractMetricer, addr common.Address, caller *batching.MultiCaller) *DelayedWETHContract { + contractAbi := snapshots.LoadDelayedWETHABI() + return &DelayedWETHContract{ + metrics: metrics, + multiCaller: caller, + contract: batching.NewBoundContract(contractAbi, addr), + } +} + +// GetWithdrawals returns all withdrawals made from the contract since the given block. 
+func (d *DelayedWETHContract) GetWithdrawals(ctx context.Context, block rpcblock.Block, gameAddr common.Address, recipients ...common.Address) ([]*WithdrawalRequest, error) { + defer d.metrics.StartContractRequest("GetWithdrawals")() + calls := make([]batching.Call, 0, len(recipients)) + for _, recipient := range recipients { + calls = append(calls, d.contract.Call(methodWithdrawals, gameAddr, recipient)) + } + results, err := d.multiCaller.Call(ctx, block, calls...) + if err != nil { + return nil, fmt.Errorf("failed to fetch withdrawals: %w", err) + } + withdrawals := make([]*WithdrawalRequest, len(recipients)) + for i, result := range results { + withdrawals[i] = &WithdrawalRequest{ + Amount: result.GetBigInt(0), + Timestamp: result.GetBigInt(1), + } + } + return withdrawals, nil +} diff --git a/op-challenger2/game/fault/contracts/delayed_weth_test.go b/op-challenger2/game/fault/contracts/delayed_weth_test.go new file mode 100644 index 000000000000..d59f20575822 --- /dev/null +++ b/op-challenger2/game/fault/contracts/delayed_weth_test.go @@ -0,0 +1,50 @@ +package contracts + +import ( + "context" + "math/big" + "testing" + + contractMetrics "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + batchingTest "github.com/ethereum-optimism/optimism/op-service/sources/batching/test" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +var ( + delayedWeth = common.HexToAddress("0x24112842371dFC380576ebb09Ae16Cb6B6caD7CB") +) + +func TestDelayedWeth_GetWithdrawals(t *testing.T) { + stubRpc, weth := setupDelayedWethTest(t) + block := rpcblock.ByNumber(482) + + addrs := []common.Address{{0x01}, {0x02}} + expected := [][]*big.Int{ + []*big.Int{big.NewInt(123), big.NewInt(456)}, + 
[]*big.Int{big.NewInt(123), big.NewInt(456)}, + } + + for i, addr := range addrs { + stubRpc.SetResponse(delayedWeth, methodWithdrawals, block, []interface{}{fdgAddr, addr}, []interface{}{expected[i][0], expected[i][1]}) + } + + actual, err := weth.GetWithdrawals(context.Background(), block, fdgAddr, addrs...) + require.NoError(t, err) + require.Equal(t, len(expected), len(actual)) + for i := range expected { + require.Zerof(t, expected[i][0].Cmp(actual[i].Amount), "expected: %v actual: %v", expected[i][1], actual[i].Amount) + require.Zerof(t, expected[i][1].Cmp(actual[i].Timestamp), "expected: %v actual: %v", expected[i][0], actual[i].Timestamp) + } +} + +func setupDelayedWethTest(t *testing.T) (*batchingTest.AbiBasedRpc, *DelayedWETHContract) { + delayedWethAbi := snapshots.LoadDelayedWETHABI() + stubRpc := batchingTest.NewAbiBasedRpc(t, delayedWeth, delayedWethAbi) + caller := batching.NewMultiCaller(stubRpc, batching.DefaultBatchSize) + weth := NewDelayedWETHContract(contractMetrics.NoopContractMetrics, delayedWeth, caller) + return stubRpc, weth +} diff --git a/op-challenger2/game/fault/contracts/faultdisputegame.go b/op-challenger2/game/fault/contracts/faultdisputegame.go new file mode 100644 index 000000000000..615c55ac996f --- /dev/null +++ b/op-challenger2/game/fault/contracts/faultdisputegame.go @@ -0,0 +1,627 @@ +package contracts + +import ( + "bytes" + "context" + "errors" + "fmt" + "math" + "math/big" + "strings" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" + 
"github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" +) + +// The maximum number of children that will be processed during a call to `resolveClaim` +var maxChildChecks = big.NewInt(512) + +var ( + methodVersion = "version" + methodMaxClockDuration = "maxClockDuration" + methodMaxGameDepth = "maxGameDepth" + methodAbsolutePrestate = "absolutePrestate" + methodStatus = "status" + methodRootClaim = "rootClaim" + methodClaimCount = "claimDataLen" + methodClaim = "claimData" + methodL1Head = "l1Head" + methodResolvedSubgames = "resolvedSubgames" + methodResolve = "resolve" + methodResolveClaim = "resolveClaim" + methodAttack = "attack" + methodDefend = "defend" + methodStep = "step" + methodAddLocalData = "addLocalData" + methodVM = "vm" + methodStartingBlockNumber = "startingBlockNumber" + methodStartingRootHash = "startingRootHash" + methodSplitDepth = "splitDepth" + methodL2BlockNumber = "l2BlockNumber" + methodRequiredBond = "getRequiredBond" + methodClaimCredit = "claimCredit" + methodCredit = "credit" + methodWETH = "weth" + methodL2BlockNumberChallenged = "l2BlockNumberChallenged" + methodL2BlockNumberChallenger = "l2BlockNumberChallenger" + methodChallengeRootL2Block = "challengeRootL2Block" +) + +var ( + ErrSimulationFailed = errors.New("tx simulation failed") + ErrChallengeL2BlockNotSupported = errors.New("contract version does not support challenging L2 block number") +) + +type FaultDisputeGameContractLatest struct { + metrics metrics.ContractMetricer + multiCaller *batching.MultiCaller + contract *batching.BoundContract +} + +type Proposal struct { + L2BlockNumber *big.Int + OutputRoot common.Hash +} + +// outputRootProof is designed to match the solidity OutputRootProof struct. 
+type outputRootProof struct { + Version [32]byte + StateRoot [32]byte + MessagePasserStorageRoot [32]byte + LatestBlockhash [32]byte +} + +func NewFaultDisputeGameContract(ctx context.Context, metrics metrics.ContractMetricer, addr common.Address, caller *batching.MultiCaller) (FaultDisputeGameContract, error) { + contractAbi := snapshots.LoadFaultDisputeGameABI() + + result, err := caller.SingleCall(ctx, rpcblock.Latest, batching.NewContractCall(contractAbi, addr, methodVersion)) + if err != nil { + return nil, fmt.Errorf("failed to retrieve version of dispute game %v: %w", addr, err) + } + version := result.GetString(0) + + if strings.HasPrefix(version, "0.8.") { + // Detected an older version of contracts, use a compatibility shim. + legacyAbi := mustParseAbi(faultDisputeGameAbi020) + return &FaultDisputeGameContract080{ + FaultDisputeGameContractLatest: FaultDisputeGameContractLatest{ + metrics: metrics, + multiCaller: caller, + contract: batching.NewBoundContract(legacyAbi, addr), + }, + }, nil + } else if strings.HasPrefix(version, "0.18.") || strings.HasPrefix(version, "1.0.") { + // Detected an older version of contracts, use a compatibility shim. + legacyAbi := mustParseAbi(faultDisputeGameAbi0180) + return &FaultDisputeGameContract0180{ + FaultDisputeGameContractLatest: FaultDisputeGameContractLatest{ + metrics: metrics, + multiCaller: caller, + contract: batching.NewBoundContract(legacyAbi, addr), + }, + }, nil + } else if strings.HasPrefix(version, "1.1.") { + // Detected an older version of contracts, use a compatibility shim. 
+ legacyAbi := mustParseAbi(faultDisputeGameAbi111) + return &FaultDisputeGameContract111{ + FaultDisputeGameContractLatest: FaultDisputeGameContractLatest{ + metrics: metrics, + multiCaller: caller, + contract: batching.NewBoundContract(legacyAbi, addr), + }, + }, nil + } else { + return &FaultDisputeGameContractLatest{ + metrics: metrics, + multiCaller: caller, + contract: batching.NewBoundContract(contractAbi, addr), + }, nil + } +} + +func mustParseAbi(json []byte) *abi.ABI { + loaded, err := abi.JSON(bytes.NewReader(json)) + if err != nil { + panic(err) + } + return &loaded +} + +// GetBalance returns the total amount of ETH controlled by this contract. +// Note that the ETH is actually held by the DelayedWETH contract which may be shared by multiple games. +// Returns the balance and the address of the contract that actually holds the balance. +func (f *FaultDisputeGameContractLatest) GetBalance(ctx context.Context, block rpcblock.Block) (*big.Int, common.Address, error) { + defer f.metrics.StartContractRequest("GetBalance")() + result, err := f.multiCaller.SingleCall(ctx, block, f.contract.Call(methodWETH)) + if err != nil { + return nil, common.Address{}, fmt.Errorf("failed to load weth address: %w", err) + } + wethAddr := result.GetAddress(0) + result, err = f.multiCaller.SingleCall(ctx, block, batching.NewBalanceCall(wethAddr)) + if err != nil { + return nil, common.Address{}, fmt.Errorf("failed to retrieve game balance: %w", err) + } + return result.GetBigInt(0), wethAddr, nil +} + +// GetBlockRange returns the block numbers of the absolute pre-state block (typically genesis or the bedrock activation block) +// and the post-state block (that the proposed output root is for). 
+func (f *FaultDisputeGameContractLatest) GetBlockRange(ctx context.Context) (prestateBlock uint64, poststateBlock uint64, retErr error) { + defer f.metrics.StartContractRequest("GetBlockRange")() + results, err := f.multiCaller.Call(ctx, rpcblock.Latest, + f.contract.Call(methodStartingBlockNumber), + f.contract.Call(methodL2BlockNumber)) + if err != nil { + retErr = fmt.Errorf("failed to retrieve game block range: %w", err) + return + } + if len(results) != 2 { + retErr = fmt.Errorf("expected 2 results but got %v", len(results)) + return + } + prestateBlock = results[0].GetBigInt(0).Uint64() + poststateBlock = results[1].GetBigInt(0).Uint64() + return +} + +type GameMetadata struct { + L1Head common.Hash + L2BlockNum uint64 + RootClaim common.Hash + Status gameTypes.GameStatus + MaxClockDuration uint64 + L2BlockNumberChallenged bool + L2BlockNumberChallenger common.Address +} + +// GetGameMetadata returns the game's L1 head, L2 block number, root claim, status, max clock duration, and is l2 block number challenged. 
+// GetGameMetadata loads the game's L1 head, L2 block number, root claim,
+// status, max clock duration, and L2-block-number challenge state in a
+// single batched call at the given block.
+func (f *FaultDisputeGameContractLatest) GetGameMetadata(ctx context.Context, block rpcblock.Block) (GameMetadata, error) {
+	defer f.metrics.StartContractRequest("GetGameMetadata")()
+	results, err := f.multiCaller.Call(ctx, block,
+		f.contract.Call(methodL1Head),
+		f.contract.Call(methodL2BlockNumber),
+		f.contract.Call(methodRootClaim),
+		f.contract.Call(methodStatus),
+		f.contract.Call(methodMaxClockDuration),
+		f.contract.Call(methodL2BlockNumberChallenged),
+		f.contract.Call(methodL2BlockNumberChallenger),
+	)
+	if err != nil {
+		return GameMetadata{}, fmt.Errorf("failed to retrieve game metadata: %w", err)
+	}
+	// 7 calls were batched above, so exactly 7 results are expected.
+	if len(results) != 7 {
+		return GameMetadata{}, fmt.Errorf("expected 7 results but got %v", len(results))
+	}
+	l1Head := results[0].GetHash(0)
+	l2BlockNumber := results[1].GetBigInt(0).Uint64()
+	rootClaim := results[2].GetHash(0)
+	status, err := gameTypes.GameStatusFromUint8(results[3].GetUint8(0))
+	if err != nil {
+		return GameMetadata{}, fmt.Errorf("failed to convert game status: %w", err)
+	}
+	duration := results[4].GetUint64(0)
+	blockChallenged := results[5].GetBool(0)
+	blockChallenger := results[6].GetAddress(0)
+	return GameMetadata{
+		L1Head:                  l1Head,
+		L2BlockNum:              l2BlockNumber,
+		RootClaim:               rootClaim,
+		Status:                  status,
+		MaxClockDuration:        duration,
+		L2BlockNumberChallenged: blockChallenged,
+		L2BlockNumberChallenger: blockChallenger,
+	}, nil
+}
+
+// GetStartingRootHash returns the output root the game's trace starts from.
+func (f *FaultDisputeGameContractLatest) GetStartingRootHash(ctx context.Context) (common.Hash, error) {
+	defer f.metrics.StartContractRequest("GetStartingRootHash")()
+	startingRootHash, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodStartingRootHash))
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("failed to retrieve genesis output root: %w", err)
+	}
+	return startingRootHash.GetHash(0), nil
+}
+
+func (f *FaultDisputeGameContractLatest) GetSplitDepth(ctx context.Context) (types.Depth, error) {
+	defer
f.metrics.StartContractRequest("GetSplitDepth")() + splitDepth, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodSplitDepth)) + if err != nil { + return 0, fmt.Errorf("failed to retrieve split depth: %w", err) + } + return types.Depth(splitDepth.GetBigInt(0).Uint64()), nil +} + +func (f *FaultDisputeGameContractLatest) GetCredit(ctx context.Context, recipient common.Address) (*big.Int, gameTypes.GameStatus, error) { + defer f.metrics.StartContractRequest("GetCredit")() + results, err := f.multiCaller.Call(ctx, rpcblock.Latest, + f.contract.Call(methodCredit, recipient), + f.contract.Call(methodStatus)) + if err != nil { + return nil, gameTypes.GameStatusInProgress, err + } + if len(results) != 2 { + return nil, gameTypes.GameStatusInProgress, fmt.Errorf("expected 2 results but got %v", len(results)) + } + credit := results[0].GetBigInt(0) + status, err := gameTypes.GameStatusFromUint8(results[1].GetUint8(0)) + if err != nil { + return nil, gameTypes.GameStatusInProgress, fmt.Errorf("invalid game status %v: %w", status, err) + } + return credit, status, nil +} + +func (f *FaultDisputeGameContractLatest) GetRequiredBonds(ctx context.Context, block rpcblock.Block, positions ...*big.Int) ([]*big.Int, error) { + calls := make([]batching.Call, 0, len(positions)) + for _, position := range positions { + calls = append(calls, f.contract.Call(methodRequiredBond, position)) + } + results, err := f.multiCaller.Call(ctx, block, calls...) 
+ if err != nil { + return nil, fmt.Errorf("failed to retrieve required bonds: %w", err) + } + requiredBonds := make([]*big.Int, 0, len(positions)) + for _, result := range results { + requiredBonds = append(requiredBonds, result.GetBigInt(0)) + } + return requiredBonds, nil +} + +func (f *FaultDisputeGameContractLatest) GetCredits(ctx context.Context, block rpcblock.Block, recipients ...common.Address) ([]*big.Int, error) { + defer f.metrics.StartContractRequest("GetCredits")() + calls := make([]batching.Call, 0, len(recipients)) + for _, recipient := range recipients { + calls = append(calls, f.contract.Call(methodCredit, recipient)) + } + results, err := f.multiCaller.Call(ctx, block, calls...) + if err != nil { + return nil, fmt.Errorf("failed to retrieve credit: %w", err) + } + credits := make([]*big.Int, 0, len(recipients)) + for _, result := range results { + credits = append(credits, result.GetBigInt(0)) + } + return credits, nil +} + +func (f *FaultDisputeGameContractLatest) ClaimCreditTx(ctx context.Context, recipient common.Address) (txmgr.TxCandidate, error) { + defer f.metrics.StartContractRequest("ClaimCredit")() + call := f.contract.Call(methodClaimCredit, recipient) + _, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, call) + if err != nil { + return txmgr.TxCandidate{}, fmt.Errorf("%w: %v", ErrSimulationFailed, err.Error()) + } + return call.ToTxCandidate() +} + +func (f *FaultDisputeGameContractLatest) GetRequiredBond(ctx context.Context, position types.Position) (*big.Int, error) { + defer f.metrics.StartContractRequest("GetRequiredBond")() + bond, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodRequiredBond, position.ToGIndex())) + if err != nil { + return nil, fmt.Errorf("failed to retrieve required bond: %w", err) + } + return bond.GetBigInt(0), nil +} + +func (f *FaultDisputeGameContractLatest) UpdateOracleTx(ctx context.Context, claimIdx uint64, data *types.PreimageOracleData) (txmgr.TxCandidate, error) { + 
if data.IsLocal { + return f.addLocalDataTx(claimIdx, data) + } + return f.addGlobalDataTx(ctx, data) +} + +func (f *FaultDisputeGameContractLatest) addLocalDataTx(claimIdx uint64, data *types.PreimageOracleData) (txmgr.TxCandidate, error) { + call := f.contract.Call( + methodAddLocalData, + data.GetIdent(), + new(big.Int).SetUint64(claimIdx), + new(big.Int).SetUint64(uint64(data.OracleOffset)), + ) + return call.ToTxCandidate() +} + +func (f *FaultDisputeGameContractLatest) addGlobalDataTx(ctx context.Context, data *types.PreimageOracleData) (txmgr.TxCandidate, error) { + oracle, err := f.GetOracle(ctx) + if err != nil { + return txmgr.TxCandidate{}, err + } + return oracle.AddGlobalDataTx(data) +} + +func (f *FaultDisputeGameContractLatest) GetWithdrawals(ctx context.Context, block rpcblock.Block, gameAddr common.Address, recipients ...common.Address) ([]*WithdrawalRequest, error) { + defer f.metrics.StartContractRequest("GetWithdrawals")() + delayedWETH, err := f.getDelayedWETH(ctx) + if err != nil { + return nil, err + } + return delayedWETH.GetWithdrawals(ctx, block, gameAddr, recipients...) 
+} + +func (f *FaultDisputeGameContractLatest) getDelayedWETH(ctx context.Context) (*DelayedWETHContract, error) { + defer f.metrics.StartContractRequest("GetDelayedWETH")() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodWETH)) + if err != nil { + return nil, fmt.Errorf("failed to fetch WETH addr: %w", err) + } + return NewDelayedWETHContract(f.metrics, result.GetAddress(0), f.multiCaller), nil +} + +func (f *FaultDisputeGameContractLatest) GetOracle(ctx context.Context) (*PreimageOracleContract, error) { + defer f.metrics.StartContractRequest("GetOracle")() + vm, err := f.vm(ctx) + if err != nil { + return nil, err + } + return vm.Oracle(ctx) +} + +func (f *FaultDisputeGameContractLatest) GetMaxClockDuration(ctx context.Context) (time.Duration, error) { + defer f.metrics.StartContractRequest("GetMaxClockDuration")() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodMaxClockDuration)) + if err != nil { + return 0, fmt.Errorf("failed to fetch max clock duration: %w", err) + } + return time.Duration(result.GetUint64(0)) * time.Second, nil +} + +func (f *FaultDisputeGameContractLatest) GetMaxGameDepth(ctx context.Context) (types.Depth, error) { + defer f.metrics.StartContractRequest("GetMaxGameDepth")() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodMaxGameDepth)) + if err != nil { + return 0, fmt.Errorf("failed to fetch max game depth: %w", err) + } + return types.Depth(result.GetBigInt(0).Uint64()), nil +} + +func (f *FaultDisputeGameContractLatest) GetAbsolutePrestateHash(ctx context.Context) (common.Hash, error) { + defer f.metrics.StartContractRequest("GetAbsolutePrestateHash")() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodAbsolutePrestate)) + if err != nil { + return common.Hash{}, fmt.Errorf("failed to fetch absolute prestate hash: %w", err) + } + return result.GetHash(0), nil +} + +func (f 
*FaultDisputeGameContractLatest) GetL1Head(ctx context.Context) (common.Hash, error) { + defer f.metrics.StartContractRequest("GetL1Head")() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodL1Head)) + if err != nil { + return common.Hash{}, fmt.Errorf("failed to fetch L1 head: %w", err) + } + return result.GetHash(0), nil +} + +func (f *FaultDisputeGameContractLatest) GetStatus(ctx context.Context) (gameTypes.GameStatus, error) { + defer f.metrics.StartContractRequest("GetStatus")() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodStatus)) + if err != nil { + return 0, fmt.Errorf("failed to fetch status: %w", err) + } + return gameTypes.GameStatusFromUint8(result.GetUint8(0)) +} + +func (f *FaultDisputeGameContractLatest) GetClaimCount(ctx context.Context) (uint64, error) { + defer f.metrics.StartContractRequest("GetClaimCount")() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodClaimCount)) + if err != nil { + return 0, fmt.Errorf("failed to fetch claim count: %w", err) + } + return result.GetBigInt(0).Uint64(), nil +} + +func (f *FaultDisputeGameContractLatest) GetClaim(ctx context.Context, idx uint64) (types.Claim, error) { + defer f.metrics.StartContractRequest("GetClaim")() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodClaim, new(big.Int).SetUint64(idx))) + if err != nil { + return types.Claim{}, fmt.Errorf("failed to fetch claim %v: %w", idx, err) + } + return f.decodeClaim(result, int(idx)), nil +} + +func (f *FaultDisputeGameContractLatest) GetAllClaims(ctx context.Context, block rpcblock.Block) ([]types.Claim, error) { + defer f.metrics.StartContractRequest("GetAllClaims")() + results, err := batching.ReadArray(ctx, f.multiCaller, block, f.contract.Call(methodClaimCount), func(i *big.Int) *batching.ContractCall { + return f.contract.Call(methodClaim, i) + }) + if err != nil { + return nil, fmt.Errorf("failed 
to load claims: %w", err) + } + + var claims []types.Claim + for idx, result := range results { + claims = append(claims, f.decodeClaim(result, idx)) + } + return claims, nil +} + +func (f *FaultDisputeGameContractLatest) IsResolved(ctx context.Context, block rpcblock.Block, claims ...types.Claim) ([]bool, error) { + defer f.metrics.StartContractRequest("IsResolved")() + calls := make([]batching.Call, 0, len(claims)) + for _, claim := range claims { + calls = append(calls, f.contract.Call(methodResolvedSubgames, big.NewInt(int64(claim.ContractIndex)))) + } + results, err := f.multiCaller.Call(ctx, block, calls...) + if err != nil { + return nil, fmt.Errorf("failed to retrieve resolved subgames: %w", err) + } + resolved := make([]bool, 0, len(claims)) + for _, result := range results { + resolved = append(resolved, result.GetBool(0)) + } + return resolved, nil +} + +func (f *FaultDisputeGameContractLatest) vm(ctx context.Context) (*VMContract, error) { + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodVM)) + if err != nil { + return nil, fmt.Errorf("failed to fetch VM addr: %w", err) + } + vmAddr := result.GetAddress(0) + return NewVMContract(vmAddr, f.multiCaller), nil +} + +func (f *FaultDisputeGameContractLatest) IsL2BlockNumberChallenged(ctx context.Context, block rpcblock.Block) (bool, error) { + defer f.metrics.StartContractRequest("IsL2BlockNumberChallenged")() + result, err := f.multiCaller.SingleCall(ctx, block, f.contract.Call(methodL2BlockNumberChallenged)) + if err != nil { + return false, fmt.Errorf("failed to fetch block number challenged: %w", err) + } + return result.GetBool(0), nil +} + +func (f *FaultDisputeGameContractLatest) ChallengeL2BlockNumberTx(challenge *types.InvalidL2BlockNumberChallenge) (txmgr.TxCandidate, error) { + headerRlp, err := rlp.EncodeToBytes(challenge.Header) + if err != nil { + return txmgr.TxCandidate{}, fmt.Errorf("failed to serialize header: %w", err) + } + return 
f.contract.Call(methodChallengeRootL2Block, outputRootProof{ + Version: challenge.Output.Version, + StateRoot: challenge.Output.StateRoot, + MessagePasserStorageRoot: challenge.Output.WithdrawalStorageRoot, + LatestBlockhash: challenge.Output.BlockRef.Hash, + }, headerRlp).ToTxCandidate() +} + +func (f *FaultDisputeGameContractLatest) AttackTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) { + call := f.contract.Call(methodAttack, parent.Value, big.NewInt(int64(parent.ContractIndex)), pivot) + return f.txWithBond(ctx, parent.Position.Attack(), call) +} + +func (f *FaultDisputeGameContractLatest) DefendTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) { + call := f.contract.Call(methodDefend, parent.Value, big.NewInt(int64(parent.ContractIndex)), pivot) + return f.txWithBond(ctx, parent.Position.Defend(), call) +} + +func (f *FaultDisputeGameContractLatest) txWithBond(ctx context.Context, position types.Position, call *batching.ContractCall) (txmgr.TxCandidate, error) { + tx, err := call.ToTxCandidate() + if err != nil { + return txmgr.TxCandidate{}, fmt.Errorf("failed to create transaction: %w", err) + } + tx.Value, err = f.GetRequiredBond(ctx, position) + if err != nil { + return txmgr.TxCandidate{}, fmt.Errorf("failed to fetch required bond: %w", err) + } + return tx, nil +} + +func (f *FaultDisputeGameContractLatest) StepTx(claimIdx uint64, isAttack bool, stateData []byte, proof []byte) (txmgr.TxCandidate, error) { + call := f.contract.Call(methodStep, new(big.Int).SetUint64(claimIdx), isAttack, stateData, proof) + return call.ToTxCandidate() +} + +func (f *FaultDisputeGameContractLatest) CallResolveClaim(ctx context.Context, claimIdx uint64) error { + defer f.metrics.StartContractRequest("CallResolveClaim")() + call := f.resolveClaimCall(claimIdx) + _, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, call) + if err != nil { + return fmt.Errorf("failed to call resolve claim: 
%w", err) + } + return nil +} + +func (f *FaultDisputeGameContractLatest) ResolveClaimTx(claimIdx uint64) (txmgr.TxCandidate, error) { + call := f.resolveClaimCall(claimIdx) + return call.ToTxCandidate() +} + +func (f *FaultDisputeGameContractLatest) resolveClaimCall(claimIdx uint64) *batching.ContractCall { + return f.contract.Call(methodResolveClaim, new(big.Int).SetUint64(claimIdx), maxChildChecks) +} + +func (f *FaultDisputeGameContractLatest) CallResolve(ctx context.Context) (gameTypes.GameStatus, error) { + defer f.metrics.StartContractRequest("CallResolve")() + call := f.resolveCall() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, call) + if err != nil { + return gameTypes.GameStatusInProgress, fmt.Errorf("failed to call resolve: %w", err) + } + return gameTypes.GameStatusFromUint8(result.GetUint8(0)) +} + +func (f *FaultDisputeGameContractLatest) ResolveTx() (txmgr.TxCandidate, error) { + call := f.resolveCall() + return call.ToTxCandidate() +} + +func (f *FaultDisputeGameContractLatest) resolveCall() *batching.ContractCall { + return f.contract.Call(methodResolve) +} + +// decodeClock decodes a uint128 into a Clock duration and timestamp. +func decodeClock(clock *big.Int) types.Clock { + maxUint64 := new(big.Int).Add(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(1)) + remainder := new(big.Int) + quotient, _ := new(big.Int).QuoRem(clock, maxUint64, remainder) + return types.NewClock(time.Duration(quotient.Int64())*time.Second, time.Unix(remainder.Int64(), 0)) +} + +// packClock packs the Clock duration and timestamp into a uint128. 
+func packClock(c types.Clock) *big.Int { + duration := big.NewInt(int64(c.Duration.Seconds())) + encoded := new(big.Int).Lsh(duration, 64) + return new(big.Int).Or(encoded, big.NewInt(c.Timestamp.Unix())) +} + +func (f *FaultDisputeGameContractLatest) decodeClaim(result *batching.CallResult, contractIndex int) types.Claim { + parentIndex := result.GetUint32(0) + counteredBy := result.GetAddress(1) + claimant := result.GetAddress(2) + bond := result.GetBigInt(3) + claim := result.GetHash(4) + position := result.GetBigInt(5) + clock := result.GetBigInt(6) + return types.Claim{ + ClaimData: types.ClaimData{ + Value: claim, + Position: types.NewPositionFromGIndex(position), + Bond: bond, + }, + CounteredBy: counteredBy, + Claimant: claimant, + Clock: decodeClock(clock), + ContractIndex: contractIndex, + ParentContractIndex: int(parentIndex), + } +} + +type FaultDisputeGameContract interface { + GetBalance(ctx context.Context, block rpcblock.Block) (*big.Int, common.Address, error) + GetBlockRange(ctx context.Context) (prestateBlock uint64, poststateBlock uint64, retErr error) + GetGameMetadata(ctx context.Context, block rpcblock.Block) (GameMetadata, error) + GetStartingRootHash(ctx context.Context) (common.Hash, error) + GetSplitDepth(ctx context.Context) (types.Depth, error) + GetCredit(ctx context.Context, recipient common.Address) (*big.Int, gameTypes.GameStatus, error) + GetRequiredBonds(ctx context.Context, block rpcblock.Block, positions ...*big.Int) ([]*big.Int, error) + GetCredits(ctx context.Context, block rpcblock.Block, recipients ...common.Address) ([]*big.Int, error) + ClaimCreditTx(ctx context.Context, recipient common.Address) (txmgr.TxCandidate, error) + GetRequiredBond(ctx context.Context, position types.Position) (*big.Int, error) + UpdateOracleTx(ctx context.Context, claimIdx uint64, data *types.PreimageOracleData) (txmgr.TxCandidate, error) + GetWithdrawals(ctx context.Context, block rpcblock.Block, gameAddr common.Address, recipients 
...common.Address) ([]*WithdrawalRequest, error) + GetOracle(ctx context.Context) (*PreimageOracleContract, error) + GetMaxClockDuration(ctx context.Context) (time.Duration, error) + GetMaxGameDepth(ctx context.Context) (types.Depth, error) + GetAbsolutePrestateHash(ctx context.Context) (common.Hash, error) + GetL1Head(ctx context.Context) (common.Hash, error) + GetStatus(ctx context.Context) (gameTypes.GameStatus, error) + GetClaimCount(ctx context.Context) (uint64, error) + GetClaim(ctx context.Context, idx uint64) (types.Claim, error) + GetAllClaims(ctx context.Context, block rpcblock.Block) ([]types.Claim, error) + IsResolved(ctx context.Context, block rpcblock.Block, claims ...types.Claim) ([]bool, error) + IsL2BlockNumberChallenged(ctx context.Context, block rpcblock.Block) (bool, error) + ChallengeL2BlockNumberTx(challenge *types.InvalidL2BlockNumberChallenge) (txmgr.TxCandidate, error) + AttackTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) + DefendTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) + StepTx(claimIdx uint64, isAttack bool, stateData []byte, proof []byte) (txmgr.TxCandidate, error) + CallResolveClaim(ctx context.Context, claimIdx uint64) error + ResolveClaimTx(claimIdx uint64) (txmgr.TxCandidate, error) + CallResolve(ctx context.Context) (gameTypes.GameStatus, error) + ResolveTx() (txmgr.TxCandidate, error) +} diff --git a/op-challenger2/game/fault/contracts/faultdisputegame0180.go b/op-challenger2/game/fault/contracts/faultdisputegame0180.go new file mode 100644 index 000000000000..10bf420fe23b --- /dev/null +++ b/op-challenger2/game/fault/contracts/faultdisputegame0180.go @@ -0,0 +1,73 @@ +package contracts + +import ( + "context" + _ "embed" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + 
"github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" +) + +//go:embed abis/FaultDisputeGame-0.18.1.json +var faultDisputeGameAbi0180 []byte + +type FaultDisputeGameContract0180 struct { + FaultDisputeGameContractLatest +} + +// GetGameMetadata returns the game's L1 head, L2 block number, root claim, status, and max clock duration. +func (f *FaultDisputeGameContract0180) GetGameMetadata(ctx context.Context, block rpcblock.Block) (GameMetadata, error) { + defer f.metrics.StartContractRequest("GetGameMetadata")() + results, err := f.multiCaller.Call(ctx, block, + f.contract.Call(methodL1Head), + f.contract.Call(methodL2BlockNumber), + f.contract.Call(methodRootClaim), + f.contract.Call(methodStatus), + f.contract.Call(methodMaxClockDuration), + ) + if err != nil { + return GameMetadata{}, fmt.Errorf("failed to retrieve game metadata: %w", err) + } + if len(results) != 5 { + return GameMetadata{}, fmt.Errorf("expected 5 results but got %v", len(results)) + } + l1Head := results[0].GetHash(0) + l2BlockNumber := results[1].GetBigInt(0).Uint64() + rootClaim := results[2].GetHash(0) + status, err := gameTypes.GameStatusFromUint8(results[3].GetUint8(0)) + if err != nil { + return GameMetadata{}, fmt.Errorf("failed to convert game status: %w", err) + } + duration := results[4].GetUint64(0) + return GameMetadata{ + L1Head: l1Head, + L2BlockNum: l2BlockNumber, + RootClaim: rootClaim, + Status: status, + MaxClockDuration: duration, + L2BlockNumberChallenged: false, + }, nil +} + +func (f *FaultDisputeGameContract0180) IsL2BlockNumberChallenged(_ context.Context, _ rpcblock.Block) (bool, error) { + return false, nil +} + +func (f *FaultDisputeGameContract0180) ChallengeL2BlockNumberTx(_ *types.InvalidL2BlockNumberChallenge) (txmgr.TxCandidate, error) { + return txmgr.TxCandidate{}, ErrChallengeL2BlockNotSupported +} + +func (f 
*FaultDisputeGameContract0180) AttackTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) { + call := f.contract.Call(methodAttack, big.NewInt(int64(parent.ContractIndex)), pivot) + return f.txWithBond(ctx, parent.Position.Attack(), call) +} + +func (f *FaultDisputeGameContract0180) DefendTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) { + call := f.contract.Call(methodDefend, big.NewInt(int64(parent.ContractIndex)), pivot) + return f.txWithBond(ctx, parent.Position.Defend(), call) +} diff --git a/op-challenger2/game/fault/contracts/faultdisputegame080.go b/op-challenger2/game/fault/contracts/faultdisputegame080.go new file mode 100644 index 000000000000..81326597601c --- /dev/null +++ b/op-challenger2/game/fault/contracts/faultdisputegame080.go @@ -0,0 +1,159 @@ +package contracts + +import ( + "context" + _ "embed" + "fmt" + "math/big" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" +) + +//go:embed abis/FaultDisputeGame-0.8.0.json +var faultDisputeGameAbi020 []byte + +var resolvedBondAmount = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 128), big.NewInt(1)) + +var ( + methodGameDuration = "gameDuration" +) + +type FaultDisputeGameContract080 struct { + FaultDisputeGameContractLatest +} + +// GetGameMetadata returns the game's L1 head, L2 block number, root claim, status, and max clock duration. 
+// GetGameMetadata batches the L1 head, L2 block number, root claim, status
+// and game duration lookups into a single multicall against the 0.8.0
+// contract. This version only exposes gameDuration, so MaxClockDuration is
+// derived as half of it; it also has no L2 block number challenge, so that
+// field is always reported as false.
+func (f *FaultDisputeGameContract080) GetGameMetadata(ctx context.Context, block rpcblock.Block) (GameMetadata, error) {
+	defer f.metrics.StartContractRequest("GetGameMetadata")()
+	results, err := f.multiCaller.Call(ctx, block,
+		f.contract.Call(methodL1Head),
+		f.contract.Call(methodL2BlockNumber),
+		f.contract.Call(methodRootClaim),
+		f.contract.Call(methodStatus),
+		f.contract.Call(methodGameDuration))
+	if err != nil {
+		return GameMetadata{}, fmt.Errorf("failed to retrieve game metadata: %w", err)
+	}
+	if len(results) != 5 {
+		return GameMetadata{}, fmt.Errorf("expected 5 results but got %v", len(results))
+	}
+	l1Head := results[0].GetHash(0)
+	l2BlockNumber := results[1].GetBigInt(0).Uint64()
+	rootClaim := results[2].GetHash(0)
+	status, err := gameTypes.GameStatusFromUint8(results[3].GetUint8(0))
+	if err != nil {
+		return GameMetadata{}, fmt.Errorf("failed to convert game status: %w", err)
+	}
+	duration := results[4].GetUint64(0)
+	return GameMetadata{
+		L1Head:                  l1Head,
+		L2BlockNum:              l2BlockNumber,
+		RootClaim:               rootClaim,
+		Status:                  status,
+		MaxClockDuration:        duration / 2,
+		L2BlockNumberChallenged: false,
+	}, nil
+}
+
+// GetMaxClockDuration fetches gameDuration from the 0.8.0 contract and
+// returns half of it as the per-side maximum clock duration.
+func (f *FaultDisputeGameContract080) GetMaxClockDuration(ctx context.Context) (time.Duration, error) {
+	defer f.metrics.StartContractRequest("GetMaxClockDuration")()
+	result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodGameDuration))
+	if err != nil {
+		return 0, fmt.Errorf("failed to fetch game duration: %w", err)
+	}
+	return time.Duration(result.GetUint64(0)) * time.Second / 2, nil
+}
+
+// GetClaim loads a single claim via the latest-version implementation, then
+// replaces the bond sentinel that the 0.8.0 contract stores for resolved
+// claims with the bond the claim would have required for its position.
+func (f *FaultDisputeGameContract080) GetClaim(ctx context.Context, idx uint64) (types.Claim, error) {
+	claim, err := f.FaultDisputeGameContractLatest.GetClaim(ctx, idx)
+	if err != nil {
+		return types.Claim{}, err
+	}
+	// Replace the resolved sentinel with what the bond would have been
+	if claim.Bond.Cmp(resolvedBondAmount) == 0 {
+		bond, err := f.GetRequiredBond(ctx, claim.Position)
+		if err != nil {
+			return types.Claim{}, err
+		}
+		claim.Bond = bond
+	}
+	return claim, nil
+}
+
+// GetAllClaims loads every claim, then back-fills the required bond for each
+// claim whose on-chain bond equals the resolved sentinel, using one batched
+// GetRequiredBonds call covering all such claims.
+func (f *FaultDisputeGameContract080) GetAllClaims(ctx context.Context, block rpcblock.Block) ([]types.Claim, error) {
+	claims, err := f.FaultDisputeGameContractLatest.GetAllClaims(ctx, block)
+	if err != nil {
+		return nil, err
+	}
+	// Pointers into claims so the bonds can be patched in place below.
+	resolvedClaims := make([]*types.Claim, 0, len(claims))
+	positions := make([]*big.Int, 0, len(claims))
+	for i, claim := range claims {
+		if claim.Bond.Cmp(resolvedBondAmount) == 0 {
+			resolvedClaims = append(resolvedClaims, &claims[i])
+			positions = append(positions, claim.Position.ToGIndex())
+		}
+	}
+	bonds, err := f.GetRequiredBonds(ctx, block, positions...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get required bonds for resolved claims: %w", err)
+	}
+	for i, bond := range bonds {
+		resolvedClaims[i].Bond = bond
+	}
+	return claims, nil
+}
+
+// IsResolved reports whether each supplied claim's subgame has resolved.
+// The 0.8.0 contract signals resolution by overwriting the claim's bond with
+// the resolvedBondAmount sentinel, so the raw on-chain bond is compared
+// against that sentinel for each claim's contract index.
+func (f *FaultDisputeGameContract080) IsResolved(ctx context.Context, block rpcblock.Block, claims ...types.Claim) ([]bool, error) {
+	rawClaims, err := f.FaultDisputeGameContractLatest.GetAllClaims(ctx, block)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get raw claim data: %w", err)
+	}
+	results := make([]bool, len(claims))
+	for i, claim := range claims {
+		results[i] = rawClaims[claim.ContractIndex].Bond.Cmp(resolvedBondAmount) == 0
+	}
+	return results, nil
+}
+
+// CallResolveClaim simulates resolveClaim for the given claim index,
+// returning an error if the call would revert.
+func (f *FaultDisputeGameContract080) CallResolveClaim(ctx context.Context, claimIdx uint64) error {
+	defer f.metrics.StartContractRequest("CallResolveClaim")()
+	call := f.resolveClaimCall(claimIdx)
+	_, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, call)
+	if err != nil {
+		return fmt.Errorf("failed to call resolve claim: %w", err)
+	}
+	return nil
+}
+
+// ResolveClaimTx builds a transaction candidate that resolves the claim at
+// the given index.
+func (f *FaultDisputeGameContract080) ResolveClaimTx(claimIdx uint64) (txmgr.TxCandidate, error) {
+	call := f.resolveClaimCall(claimIdx)
+	return call.ToTxCandidate()
+}
+
+// resolveClaimCall builds the resolveClaim call shared by CallResolveClaim
+// and ResolveClaimTx. On 0.8.0 resolveClaim takes only the claim index
+// (later versions take an additional argument).
+func (f *FaultDisputeGameContract080) resolveClaimCall(claimIdx uint64) *batching.ContractCall {
+	return f.contract.Call(methodResolveClaim, new(big.Int).SetUint64(claimIdx))
+}
+
+// IsL2BlockNumberChallenged always reports false: the 0.8.0 contract has no
+// L2 block number challenge support.
+func (f *FaultDisputeGameContract080) IsL2BlockNumberChallenged(_ context.Context, _ rpcblock.Block) (bool, error) {
+	return false, nil
+}
+
+// ChallengeL2BlockNumberTx is unsupported on 0.8.0 and always returns
+// ErrChallengeL2BlockNotSupported.
+func (f *FaultDisputeGameContract080) ChallengeL2BlockNumberTx(_ *types.InvalidL2BlockNumberChallenge) (txmgr.TxCandidate, error) {
+	return txmgr.TxCandidate{}, ErrChallengeL2BlockNotSupported
+}
+
+// AttackTx builds an attack transaction using the pre-1.2.0 signature,
+// which takes the parent claim index and the pivot claim but not the parent
+// claim's value. The transaction is funded via txWithBond with the bond
+// required for the attack position.
+func (f *FaultDisputeGameContract080) AttackTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) {
+	call := f.contract.Call(methodAttack, big.NewInt(int64(parent.ContractIndex)), pivot)
+	return f.txWithBond(ctx, parent.Position.Attack(), call)
+}
+
+// DefendTx mirrors AttackTx for the defend move, funding the transaction
+// with the bond required for the defend position.
+func (f *FaultDisputeGameContract080) DefendTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) {
+	call := f.contract.Call(methodDefend, big.NewInt(int64(parent.ContractIndex)), pivot)
+	return f.txWithBond(ctx, parent.Position.Defend(), call)
+}
diff --git a/op-challenger2/game/fault/contracts/faultdisputegame111.go b/op-challenger2/game/fault/contracts/faultdisputegame111.go
new file mode 100644
index 000000000000..a5c542011b37
--- /dev/null
+++ b/op-challenger2/game/fault/contracts/faultdisputegame111.go
@@ -0,0 +1,28 @@
+package contracts
+
+import (
+	"context"
+	_ "embed"
+	"math/big"
+
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types"
+	"github.com/ethereum-optimism/optimism/op-service/txmgr"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+//go:embed abis/FaultDisputeGame-1.1.1.json
+var faultDisputeGameAbi111 []byte
+
+type FaultDisputeGameContract111 struct {
+	FaultDisputeGameContractLatest
+}
+
+func (f *FaultDisputeGameContract111) AttackTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) {
+	call := f.contract.Call(methodAttack, big.NewInt(int64(parent.ContractIndex)), pivot)
+	return f.txWithBond(ctx, parent.Position.Attack(), call)
+}
+
+func (f *FaultDisputeGameContract111) DefendTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) {
+	call := f.contract.Call(methodDefend, big.NewInt(int64(parent.ContractIndex)), pivot)
+	return f.txWithBond(ctx, parent.Position.Defend(), call)
+}
diff --git a/op-challenger2/game/fault/contracts/faultdisputegame_test.go b/op-challenger2/game/fault/contracts/faultdisputegame_test.go
new file mode 100644
index 000000000000..c8922045ae72
--- /dev/null
+++ b/op-challenger2/game/fault/contracts/faultdisputegame_test.go
@@ -0,0 +1,789 @@
+package contracts
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"math/big"
+	"math/rand"
+	"slices"
+	"testing"
+	"time"
+
+	contractMetrics "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics"
+	faultTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types"
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/types"
+	"github.com/ethereum-optimism/optimism/op-service/eth"
+	"github.com/ethereum-optimism/optimism/op-service/sources/batching"
+	"github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock"
+	batchingTest "github.com/ethereum-optimism/optimism/op-service/sources/batching/test"
+	"github.com/ethereum-optimism/optimism/op-service/testutils"
+	"github.com/ethereum-optimism/optimism/op-service/txmgr"
+	"github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots"
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	fdgAddr    = common.HexToAddress("0x24112842371dFC380576ebb09Ae16Cb6B6caD7CB")
+	vmAddr     = common.HexToAddress("0x33332842371dFC380576ebb09Ae16Cb6B6c3333")
+	oracleAddr = common.HexToAddress("0x44442842371dFC380576ebb09Ae16Cb6B6ca4444")
+)
+
+type contractVersion struct {
+	version string
+	loadAbi func() *abi.ABI
+}
+
+func (c contractVersion) Is(versions ...string) bool { + return slices.Contains(versions, c.version) +} + +const ( + vers080 = "0.8.0" + vers0180 = "0.18.0" + vers111 = "1.1.1" + versLatest = "1.2.0" +) + +var versions = []contractVersion{ + { + version: vers080, + loadAbi: func() *abi.ABI { + return mustParseAbi(faultDisputeGameAbi020) + }, + }, + { + version: vers0180, + loadAbi: func() *abi.ABI { + return mustParseAbi(faultDisputeGameAbi0180) + }, + }, + { + version: vers111, + loadAbi: func() *abi.ABI { + return mustParseAbi(faultDisputeGameAbi111) + }, + }, + { + version: versLatest, + loadAbi: snapshots.LoadFaultDisputeGameABI, + }, +} + +func TestSimpleGetters(t *testing.T) { + tests := []struct { + methodAlias string + method string + args []interface{} + result interface{} + expected interface{} // Defaults to expecting the same as result + call func(game FaultDisputeGameContract) (any, error) + applies func(version contractVersion) bool + }{ + { + methodAlias: "status", + method: methodStatus, + result: types.GameStatusChallengerWon, + call: func(game FaultDisputeGameContract) (any, error) { + return game.GetStatus(context.Background()) + }, + }, + { + methodAlias: "maxClockDuration", + method: methodMaxClockDuration, + result: uint64(5566), + expected: 5566 * time.Second, + call: func(game FaultDisputeGameContract) (any, error) { + return game.GetMaxClockDuration(context.Background()) + }, + applies: func(version contractVersion) bool { + return version.version != vers080 + }, + }, + { + methodAlias: "gameDuration", + method: methodGameDuration, + result: uint64(5566) * 2, + expected: 5566 * time.Second, + call: func(game FaultDisputeGameContract) (any, error) { + return game.GetMaxClockDuration(context.Background()) + }, + applies: func(version contractVersion) bool { + return version.version == vers080 + }, + }, + { + methodAlias: "maxGameDepth", + method: methodMaxGameDepth, + result: big.NewInt(128), + expected: faultTypes.Depth(128), + call: 
func(game FaultDisputeGameContract) (any, error) { + return game.GetMaxGameDepth(context.Background()) + }, + }, + { + methodAlias: "absolutePrestate", + method: methodAbsolutePrestate, + result: common.Hash{0xab}, + call: func(game FaultDisputeGameContract) (any, error) { + return game.GetAbsolutePrestateHash(context.Background()) + }, + }, + { + methodAlias: "claimCount", + method: methodClaimCount, + result: big.NewInt(9876), + expected: uint64(9876), + call: func(game FaultDisputeGameContract) (any, error) { + return game.GetClaimCount(context.Background()) + }, + }, + { + methodAlias: "l1Head", + method: methodL1Head, + result: common.Hash{0xdd, 0xbb}, + call: func(game FaultDisputeGameContract) (any, error) { + return game.GetL1Head(context.Background()) + }, + }, + { + methodAlias: "resolve", + method: methodResolve, + result: types.GameStatusInProgress, + call: func(game FaultDisputeGameContract) (any, error) { + return game.CallResolve(context.Background()) + }, + }, + } + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + for _, test := range tests { + test := test + t.Run(test.methodAlias, func(t *testing.T) { + if test.applies != nil && !test.applies(version) { + t.Skip("Skipping for this version") + } + stubRpc, game := setupFaultDisputeGameTest(t, version) + stubRpc.SetResponse(fdgAddr, test.method, rpcblock.Latest, nil, []interface{}{test.result}) + status, err := test.call(game) + require.NoError(t, err) + expected := test.expected + if expected == nil { + expected = test.result + } + require.Equal(t, expected, status) + }) + } + }) + } +} + +func TestClock_EncodingDecoding(t *testing.T) { + t.Run("DurationAndTimestamp", func(t *testing.T) { + by := common.FromHex("00000000000000050000000000000002") + encoded := new(big.Int).SetBytes(by) + clock := decodeClock(encoded) + require.Equal(t, 5*time.Second, clock.Duration) + require.Equal(t, time.Unix(2, 0), clock.Timestamp) + require.Equal(t, encoded, 
packClock(clock)) + }) + + t.Run("ZeroDuration", func(t *testing.T) { + by := common.FromHex("00000000000000000000000000000002") + encoded := new(big.Int).SetBytes(by) + clock := decodeClock(encoded) + require.Equal(t, 0*time.Second, clock.Duration) + require.Equal(t, time.Unix(2, 0), clock.Timestamp) + require.Equal(t, encoded, packClock(clock)) + }) + + t.Run("ZeroTimestamp", func(t *testing.T) { + by := common.FromHex("00000000000000050000000000000000") + encoded := new(big.Int).SetBytes(by) + clock := decodeClock(encoded) + require.Equal(t, 5*time.Second, clock.Duration) + require.Equal(t, time.Unix(0, 0), clock.Timestamp) + require.Equal(t, encoded, packClock(clock)) + }) + + t.Run("ZeroClock", func(t *testing.T) { + by := common.FromHex("00000000000000000000000000000000") + encoded := new(big.Int).SetBytes(by) + clock := decodeClock(encoded) + require.Equal(t, 0*time.Second, clock.Duration) + require.Equal(t, time.Unix(0, 0), clock.Timestamp) + require.Equal(t, encoded.Uint64(), packClock(clock).Uint64()) + }) +} + +func TestGetOracleAddr(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + stubRpc.SetResponse(fdgAddr, methodVM, rpcblock.Latest, nil, []interface{}{vmAddr}) + stubRpc.SetResponse(vmAddr, methodOracle, rpcblock.Latest, nil, []interface{}{oracleAddr}) + + actual, err := game.GetOracle(context.Background()) + require.NoError(t, err) + require.Equal(t, oracleAddr, actual.Addr()) + }) + } +} + +func TestGetClaim(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + idx := big.NewInt(2) + parentIndex := uint32(1) + counteredBy := common.Address{0x01} + claimant := common.Address{0x02} + bond := big.NewInt(5) + value := common.Hash{0xab} + position := big.NewInt(2) + clock := big.NewInt(1234) + 
stubRpc.SetResponse(fdgAddr, methodClaim, rpcblock.Latest, []interface{}{idx}, []interface{}{parentIndex, counteredBy, claimant, bond, value, position, clock}) + status, err := game.GetClaim(context.Background(), idx.Uint64()) + require.NoError(t, err) + require.Equal(t, faultTypes.Claim{ + ClaimData: faultTypes.ClaimData{ + Value: value, + Position: faultTypes.NewPositionFromGIndex(position), + Bond: bond, + }, + CounteredBy: counteredBy, + Claimant: claimant, + Clock: decodeClock(big.NewInt(1234)), + ContractIndex: int(idx.Uint64()), + ParentContractIndex: 1, + }, status) + }) + } +} + +func TestGetAllClaims(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + claim0 := faultTypes.Claim{ + ClaimData: faultTypes.ClaimData{ + Value: common.Hash{0xaa}, + Position: faultTypes.NewPositionFromGIndex(big.NewInt(1)), + Bond: big.NewInt(5), + }, + CounteredBy: common.Address{0x01}, + Claimant: common.Address{0x02}, + Clock: decodeClock(big.NewInt(1234)), + ContractIndex: 0, + ParentContractIndex: math.MaxUint32, + } + claim1 := faultTypes.Claim{ + ClaimData: faultTypes.ClaimData{ + Value: common.Hash{0xab}, + Position: faultTypes.NewPositionFromGIndex(big.NewInt(2)), + Bond: big.NewInt(5), + }, + CounteredBy: common.Address{0x02}, + Claimant: common.Address{0x01}, + Clock: decodeClock(big.NewInt(4455)), + ContractIndex: 1, + ParentContractIndex: 0, + } + claim2 := faultTypes.Claim{ + ClaimData: faultTypes.ClaimData{ + Value: common.Hash{0xbb}, + Position: faultTypes.NewPositionFromGIndex(big.NewInt(6)), + Bond: big.NewInt(5), + }, + Claimant: common.Address{0x02}, + Clock: decodeClock(big.NewInt(7777)), + ContractIndex: 2, + ParentContractIndex: 1, + } + expectedClaims := []faultTypes.Claim{claim0, claim1, claim2} + block := rpcblock.ByNumber(42) + stubRpc.SetResponse(fdgAddr, methodClaimCount, block, nil, 
[]interface{}{big.NewInt(int64(len(expectedClaims)))}) + for _, claim := range expectedClaims { + expectGetClaim(stubRpc, block, claim) + } + claims, err := game.GetAllClaims(context.Background(), block) + require.NoError(t, err) + require.Equal(t, expectedClaims, claims) + }) + } +} + +func TestGetBalance(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + wethAddr := common.Address{0x11, 0x55, 0x66} + balance := big.NewInt(9995877) + block := rpcblock.ByNumber(424) + stubRpc, game := setupFaultDisputeGameTest(t, version) + stubRpc.SetResponse(fdgAddr, methodWETH, block, nil, []interface{}{wethAddr}) + stubRpc.AddExpectedCall(batchingTest.NewGetBalanceCall(wethAddr, block, balance)) + + actualBalance, actualAddr, err := game.GetBalance(context.Background(), block) + require.NoError(t, err) + require.Equal(t, wethAddr, actualAddr) + require.Truef(t, balance.Cmp(actualBalance) == 0, "Expected balance %v but was %v", balance, actualBalance) + }) + } +} + +func TestCallResolveClaim(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + if version.version == vers080 { + stubRpc.SetResponse(fdgAddr, methodResolveClaim, rpcblock.Latest, []interface{}{big.NewInt(123)}, nil) + } else { + stubRpc.SetResponse(fdgAddr, methodResolveClaim, rpcblock.Latest, []interface{}{big.NewInt(123), maxChildChecks}, nil) + } + err := game.CallResolveClaim(context.Background(), 123) + require.NoError(t, err) + }) + } +} + +func TestResolveClaimTxTest(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + if version.version == vers080 { + stubRpc.SetResponse(fdgAddr, methodResolveClaim, rpcblock.Latest, []interface{}{big.NewInt(123)}, nil) + } else { + 
stubRpc.SetResponse(fdgAddr, methodResolveClaim, rpcblock.Latest, []interface{}{big.NewInt(123), maxChildChecks}, nil) + } + tx, err := game.ResolveClaimTx(123) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + }) + } +} + +func TestResolveTx(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + stubRpc.SetResponse(fdgAddr, methodResolve, rpcblock.Latest, nil, nil) + tx, err := game.ResolveTx() + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + }) + } +} + +func TestAttackTx(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + bond := big.NewInt(1044) + value := common.Hash{0xaa} + parent := faultTypes.Claim{ClaimData: faultTypes.ClaimData{Value: common.Hash{0xbb}}, ContractIndex: 111} + stubRpc.SetResponse(fdgAddr, methodRequiredBond, rpcblock.Latest, []interface{}{parent.Position.Attack().ToGIndex()}, []interface{}{bond}) + if version.Is(vers080, vers0180, vers111) { + stubRpc.SetResponse(fdgAddr, methodAttack, rpcblock.Latest, []interface{}{big.NewInt(111), value}, nil) + } else { + stubRpc.SetResponse(fdgAddr, methodAttack, rpcblock.Latest, []interface{}{parent.Value, big.NewInt(111), value}, nil) + } + tx, err := game.AttackTx(context.Background(), parent, value) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + require.Equal(t, bond, tx.Value) + }) + } +} + +func TestDefendTx(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + bond := big.NewInt(1044) + value := common.Hash{0xaa} + parent := faultTypes.Claim{ClaimData: faultTypes.ClaimData{Value: common.Hash{0xbb}}, ContractIndex: 111} + stubRpc.SetResponse(fdgAddr, methodRequiredBond, rpcblock.Latest, 
[]interface{}{parent.Position.Defend().ToGIndex()}, []interface{}{bond}) + if version.Is(vers080, vers0180, vers111) { + stubRpc.SetResponse(fdgAddr, methodDefend, rpcblock.Latest, []interface{}{big.NewInt(111), value}, nil) + } else { + stubRpc.SetResponse(fdgAddr, methodDefend, rpcblock.Latest, []interface{}{parent.Value, big.NewInt(111), value}, nil) + } + tx, err := game.DefendTx(context.Background(), parent, value) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + require.Equal(t, bond, tx.Value) + }) + } +} + +func TestStepTx(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + stateData := []byte{1, 2, 3} + proofData := []byte{4, 5, 6, 7, 8, 9} + stubRpc.SetResponse(fdgAddr, methodStep, rpcblock.Latest, []interface{}{big.NewInt(111), true, stateData, proofData}, nil) + tx, err := game.StepTx(111, true, stateData, proofData) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + }) + } +} + +func expectGetClaim(stubRpc *batchingTest.AbiBasedRpc, block rpcblock.Block, claim faultTypes.Claim) { + stubRpc.SetResponse( + fdgAddr, + methodClaim, + block, + []interface{}{big.NewInt(int64(claim.ContractIndex))}, + []interface{}{ + uint32(claim.ParentContractIndex), + claim.CounteredBy, + claim.Claimant, + claim.Bond, + claim.Value, + claim.Position.ToGIndex(), + packClock(claim.Clock), + }) +} + +func TestGetBlockRange(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, contract := setupFaultDisputeGameTest(t, version) + expectedStart := uint64(65) + expectedEnd := uint64(102) + stubRpc.SetResponse(fdgAddr, methodStartingBlockNumber, rpcblock.Latest, nil, []interface{}{new(big.Int).SetUint64(expectedStart)}) + stubRpc.SetResponse(fdgAddr, methodL2BlockNumber, rpcblock.Latest, nil, []interface{}{new(big.Int).SetUint64(expectedEnd)}) + start, end, err 
:= contract.GetBlockRange(context.Background()) + require.NoError(t, err) + require.Equal(t, expectedStart, start) + require.Equal(t, expectedEnd, end) + }) + } +} + +func TestGetSplitDepth(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, contract := setupFaultDisputeGameTest(t, version) + expectedSplitDepth := faultTypes.Depth(15) + stubRpc.SetResponse(fdgAddr, methodSplitDepth, rpcblock.Latest, nil, []interface{}{new(big.Int).SetUint64(uint64(expectedSplitDepth))}) + splitDepth, err := contract.GetSplitDepth(context.Background()) + require.NoError(t, err) + require.Equal(t, expectedSplitDepth, splitDepth) + }) + } +} + +func TestGetGameMetadata(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, contract := setupFaultDisputeGameTest(t, version) + expectedL1Head := common.Hash{0x0a, 0x0b} + expectedL2BlockNumber := uint64(123) + expectedMaxClockDuration := uint64(456) + expectedRootClaim := common.Hash{0x01, 0x02} + expectedStatus := types.GameStatusChallengerWon + expectedL2BlockNumberChallenged := true + expectedL2BlockNumberChallenger := common.Address{0xee} + block := rpcblock.ByNumber(889) + stubRpc.SetResponse(fdgAddr, methodL1Head, block, nil, []interface{}{expectedL1Head}) + stubRpc.SetResponse(fdgAddr, methodL2BlockNumber, block, nil, []interface{}{new(big.Int).SetUint64(expectedL2BlockNumber)}) + stubRpc.SetResponse(fdgAddr, methodRootClaim, block, nil, []interface{}{expectedRootClaim}) + stubRpc.SetResponse(fdgAddr, methodStatus, block, nil, []interface{}{expectedStatus}) + if version.version == vers080 { + expectedL2BlockNumberChallenged = false + expectedL2BlockNumberChallenger = common.Address{} + stubRpc.SetResponse(fdgAddr, methodGameDuration, block, nil, []interface{}{expectedMaxClockDuration * 2}) + } else if version.version == vers0180 { + expectedL2BlockNumberChallenged = false + 
expectedL2BlockNumberChallenger = common.Address{} + stubRpc.SetResponse(fdgAddr, methodMaxClockDuration, block, nil, []interface{}{expectedMaxClockDuration}) + } else { + stubRpc.SetResponse(fdgAddr, methodMaxClockDuration, block, nil, []interface{}{expectedMaxClockDuration}) + stubRpc.SetResponse(fdgAddr, methodL2BlockNumberChallenged, block, nil, []interface{}{expectedL2BlockNumberChallenged}) + stubRpc.SetResponse(fdgAddr, methodL2BlockNumberChallenger, block, nil, []interface{}{expectedL2BlockNumberChallenger}) + } + actual, err := contract.GetGameMetadata(context.Background(), block) + expected := GameMetadata{ + L1Head: expectedL1Head, + L2BlockNum: expectedL2BlockNumber, + RootClaim: expectedRootClaim, + Status: expectedStatus, + MaxClockDuration: expectedMaxClockDuration, + L2BlockNumberChallenged: expectedL2BlockNumberChallenged, + L2BlockNumberChallenger: expectedL2BlockNumberChallenger, + } + require.NoError(t, err) + require.Equal(t, expected, actual) + }) + } +} + +func TestGetStartingRootHash(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, contract := setupFaultDisputeGameTest(t, version) + expectedOutputRoot := common.HexToHash("0x1234") + stubRpc.SetResponse(fdgAddr, methodStartingRootHash, rpcblock.Latest, nil, []interface{}{expectedOutputRoot}) + startingOutputRoot, err := contract.GetStartingRootHash(context.Background()) + require.NoError(t, err) + require.Equal(t, expectedOutputRoot, startingOutputRoot) + }) + } +} + +func TestFaultDisputeGame_UpdateOracleTx(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + t.Run("Local", func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + data := faultTypes.NewPreimageOracleData(common.Hash{0x01, 0xbc}.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7}, 16) + claimIdx := uint64(6) + stubRpc.SetResponse(fdgAddr, methodAddLocalData, 
rpcblock.Latest, []interface{}{ + data.GetIdent(), + new(big.Int).SetUint64(claimIdx), + new(big.Int).SetUint64(uint64(data.OracleOffset)), + }, nil) + tx, err := game.UpdateOracleTx(context.Background(), claimIdx, data) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + }) + + t.Run("Global", func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + data := faultTypes.NewPreimageOracleData(common.Hash{0x02, 0xbc}.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15}, 16) + claimIdx := uint64(6) + stubRpc.SetResponse(fdgAddr, methodVM, rpcblock.Latest, nil, []interface{}{vmAddr}) + stubRpc.SetResponse(vmAddr, methodOracle, rpcblock.Latest, nil, []interface{}{oracleAddr}) + stubRpc.SetResponse(oracleAddr, methodLoadKeccak256PreimagePart, rpcblock.Latest, []interface{}{ + new(big.Int).SetUint64(uint64(data.OracleOffset)), + data.GetPreimageWithoutSize(), + }, nil) + tx, err := game.UpdateOracleTx(context.Background(), claimIdx, data) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + }) + }) + } +} + +func TestFaultDisputeGame_GetCredit(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + addr := common.Address{0x01} + expectedCredit := big.NewInt(4284) + expectedStatus := types.GameStatusChallengerWon + stubRpc.SetResponse(fdgAddr, methodCredit, rpcblock.Latest, []interface{}{addr}, []interface{}{expectedCredit}) + stubRpc.SetResponse(fdgAddr, methodStatus, rpcblock.Latest, nil, []interface{}{expectedStatus}) + + actualCredit, actualStatus, err := game.GetCredit(context.Background(), addr) + require.NoError(t, err) + require.Equal(t, expectedCredit, actualCredit) + require.Equal(t, expectedStatus, actualStatus) + }) + } +} + +func TestFaultDisputeGame_GetCredits(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, 
game := setupFaultDisputeGameTest(t, version) + + block := rpcblock.ByNumber(482) + + addrs := []common.Address{{0x01}, {0x02}, {0x03}} + expected := []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(0)} + + for i, addr := range addrs { + stubRpc.SetResponse(fdgAddr, methodCredit, block, []interface{}{addr}, []interface{}{expected[i]}) + } + + actual, err := game.GetCredits(context.Background(), block, addrs...) + require.NoError(t, err) + require.Equal(t, len(expected), len(actual)) + for i := range expected { + require.Zerof(t, expected[i].Cmp(actual[i]), "expected: %v actual: %v", expected[i], actual[i]) + } + }) + } +} + +func TestFaultDisputeGame_ClaimCreditTx(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + t.Run("Success", func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + addr := common.Address{0xaa} + + stubRpc.SetResponse(fdgAddr, methodClaimCredit, rpcblock.Latest, []interface{}{addr}, nil) + tx, err := game.ClaimCreditTx(context.Background(), addr) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + }) + + t.Run("SimulationFails", func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + addr := common.Address{0xaa} + + stubRpc.SetError(fdgAddr, methodClaimCredit, rpcblock.Latest, []interface{}{addr}, errors.New("still locked")) + tx, err := game.ClaimCreditTx(context.Background(), addr) + require.ErrorIs(t, err, ErrSimulationFailed) + require.Equal(t, txmgr.TxCandidate{}, tx) + }) + }) + } +} + +func TestFaultDisputeGame_IsResolved(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + stubRpc, game := setupFaultDisputeGameTest(t, version) + + block := rpcblock.ByNumber(482) + + claims := []faultTypes.Claim{ + {ContractIndex: 1}, + {ContractIndex: 5}, + {ContractIndex: 13}, + } + claimIdxs := []*big.Int{big.NewInt(1), big.NewInt(5), big.NewInt(13)} + 
expected := []bool{false, true, true} + + if version.version == vers080 { + claimCount := 14 + stubRpc.SetResponse(fdgAddr, methodClaimCount, block, nil, []interface{}{big.NewInt(int64(claimCount))}) + for idx := 0; idx < claimCount; idx++ { + bond := big.NewInt(42) + if idx == 5 || idx == 13 { // The two claims expected to be resolved + bond = resolvedBondAmount + } + expectGetClaim(stubRpc, block, faultTypes.Claim{ + ContractIndex: idx, + ClaimData: faultTypes.ClaimData{ + Bond: bond, + }, + }) + } + } else { + for i, idx := range claimIdxs { + stubRpc.SetResponse(fdgAddr, methodResolvedSubgames, block, []interface{}{idx}, []interface{}{expected[i]}) + } + } + + actual, err := game.IsResolved(context.Background(), block, claims...) + require.NoError(t, err) + require.Equal(t, len(expected), len(actual)) + for i := range expected { + require.Equal(t, expected[i], actual[i]) + } + }) + } +} + +func TestFaultDisputeGameContractLatest_IsL2BlockNumberChallenged(t *testing.T) { + for _, version := range versions { + version := version + for _, expected := range []bool{true, false} { + expected := expected + t.Run(fmt.Sprintf("%v-%v", version.version, expected), func(t *testing.T) { + block := rpcblock.ByHash(common.Hash{0x43}) + stubRpc, game := setupFaultDisputeGameTest(t, version) + supportsL2BlockNumChallenge := version.version != vers080 && version.version != vers0180 + if supportsL2BlockNumChallenge { + stubRpc.SetResponse(fdgAddr, methodL2BlockNumberChallenged, block, nil, []interface{}{expected}) + } else if expected { + t.Skip("Can't have challenged L2 block number on this contract version") + } + challenged, err := game.IsL2BlockNumberChallenged(context.Background(), block) + require.NoError(t, err) + require.Equal(t, expected, challenged) + }) + } + } +} + +func TestFaultDisputeGameContractLatest_ChallengeL2BlockNumberTx(t *testing.T) { + for _, version := range versions { + version := version + t.Run(version.version, func(t *testing.T) { + rng := 
rand.New(rand.NewSource(0)) + stubRpc, game := setupFaultDisputeGameTest(t, version) + challenge := &faultTypes.InvalidL2BlockNumberChallenge{ + Output: ð.OutputResponse{ + Version: eth.Bytes32{}, + OutputRoot: eth.Bytes32{0xaa}, + BlockRef: eth.L2BlockRef{Hash: common.Hash{0xbb}}, + WithdrawalStorageRoot: common.Hash{0xcc}, + StateRoot: common.Hash{0xdd}, + }, + Header: testutils.RandomHeader(rng), + } + supportsL2BlockNumChallenge := version.version != vers080 && version.version != vers0180 + if supportsL2BlockNumChallenge { + headerRlp, err := rlp.EncodeToBytes(challenge.Header) + require.NoError(t, err) + stubRpc.SetResponse(fdgAddr, methodChallengeRootL2Block, rpcblock.Latest, []interface{}{ + outputRootProof{ + Version: challenge.Output.Version, + StateRoot: challenge.Output.StateRoot, + MessagePasserStorageRoot: challenge.Output.WithdrawalStorageRoot, + LatestBlockhash: challenge.Output.BlockRef.Hash, + }, + headerRlp, + }, nil) + } + tx, err := game.ChallengeL2BlockNumberTx(challenge) + if supportsL2BlockNumChallenge { + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + } else { + require.ErrorIs(t, err, ErrChallengeL2BlockNotSupported) + require.Equal(t, txmgr.TxCandidate{}, tx) + } + }) + } +} + +func setupFaultDisputeGameTest(t *testing.T, version contractVersion) (*batchingTest.AbiBasedRpc, FaultDisputeGameContract) { + fdgAbi := version.loadAbi() + + vmAbi := snapshots.LoadMIPSABI() + oracleAbi := snapshots.LoadPreimageOracleABI() + + stubRpc := batchingTest.NewAbiBasedRpc(t, fdgAddr, fdgAbi) + stubRpc.AddContract(vmAddr, vmAbi) + stubRpc.AddContract(oracleAddr, oracleAbi) + caller := batching.NewMultiCaller(stubRpc, batching.DefaultBatchSize) + + stubRpc.SetResponse(fdgAddr, methodVersion, rpcblock.Latest, nil, []interface{}{version.version}) + game, err := NewFaultDisputeGameContract(context.Background(), contractMetrics.NoopContractMetrics, fdgAddr, caller) + require.NoError(t, err) + return stubRpc, game +} diff --git 
a/op-challenger2/game/fault/contracts/gamefactory.go b/op-challenger2/game/fault/contracts/gamefactory.go
new file mode 100644
index 000000000000..27c458ebcd4f
--- /dev/null
+++ b/op-challenger2/game/fault/contracts/gamefactory.go
@@ -0,0 +1,203 @@
+package contracts
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics"
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/types"
+	"github.com/ethereum-optimism/optimism/op-service/sources/batching"
+	"github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock"
+	"github.com/ethereum-optimism/optimism/op-service/txmgr"
+	"github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots"
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/ethereum/go-ethereum/common"
+	ethTypes "github.com/ethereum/go-ethereum/core/types"
+)
+
+const (
+	methodGameCount   = "gameCount"
+	methodGameAtIndex = "gameAtIndex"
+	methodGameImpls   = "gameImpls"
+	methodInitBonds   = "initBonds"
+	methodCreateGame  = "create"
+	methodGames       = "games"
+
+	eventDisputeGameCreated = "DisputeGameCreated"
+)
+
+var (
+	ErrEventNotFound = errors.New("event not found")
+)
+
+// DisputeGameFactoryContract provides typed access to the on-chain
+// DisputeGameFactory contract via batched RPC calls.
+type DisputeGameFactoryContract struct {
+	metrics     metrics.ContractMetricer
+	multiCaller *batching.MultiCaller
+	contract    *batching.BoundContract
+	abi         *abi.ABI
+}
+
+// NewDisputeGameFactoryContract binds the factory at addr using the
+// snapshot ABI bundled with the contracts-bedrock package.
+func NewDisputeGameFactoryContract(m metrics.ContractMetricer, addr common.Address, caller *batching.MultiCaller) *DisputeGameFactoryContract {
+	factoryAbi := snapshots.LoadDisputeGameFactoryABI()
+	return &DisputeGameFactoryContract{
+		metrics:     m,
+		multiCaller: caller,
+		contract:    batching.NewBoundContract(factoryAbi, addr),
+		abi:         factoryAbi,
+	}
+}
+
+// GetGameFromParameters returns the address of the game created with the
+// given trace type, output root and L2 block number, via the factory's
+// games() lookup. The block number is encoded as 32 bytes (extraData).
+func (f *DisputeGameFactoryContract) GetGameFromParameters(ctx context.Context, traceType uint32, outputRoot common.Hash, l2BlockNum uint64) (common.Address, error) {
+	defer f.metrics.StartContractRequest("GetGameFromParameters")()
+	result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodGames, traceType, outputRoot, common.BigToHash(big.NewInt(int64(l2BlockNum))).Bytes()))
+	if err != nil {
+		return common.Address{}, fmt.Errorf("failed to fetch game from parameters: %w", err)
+	}
+	return result.GetAddress(0), nil
+}
+
+// GetGameCount returns the number of games the factory had created as of
+// the block identified by blockHash.
+func (f *DisputeGameFactoryContract) GetGameCount(ctx context.Context, blockHash common.Hash) (uint64, error) {
+	defer f.metrics.StartContractRequest("GetGameCount")()
+	result, err := f.multiCaller.SingleCall(ctx, rpcblock.ByHash(blockHash), f.contract.Call(methodGameCount))
+	if err != nil {
+		return 0, fmt.Errorf("failed to load game count: %w", err)
+	}
+	return result.GetBigInt(0).Uint64(), nil
+}
+
+// GetGame returns the metadata of the game at the given index as of the
+// block identified by blockHash.
+func (f *DisputeGameFactoryContract) GetGame(ctx context.Context, idx uint64, blockHash common.Hash) (types.GameMetadata, error) {
+	defer f.metrics.StartContractRequest("GetGame")()
+	result, err := f.multiCaller.SingleCall(ctx, rpcblock.ByHash(blockHash), f.contract.Call(methodGameAtIndex, new(big.Int).SetUint64(idx)))
+	if err != nil {
+		return types.GameMetadata{}, fmt.Errorf("failed to load game %v: %w", idx, err)
+	}
+	return f.decodeGame(idx, result), nil
+}
+
+// GetGameImpl returns the implementation address registered with the
+// factory for the given game type.
+func (f *DisputeGameFactoryContract) GetGameImpl(ctx context.Context, gameType uint32) (common.Address, error) {
+	defer f.metrics.StartContractRequest("GetGameImpl")()
+	result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodGameImpls, gameType))
+	if err != nil {
+		return common.Address{}, fmt.Errorf("failed to load game impl for type %v: %w", gameType, err)
+	}
+	return result.GetAddress(0), nil
+}
+
+// GetGamesAtOrAfter returns games with Timestamp >= earliestTimestamp,
+// newest first. It walks the game list backwards from the most recent game
+// in multicall-sized batches and stops as soon as it decodes a game older
+// than the cutoff, so it avoids fetching the entire history.
+func (f *DisputeGameFactoryContract) GetGamesAtOrAfter(ctx context.Context, blockHash common.Hash, earliestTimestamp uint64) ([]types.GameMetadata, error) {
+	defer f.metrics.StartContractRequest("GetGamesAtOrAfter")()
+	count, err := f.GetGameCount(ctx, blockHash)
+	if err != nil {
+		return nil, err
+	}
+	batchSize := uint64(f.multiCaller.BatchSize())
+	rangeEnd := count
+
+	var games []types.GameMetadata
+	for {
+		if rangeEnd == uint64(0) {
+			// rangeEnd is exclusive so if it's 0 we've reached the end.
+			return games, nil
+		}
+		rangeStart := uint64(0)
+		if rangeEnd > batchSize {
+			rangeStart = rangeEnd - batchSize
+		}
+		calls := make([]batching.Call, 0, rangeEnd-rangeStart)
+		for i := rangeEnd - 1; ; i-- {
+			calls = append(calls, f.contract.Call(methodGameAtIndex, new(big.Int).SetUint64(i)))
+			// Break once we've added the last call to avoid underflow when rangeStart == 0
+			if i == rangeStart {
+				break
+			}
+		}
+
+		results, err := f.multiCaller.Call(ctx, rpcblock.ByHash(blockHash), calls...)
+		if err != nil {
+			return nil, fmt.Errorf("failed to fetch games: %w", err)
+		}
+
+		for i, result := range results {
+			// Calls were issued highest index first, so map batch position
+			// back to the on-chain game index.
+			idx := rangeEnd - uint64(i) - 1
+			game := f.decodeGame(idx, result)
+			if game.Timestamp < earliestTimestamp {
+				return games, nil
+			}
+			games = append(games, game)
+		}
+		rangeEnd = rangeStart
+	}
+}
+
+// GetAllGames returns the metadata of every game the factory had created
+// as of the block identified by blockHash, in a single multicall.
+func (f *DisputeGameFactoryContract) GetAllGames(ctx context.Context, blockHash common.Hash) ([]types.GameMetadata, error) {
+	defer f.metrics.StartContractRequest("GetAllGames")()
+	count, err := f.GetGameCount(ctx, blockHash)
+	if err != nil {
+		return nil, err
+	}
+
+	calls := make([]batching.Call, count)
+	for i := uint64(0); i < count; i++ {
+		calls[i] = f.contract.Call(methodGameAtIndex, new(big.Int).SetUint64(i))
+	}
+
+	results, err := f.multiCaller.Call(ctx, rpcblock.ByHash(blockHash), calls...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch games: %w", err)
+	}
+
+	var games []types.GameMetadata
+	for i, result := range results {
+		games = append(games, f.decodeGame(uint64(i), result))
+	}
+	return games, nil
+}
+
+// CreateTx builds a transaction candidate that creates a new game with the
+// given trace type, output root and L2 block number. The required init bond
+// for the game type is fetched from the factory and set as the tx value.
+func (f *DisputeGameFactoryContract) CreateTx(ctx context.Context, traceType uint32, outputRoot common.Hash, l2BlockNum uint64) (txmgr.TxCandidate, error) {
+	result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodInitBonds, traceType))
+	if err != nil {
+		return txmgr.TxCandidate{}, fmt.Errorf("failed to fetch init bond: %w", err)
+	}
+	initBond := result.GetBigInt(0)
+	call := f.contract.Call(methodCreateGame, traceType, outputRoot, common.BigToHash(big.NewInt(int64(l2BlockNum))).Bytes())
+	candidate, err := call.ToTxCandidate()
+	if err != nil {
+		return txmgr.TxCandidate{}, err
+	}
+	candidate.Value = initBond
+	return candidate, err
+}
+
+// DecodeDisputeGameCreatedLog scans the receipt's logs for a
+// DisputeGameCreated event emitted by this factory and returns the new
+// game's proxy address, game type and root claim. Logs from other
+// addresses or that fail to decode are skipped; ErrEventNotFound is
+// returned if no matching event is present.
+func (f *DisputeGameFactoryContract) DecodeDisputeGameCreatedLog(rcpt *ethTypes.Receipt) (common.Address, uint32, common.Hash, error) {
+	for _, log := range rcpt.Logs {
+		if log.Address != f.contract.Addr() {
+			// Not from this contract
+			continue
+		}
+		name, result, err := f.contract.DecodeEvent(log)
+		if err != nil {
+			// Not a valid event
+			continue
+		}
+		if name != eventDisputeGameCreated {
+			// Not the event we're looking for
+			continue
+		}
+
+		return result.GetAddress(0), result.GetUint32(1), result.GetHash(2), nil
+	}
+	return common.Address{}, 0, common.Hash{}, fmt.Errorf("%w: %v", ErrEventNotFound, eventDisputeGameCreated)
+}
+
+// decodeGame unpacks a gameAtIndex result (gameType, timestamp, proxy) into
+// GameMetadata, attaching the caller-supplied index.
+func (f *DisputeGameFactoryContract) decodeGame(idx uint64, result *batching.CallResult) types.GameMetadata {
+	gameType := result.GetUint32(0)
+	timestamp := result.GetUint64(1)
+	proxy := result.GetAddress(2)
+	return types.GameMetadata{
+		Index:     idx,
+		GameType:  gameType,
+		Timestamp: timestamp,
+		Proxy:     proxy,
+	}
+}
diff --git a/op-challenger2/game/fault/contracts/gamefactory_test.go
b/op-challenger2/game/fault/contracts/gamefactory_test.go new file mode 100644 index 000000000000..4372c8c46aca --- /dev/null +++ b/op-challenger2/game/fault/contracts/gamefactory_test.go @@ -0,0 +1,308 @@ +package contracts + +import ( + "context" + "fmt" + "math/big" + "slices" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + batchingTest "github.com/ethereum-optimism/optimism/op-service/sources/batching/test" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" + "github.com/ethereum/go-ethereum/common" + ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +var ( + factoryAddr = common.HexToAddress("0x24112842371dFC380576ebb09Ae16Cb6B6caD7CB") + batchSize = 5 +) + +func TestDisputeGameFactorySimpleGetters(t *testing.T) { + blockHash := common.Hash{0xbb, 0xcd} + tests := []struct { + method string + args []interface{} + result interface{} + expected interface{} // Defaults to expecting the same as result + call func(game *DisputeGameFactoryContract) (any, error) + }{ + { + method: methodGameCount, + result: big.NewInt(9876), + expected: uint64(9876), + call: func(game *DisputeGameFactoryContract) (any, error) { + return game.GetGameCount(context.Background(), blockHash) + }, + }, + } + for _, test := range tests { + test := test + t.Run(test.method, func(t *testing.T) { + stubRpc, factory := setupDisputeGameFactoryTest(t) + stubRpc.SetResponse(factoryAddr, test.method, rpcblock.ByHash(blockHash), nil, []interface{}{test.result}) + status, err := test.call(factory) + require.NoError(t, err) + expected := test.expected + if expected == nil { + expected = test.result + } + require.Equal(t, expected, status) + }) + } +} + +func 
TestLoadGame(t *testing.T) { + blockHash := common.Hash{0xbb, 0xce} + stubRpc, factory := setupDisputeGameFactoryTest(t) + game0 := types.GameMetadata{ + Index: 0, + GameType: 0, + Timestamp: 1234, + Proxy: common.Address{0xaa}, + } + game1 := types.GameMetadata{ + Index: 1, + GameType: 1, + Timestamp: 5678, + Proxy: common.Address{0xbb}, + } + game2 := types.GameMetadata{ + Index: 2, + GameType: 99, + Timestamp: 9988, + Proxy: common.Address{0xcc}, + } + expectedGames := []types.GameMetadata{game0, game1, game2} + for idx, expected := range expectedGames { + expectGetGame(stubRpc, idx, blockHash, expected) + actual, err := factory.GetGame(context.Background(), uint64(idx), blockHash) + require.NoError(t, err) + require.Equal(t, expected, actual) + } +} + +func TestGetAllGames(t *testing.T) { + blockHash := common.Hash{0xbb, 0xce} + stubRpc, factory := setupDisputeGameFactoryTest(t) + game0 := types.GameMetadata{ + Index: 0, + GameType: 0, + Timestamp: 1234, + Proxy: common.Address{0xaa}, + } + game1 := types.GameMetadata{ + Index: 1, + GameType: 1, + Timestamp: 5678, + Proxy: common.Address{0xbb}, + } + game2 := types.GameMetadata{ + Index: 2, + GameType: 99, + Timestamp: 9988, + Proxy: common.Address{0xcc}, + } + + expectedGames := []types.GameMetadata{game0, game1, game2} + stubRpc.SetResponse(factoryAddr, methodGameCount, rpcblock.ByHash(blockHash), nil, []interface{}{big.NewInt(int64(len(expectedGames)))}) + for idx, expected := range expectedGames { + expectGetGame(stubRpc, idx, blockHash, expected) + } + actualGames, err := factory.GetAllGames(context.Background(), blockHash) + require.NoError(t, err) + require.Equal(t, expectedGames, actualGames) +} + +func TestGetAllGamesAtOrAfter(t *testing.T) { + tests := []struct { + gameCount int + earliestGameIdx int + }{ + {gameCount: batchSize * 4, earliestGameIdx: batchSize + 3}, + {gameCount: 0, earliestGameIdx: 0}, + {gameCount: batchSize * 2, earliestGameIdx: batchSize}, + {gameCount: batchSize * 2, 
earliestGameIdx: batchSize + 1}, + {gameCount: batchSize * 2, earliestGameIdx: batchSize - 1}, + {gameCount: batchSize * 2, earliestGameIdx: batchSize * 2}, + {gameCount: batchSize * 2, earliestGameIdx: batchSize*2 + 1}, + {gameCount: batchSize - 2, earliestGameIdx: batchSize - 3}, + } + for _, test := range tests { + test := test + t.Run(fmt.Sprintf("Count_%v_Start_%v", test.gameCount, test.earliestGameIdx), func(t *testing.T) { + blockHash := common.Hash{0xbb, 0xce} + stubRpc, factory := setupDisputeGameFactoryTest(t) + var allGames []types.GameMetadata + for i := 0; i < test.gameCount; i++ { + allGames = append(allGames, types.GameMetadata{ + Index: uint64(i), + GameType: uint32(i), + Timestamp: uint64(i), + Proxy: common.Address{byte(i)}, + }) + } + + stubRpc.SetResponse(factoryAddr, methodGameCount, rpcblock.ByHash(blockHash), nil, []interface{}{big.NewInt(int64(len(allGames)))}) + for idx, expected := range allGames { + expectGetGame(stubRpc, idx, blockHash, expected) + } + // Set an earliest timestamp that's in the middle of a batch + earliestTimestamp := uint64(test.earliestGameIdx) + actualGames, err := factory.GetGamesAtOrAfter(context.Background(), blockHash, earliestTimestamp) + require.NoError(t, err) + // Games come back in descending timestamp order + var expectedGames []types.GameMetadata + if test.earliestGameIdx < len(allGames) { + expectedGames = slices.Clone(allGames[test.earliestGameIdx:]) + } + slices.Reverse(expectedGames) + require.Equal(t, len(expectedGames), len(actualGames)) + if len(expectedGames) != 0 { + // Don't assert equal for empty arrays, we accept nil or empty array + require.Equal(t, expectedGames, actualGames) + } + }) + } +} + +func TestGetGameFromParameters(t *testing.T) { + stubRpc, factory := setupDisputeGameFactoryTest(t) + traceType := uint32(123) + outputRoot := common.Hash{0x01} + l2BlockNum := common.BigToHash(big.NewInt(456)).Bytes() + stubRpc.SetResponse( + factoryAddr, + methodGames, + rpcblock.Latest, + 
[]interface{}{traceType, outputRoot, l2BlockNum}, + []interface{}{common.Address{0xaa}, uint64(1)}, + ) + addr, err := factory.GetGameFromParameters(context.Background(), traceType, outputRoot, uint64(456)) + require.NoError(t, err) + require.Equal(t, common.Address{0xaa}, addr) +} + +func TestGetGameImpl(t *testing.T) { + stubRpc, factory := setupDisputeGameFactoryTest(t) + gameType := uint32(3) + gameImplAddr := common.Address{0xaa} + stubRpc.SetResponse( + factoryAddr, + methodGameImpls, + rpcblock.Latest, + []interface{}{gameType}, + []interface{}{gameImplAddr}) + actual, err := factory.GetGameImpl(context.Background(), gameType) + require.NoError(t, err) + require.Equal(t, gameImplAddr, actual) +} + +func TestDecodeDisputeGameCreatedLog(t *testing.T) { + _, factory := setupDisputeGameFactoryTest(t) + fdgAbi := snapshots.LoadDisputeGameFactoryABI() + eventAbi := fdgAbi.Events[eventDisputeGameCreated] + gameAddr := common.Address{0x11} + gameType := uint32(4) + rootClaim := common.Hash{0xaa, 0xbb, 0xcc} + + createValidReceipt := func() *ethTypes.Receipt { + return ðTypes.Receipt{ + Status: ethTypes.ReceiptStatusSuccessful, + ContractAddress: fdgAddr, + Logs: []*ethTypes.Log{ + { + Address: fdgAddr, + Topics: []common.Hash{ + eventAbi.ID, + common.BytesToHash(gameAddr.Bytes()), + common.BytesToHash(big.NewInt(int64(gameType)).Bytes()), + rootClaim, + }, + }, + }, + } + } + + t.Run("IgnoreIncorrectContract", func(t *testing.T) { + rcpt := createValidReceipt() + rcpt.Logs[0].Address = common.Address{0xff} + _, _, _, err := factory.DecodeDisputeGameCreatedLog(rcpt) + require.ErrorIs(t, err, ErrEventNotFound) + }) + + t.Run("IgnoreInvalidEvent", func(t *testing.T) { + rcpt := createValidReceipt() + rcpt.Logs[0].Topics = rcpt.Logs[0].Topics[0:2] + _, _, _, err := factory.DecodeDisputeGameCreatedLog(rcpt) + require.ErrorIs(t, err, ErrEventNotFound) + }) + + t.Run("IgnoreWrongEvent", func(t *testing.T) { + rcpt := createValidReceipt() + rcpt.Logs[0].Topics = 
[]common.Hash{ + fdgAbi.Events["ImplementationSet"].ID, + common.BytesToHash(common.Address{0x11}.Bytes()), // Implementation addr + common.BytesToHash(big.NewInt(4).Bytes()), // Game type + + } + // Check the log is a valid ImplementationSet + name, _, err := factory.contract.DecodeEvent(rcpt.Logs[0]) + require.NoError(t, err) + require.Equal(t, "ImplementationSet", name) + + _, _, _, err = factory.DecodeDisputeGameCreatedLog(rcpt) + require.ErrorIs(t, err, ErrEventNotFound) + }) + + t.Run("ValidEvent", func(t *testing.T) { + rcpt := createValidReceipt() + actualGameAddr, actualGameType, actualRootClaim, err := factory.DecodeDisputeGameCreatedLog(rcpt) + require.NoError(t, err) + require.Equal(t, gameAddr, actualGameAddr) + require.Equal(t, gameType, actualGameType) + require.Equal(t, rootClaim, actualRootClaim) + }) +} + +func expectGetGame(stubRpc *batchingTest.AbiBasedRpc, idx int, blockHash common.Hash, game types.GameMetadata) { + stubRpc.SetResponse( + factoryAddr, + methodGameAtIndex, + rpcblock.ByHash(blockHash), + []interface{}{big.NewInt(int64(idx))}, + []interface{}{ + game.GameType, + game.Timestamp, + game.Proxy, + }) +} + +func TestCreateTx(t *testing.T) { + stubRpc, factory := setupDisputeGameFactoryTest(t) + traceType := uint32(123) + outputRoot := common.Hash{0x01} + l2BlockNum := common.BigToHash(big.NewInt(456)).Bytes() + bond := big.NewInt(49284294829) + stubRpc.SetResponse(factoryAddr, methodInitBonds, rpcblock.Latest, []interface{}{traceType}, []interface{}{bond}) + stubRpc.SetResponse(factoryAddr, methodCreateGame, rpcblock.Latest, []interface{}{traceType, outputRoot, l2BlockNum}, nil) + tx, err := factory.CreateTx(context.Background(), traceType, outputRoot, uint64(456)) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + require.NotNil(t, tx.Value) + require.Truef(t, bond.Cmp(tx.Value) == 0, "Expected bond %v but was %v", bond, tx.Value) +} + +func setupDisputeGameFactoryTest(t *testing.T) (*batchingTest.AbiBasedRpc, 
*DisputeGameFactoryContract) { + fdgAbi := snapshots.LoadDisputeGameFactoryABI() + + stubRpc := batchingTest.NewAbiBasedRpc(t, factoryAddr, fdgAbi) + caller := batching.NewMultiCaller(stubRpc, batchSize) + factory := NewDisputeGameFactoryContract(metrics.NoopContractMetrics, factoryAddr, caller) + return stubRpc, factory +} diff --git a/op-challenger2/game/fault/contracts/metrics/metrics.go b/op-challenger2/game/fault/contracts/metrics/metrics.go new file mode 100644 index 000000000000..9a47a0669517 --- /dev/null +++ b/op-challenger2/game/fault/contracts/metrics/metrics.go @@ -0,0 +1,54 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +const ConstractSubsystem = "contracts" + +type EndTimer func() + +type Factory interface { + NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec + NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec +} + +type ContractMetricer interface { + StartContractRequest(name string) EndTimer +} + +type ContractMetrics struct { + ContractRequestsTotal *prometheus.CounterVec + ContractRequestDurationSeconds *prometheus.HistogramVec +} + +var _ ContractMetricer = (*ContractMetrics)(nil) + +func MakeContractMetrics(ns string, factory Factory) *ContractMetrics { + return &ContractMetrics{ + ContractRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: ns, + Subsystem: ConstractSubsystem, + Name: "requests_total", + Help: "Total requests to the contracts", + }, []string{ + "method", + }), + ContractRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: ns, + Subsystem: ConstractSubsystem, + Name: "requests_duration_seconds", + Help: "Histogram of contract request durations", + }, []string{ + "method", + }), + } +} + +func (m *ContractMetrics) StartContractRequest(method string) EndTimer { + m.ContractRequestsTotal.WithLabelValues(method).Inc() + timer := 
prometheus.NewTimer(m.ContractRequestDurationSeconds.WithLabelValues(method)) + return func() { + timer.ObserveDuration() + } +} diff --git a/op-challenger2/game/fault/contracts/metrics/noop.go b/op-challenger2/game/fault/contracts/metrics/noop.go new file mode 100644 index 000000000000..fae673e46d96 --- /dev/null +++ b/op-challenger2/game/fault/contracts/metrics/noop.go @@ -0,0 +1,12 @@ +package metrics + +type NoopMetrics struct { +} + +func (n *NoopMetrics) StartContractRequest(_ string) EndTimer { + return func() {} +} + +var _ ContractMetricer = (*NoopMetrics)(nil) + +var NoopContractMetrics = &NoopMetrics{} diff --git a/op-challenger2/game/fault/contracts/oracle.go b/op-challenger2/game/fault/contracts/oracle.go new file mode 100644 index 000000000000..112f53153e06 --- /dev/null +++ b/op-challenger2/game/fault/contracts/oracle.go @@ -0,0 +1,421 @@ +package contracts + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "math" + "math/big" + "sync/atomic" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/merkle" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" + "github.com/ethereum/go-ethereum/common" +) + +const ( + methodInitLPP = "initLPP" + methodAddLeavesLPP = "addLeavesLPP" + methodSqueezeLPP = "squeezeLPP" + methodLoadKeccak256PreimagePart = "loadKeccak256PreimagePart" + methodLoadSha256PreimagePart = "loadSha256PreimagePart" + methodLoadBlobPreimagePart = "loadBlobPreimagePart" + methodLoadPrecompilePreimagePart = "loadPrecompilePreimagePart" + methodProposalCount = 
"proposalCount" + methodProposals = "proposals" + methodProposalMetadata = "proposalMetadata" + methodProposalBlocksLen = "proposalBlocksLen" + methodProposalBlocks = "proposalBlocks" + methodPreimagePartOk = "preimagePartOk" + methodMinProposalSize = "minProposalSize" + methodChallengeFirstLPP = "challengeFirstLPP" + methodChallengeLPP = "challengeLPP" + methodChallengePeriod = "challengePeriod" + methodGetTreeRootLPP = "getTreeRootLPP" + methodMinBondSizeLPP = "MIN_BOND_SIZE" +) + +var ( + ErrInvalidAddLeavesCall = errors.New("tx is not a valid addLeaves call") + ErrInvalidPreimageKey = errors.New("invalid preimage key") + ErrUnsupportedKeyType = errors.New("unsupported preimage key type") +) + +// preimageOracleLeaf matches the contract representation of a large preimage leaf +type preimageOracleLeaf struct { + Input []byte + Index *big.Int + StateCommitment [32]byte +} + +// libKeccakStateMatrix matches the contract representation of a keccak state matrix +type libKeccakStateMatrix struct { + State [25]uint64 +} + +// PreimageOracleContract is a binding that works with contracts implementing the IPreimageOracle interface +type PreimageOracleContract struct { + addr common.Address + multiCaller *batching.MultiCaller + contract *batching.BoundContract + + // challengePeriod caches the challenge period from the contract once it has been loaded. + // 0 indicates the period has not been loaded yet. + challengePeriod atomic.Uint64 + // minBondSizeLPP caches the minimum bond size for large preimages from the contract once it has been loaded. + // 0 indicates the value has not been loaded yet. + minBondSizeLPP atomic.Uint64 +} + +// toPreimageOracleLeaf converts a Leaf to the contract format. 
+func toPreimageOracleLeaf(l keccakTypes.Leaf) preimageOracleLeaf { + return preimageOracleLeaf{ + Input: l.Input[:], + Index: new(big.Int).SetUint64(l.Index), + StateCommitment: l.StateCommitment, + } +} + +func NewPreimageOracleContract(addr common.Address, caller *batching.MultiCaller) *PreimageOracleContract { + oracleAbi := snapshots.LoadPreimageOracleABI() + + return &PreimageOracleContract{ + addr: addr, + multiCaller: caller, + contract: batching.NewBoundContract(oracleAbi, addr), + } +} + +func (c *PreimageOracleContract) Addr() common.Address { + return c.addr +} + +func (c *PreimageOracleContract) AddGlobalDataTx(data *types.PreimageOracleData) (txmgr.TxCandidate, error) { + if len(data.OracleKey) == 0 { + return txmgr.TxCandidate{}, ErrInvalidPreimageKey + } + keyType := preimage.KeyType(data.OracleKey[0]) + switch keyType { + case preimage.Keccak256KeyType: + call := c.contract.Call(methodLoadKeccak256PreimagePart, new(big.Int).SetUint64(uint64(data.OracleOffset)), data.GetPreimageWithoutSize()) + return call.ToTxCandidate() + case preimage.Sha256KeyType: + call := c.contract.Call(methodLoadSha256PreimagePart, new(big.Int).SetUint64(uint64(data.OracleOffset)), data.GetPreimageWithoutSize()) + return call.ToTxCandidate() + case preimage.BlobKeyType: + call := c.contract.Call(methodLoadBlobPreimagePart, + new(big.Int).SetUint64(data.BlobFieldIndex), + new(big.Int).SetBytes(data.GetPreimageWithoutSize()), + data.BlobCommitment, + data.BlobProof, + new(big.Int).SetUint64(uint64(data.OracleOffset))) + return call.ToTxCandidate() + case preimage.PrecompileKeyType: + call := c.contract.Call(methodLoadPrecompilePreimagePart, + new(big.Int).SetUint64(uint64(data.OracleOffset)), + data.GetPrecompileAddress(), + data.GetPrecompileInput()) + return call.ToTxCandidate() + default: + return txmgr.TxCandidate{}, fmt.Errorf("%w: %v", ErrUnsupportedKeyType, keyType) + } +} + +func (c *PreimageOracleContract) InitLargePreimage(uuid *big.Int, partOffset uint32, 
claimedSize uint32) (txmgr.TxCandidate, error) { + call := c.contract.Call(methodInitLPP, uuid, partOffset, claimedSize) + return call.ToTxCandidate() +} + +func (c *PreimageOracleContract) AddLeaves(uuid *big.Int, startingBlockIndex *big.Int, input []byte, commitments []common.Hash, finalize bool) (txmgr.TxCandidate, error) { + call := c.contract.Call(methodAddLeavesLPP, uuid, startingBlockIndex, input, commitments, finalize) + return call.ToTxCandidate() +} + +// MinLargePreimageSize returns the minimum size of a large preimage. +func (c *PreimageOracleContract) MinLargePreimageSize(ctx context.Context) (uint64, error) { + result, err := c.multiCaller.SingleCall(ctx, rpcblock.Latest, c.contract.Call(methodMinProposalSize)) + if err != nil { + return 0, fmt.Errorf("failed to fetch min lpp size bytes: %w", err) + } + return result.GetBigInt(0).Uint64(), nil +} + +// ChallengePeriod returns the challenge period for large preimages. +func (c *PreimageOracleContract) ChallengePeriod(ctx context.Context) (uint64, error) { + if period := c.challengePeriod.Load(); period != 0 { + return period, nil + } + result, err := c.multiCaller.SingleCall(ctx, rpcblock.Latest, c.contract.Call(methodChallengePeriod)) + if err != nil { + return 0, fmt.Errorf("failed to fetch challenge period: %w", err) + } + period := result.GetBigInt(0).Uint64() + c.challengePeriod.Store(period) + return period, nil +} + +func (c *PreimageOracleContract) CallSqueeze( + ctx context.Context, + claimant common.Address, + uuid *big.Int, + prestateMatrix keccakTypes.StateSnapshot, + preState keccakTypes.Leaf, + preStateProof merkle.Proof, + postState keccakTypes.Leaf, + postStateProof merkle.Proof, +) error { + call := c.contract.Call(methodSqueezeLPP, claimant, uuid, abiEncodeSnapshot(prestateMatrix), toPreimageOracleLeaf(preState), preStateProof, toPreimageOracleLeaf(postState), postStateProof) + _, err := c.multiCaller.SingleCall(ctx, rpcblock.Latest, call) + if err != nil { + return fmt.Errorf("failed 
to call squeeze: %w", err) + } + return nil +} + +func (c *PreimageOracleContract) Squeeze( + claimant common.Address, + uuid *big.Int, + prestateMatrix keccakTypes.StateSnapshot, + preState keccakTypes.Leaf, + preStateProof merkle.Proof, + postState keccakTypes.Leaf, + postStateProof merkle.Proof, +) (txmgr.TxCandidate, error) { + call := c.contract.Call( + methodSqueezeLPP, + claimant, + uuid, + abiEncodeSnapshot(prestateMatrix), + toPreimageOracleLeaf(preState), + preStateProof, + toPreimageOracleLeaf(postState), + postStateProof, + ) + return call.ToTxCandidate() +} + +func abiEncodeSnapshot(packedState keccakTypes.StateSnapshot) libKeccakStateMatrix { + return libKeccakStateMatrix{State: packedState} +} + +func (c *PreimageOracleContract) GetActivePreimages(ctx context.Context, blockHash common.Hash) ([]keccakTypes.LargePreimageMetaData, error) { + block := rpcblock.ByHash(blockHash) + results, err := batching.ReadArray(ctx, c.multiCaller, block, c.contract.Call(methodProposalCount), func(i *big.Int) *batching.ContractCall { + return c.contract.Call(methodProposals, i) + }) + if err != nil { + return nil, fmt.Errorf("failed to load claims: %w", err) + } + + var idents []keccakTypes.LargePreimageIdent + for _, result := range results { + idents = append(idents, c.decodePreimageIdent(result)) + } + + return c.GetProposalMetadata(ctx, block, idents...) +} + +func (c *PreimageOracleContract) GetProposalMetadata(ctx context.Context, block rpcblock.Block, idents ...keccakTypes.LargePreimageIdent) ([]keccakTypes.LargePreimageMetaData, error) { + var calls []batching.Call + for _, ident := range idents { + calls = append(calls, c.contract.Call(methodProposalMetadata, ident.Claimant, ident.UUID)) + } + results, err := c.multiCaller.Call(ctx, block, calls...) 
+ if err != nil { + return nil, fmt.Errorf("failed to load proposal metadata: %w", err) + } + var proposals []keccakTypes.LargePreimageMetaData + for i, result := range results { + meta := metadata(result.GetBytes32(0)) + proposals = append(proposals, keccakTypes.LargePreimageMetaData{ + LargePreimageIdent: idents[i], + Timestamp: meta.timestamp(), + PartOffset: meta.partOffset(), + ClaimedSize: meta.claimedSize(), + BlocksProcessed: meta.blocksProcessed(), + BytesProcessed: meta.bytesProcessed(), + Countered: meta.countered(), + }) + } + return proposals, nil +} + +func (c *PreimageOracleContract) GetProposalTreeRoot(ctx context.Context, block rpcblock.Block, ident keccakTypes.LargePreimageIdent) (common.Hash, error) { + call := c.contract.Call(methodGetTreeRootLPP, ident.Claimant, ident.UUID) + result, err := c.multiCaller.SingleCall(ctx, block, call) + if err != nil { + return common.Hash{}, fmt.Errorf("failed to get tree root: %w", err) + } + return result.GetHash(0), nil +} + +func (c *PreimageOracleContract) GetInputDataBlocks(ctx context.Context, block rpcblock.Block, ident keccakTypes.LargePreimageIdent) ([]uint64, error) { + results, err := batching.ReadArray(ctx, c.multiCaller, block, + c.contract.Call(methodProposalBlocksLen, ident.Claimant, ident.UUID), + func(i *big.Int) *batching.ContractCall { + return c.contract.Call(methodProposalBlocks, ident.Claimant, ident.UUID, i) + }) + if err != nil { + return nil, fmt.Errorf("failed to load proposal blocks: %w", err) + } + blockNums := make([]uint64, 0, len(results)) + for _, result := range results { + blockNums = append(blockNums, result.GetUint64(0)) + } + return blockNums, nil +} + +// DecodeInputData returns the UUID and [keccakTypes.InputData] being added to the preimage via a addLeavesLPP call. +// An [ErrInvalidAddLeavesCall] error is returned if the call is not a valid call to addLeavesLPP. +// Otherwise, the uuid and input data is returned. 
The raw data supplied is returned so long as it can be parsed. +// Specifically the length of the input data is not validated to ensure it is consistent with the number of commitments. +func (c *PreimageOracleContract) DecodeInputData(data []byte) (*big.Int, keccakTypes.InputData, error) { + method, args, err := c.contract.DecodeCall(data) + if errors.Is(err, batching.ErrUnknownMethod) { + return nil, keccakTypes.InputData{}, ErrInvalidAddLeavesCall + } else if err != nil { + return nil, keccakTypes.InputData{}, err + } + if method != methodAddLeavesLPP { + return nil, keccakTypes.InputData{}, fmt.Errorf("%w: %v", ErrInvalidAddLeavesCall, method) + } + uuid := args.GetBigInt(0) + // Arg 1 is the starting block index which we don't current use + input := args.GetBytes(2) + stateCommitments := args.GetBytes32Slice(3) + finalize := args.GetBool(4) + + commitments := make([]common.Hash, 0, len(stateCommitments)) + for _, c := range stateCommitments { + commitments = append(commitments, c) + } + return uuid, keccakTypes.InputData{ + Input: input, + Commitments: commitments, + Finalize: finalize, + }, nil +} + +func (c *PreimageOracleContract) GlobalDataExists(ctx context.Context, data *types.PreimageOracleData) (bool, error) { + call := c.contract.Call(methodPreimagePartOk, common.Hash(data.OracleKey), new(big.Int).SetUint64(uint64(data.OracleOffset))) + results, err := c.multiCaller.SingleCall(ctx, rpcblock.Latest, call) + if err != nil { + return false, fmt.Errorf("failed to get preimagePartOk: %w", err) + } + return results.GetBool(0), nil +} + +func (c *PreimageOracleContract) ChallengeTx(ident keccakTypes.LargePreimageIdent, challenge keccakTypes.Challenge) (txmgr.TxCandidate, error) { + var call *batching.ContractCall + if challenge.Prestate == (keccakTypes.Leaf{}) { + call = c.contract.Call( + methodChallengeFirstLPP, + ident.Claimant, + ident.UUID, + toPreimageOracleLeaf(challenge.Poststate), + challenge.PoststateProof) + } else { + call = c.contract.Call( + 
methodChallengeLPP, + ident.Claimant, + ident.UUID, + abiEncodeSnapshot(challenge.StateMatrix), + toPreimageOracleLeaf(challenge.Prestate), + challenge.PrestateProof, + toPreimageOracleLeaf(challenge.Poststate), + challenge.PoststateProof) + } + return call.ToTxCandidate() +} + +func (c *PreimageOracleContract) GetMinBondLPP(ctx context.Context) (*big.Int, error) { + if bondSize := c.minBondSizeLPP.Load(); bondSize != 0 { + return big.NewInt(int64(bondSize)), nil + } + result, err := c.multiCaller.SingleCall(ctx, rpcblock.Latest, c.contract.Call(methodMinBondSizeLPP)) + if err != nil { + return nil, fmt.Errorf("failed to fetch min bond size for LPPs: %w", err) + } + period := result.GetBigInt(0) + c.minBondSizeLPP.Store(period.Uint64()) + return period, nil +} + +func (c *PreimageOracleContract) decodePreimageIdent(result *batching.CallResult) keccakTypes.LargePreimageIdent { + return keccakTypes.LargePreimageIdent{ + Claimant: result.GetAddress(0), + UUID: result.GetBigInt(1), + } +} + +// metadata is the packed preimage metadata +// ┌─────────────┬────────────────────────────────────────────┐ +// │ Bit Offsets │ Description │ +// ├─────────────┼────────────────────────────────────────────┤ +// │ [0, 64) │ Timestamp (Finalized - All data available) │ +// │ [64, 96) │ Part Offset │ +// │ [96, 128) │ Claimed Size │ +// │ [128, 160) │ Blocks Processed (Inclusive of Padding) │ +// │ [160, 192) │ Bytes Processed (Non-inclusive of Padding) │ +// │ [192, 256) │ Countered │ +// └─────────────┴────────────────────────────────────────────┘ +type metadata [32]byte + +func (m *metadata) setTimestamp(timestamp uint64) { + binary.BigEndian.PutUint64(m[0:8], timestamp) +} + +func (m *metadata) timestamp() uint64 { + return binary.BigEndian.Uint64(m[0:8]) +} + +func (m *metadata) setPartOffset(value uint32) { + binary.BigEndian.PutUint32(m[8:12], value) +} + +func (m *metadata) partOffset() uint32 { + return binary.BigEndian.Uint32(m[8:12]) +} + +func (m *metadata) 
setClaimedSize(value uint32) { + binary.BigEndian.PutUint32(m[12:16], value) +} + +func (m *metadata) claimedSize() uint32 { + return binary.BigEndian.Uint32(m[12:16]) +} + +func (m *metadata) setBlocksProcessed(value uint32) { + binary.BigEndian.PutUint32(m[16:20], value) +} + +func (m *metadata) blocksProcessed() uint32 { + return binary.BigEndian.Uint32(m[16:20]) +} + +func (m *metadata) setBytesProcessed(value uint32) { + binary.BigEndian.PutUint32(m[20:24], value) +} + +func (m *metadata) bytesProcessed() uint32 { + return binary.BigEndian.Uint32(m[20:24]) +} + +func (m *metadata) setCountered(value bool) { + v := uint64(0) + if value { + v = math.MaxUint64 + } + binary.BigEndian.PutUint64(m[24:32], v) +} + +func (m *metadata) countered() bool { + return binary.BigEndian.Uint64(m[24:32]) != 0 +} diff --git a/op-challenger2/game/fault/contracts/oracle_test.go b/op-challenger2/game/fault/contracts/oracle_test.go new file mode 100644 index 000000000000..0cb5ebc70d80 --- /dev/null +++ b/op-challenger2/game/fault/contracts/oracle_test.go @@ -0,0 +1,656 @@ +package contracts + +import ( + "context" + "fmt" + "math" + "math/big" + "math/rand" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/merkle" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + batchingTest "github.com/ethereum-optimism/optimism/op-service/sources/batching/test" + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestPreimageOracleContract_AddGlobalDataTx(t 
*testing.T) { + t.Run("UnknownType", func(t *testing.T) { + _, oracle := setupPreimageOracleTest(t) + data := types.NewPreimageOracleData(common.Hash{0xcc}.Bytes(), make([]byte, 20), uint32(545)) + _, err := oracle.AddGlobalDataTx(data) + require.ErrorIs(t, err, ErrUnsupportedKeyType) + }) + + t.Run("Keccak256", func(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + data := types.NewPreimageOracleData(common.Hash{byte(preimage.Keccak256KeyType), 0xcc}.Bytes(), make([]byte, 20), uint32(545)) + stubRpc.SetResponse(oracleAddr, methodLoadKeccak256PreimagePart, rpcblock.Latest, []interface{}{ + new(big.Int).SetUint64(uint64(data.OracleOffset)), + data.GetPreimageWithoutSize(), + }, nil) + + tx, err := oracle.AddGlobalDataTx(data) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + }) + + t.Run("Sha256", func(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + data := types.NewPreimageOracleData(common.Hash{byte(preimage.Sha256KeyType), 0xcc}.Bytes(), make([]byte, 20), uint32(545)) + stubRpc.SetResponse(oracleAddr, methodLoadSha256PreimagePart, rpcblock.Latest, []interface{}{ + new(big.Int).SetUint64(uint64(data.OracleOffset)), + data.GetPreimageWithoutSize(), + }, nil) + + tx, err := oracle.AddGlobalDataTx(data) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + }) + + t.Run("Blob", func(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + fieldData := testutils.RandomData(rand.New(rand.NewSource(23)), 32) + data := types.NewPreimageOracleData(common.Hash{byte(preimage.BlobKeyType), 0xcc}.Bytes(), fieldData, uint32(545)) + stubRpc.SetResponse(oracleAddr, methodLoadBlobPreimagePart, rpcblock.Latest, []interface{}{ + new(big.Int).SetUint64(data.BlobFieldIndex), + new(big.Int).SetBytes(data.GetPreimageWithoutSize()), + data.BlobCommitment, + data.BlobProof, + new(big.Int).SetUint64(uint64(data.OracleOffset)), + }, nil) + + tx, err := oracle.AddGlobalDataTx(data) + require.NoError(t, err) + 
stubRpc.VerifyTxCandidate(tx) + }) + + t.Run("Precompile", func(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + input := testutils.RandomData(rand.New(rand.NewSource(23)), 200) + data := types.NewPreimageOracleData(common.Hash{byte(preimage.PrecompileKeyType), 0xcc}.Bytes(), input, uint32(545)) + stubRpc.SetResponse(oracleAddr, methodLoadPrecompilePreimagePart, rpcblock.Latest, []interface{}{ + new(big.Int).SetUint64(uint64(data.OracleOffset)), + data.GetPrecompileAddress(), + data.GetPrecompileInput(), + }, nil) + + tx, err := oracle.AddGlobalDataTx(data) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) + }) +} + +func TestPreimageOracleContract_ChallengePeriod(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + stubRpc.SetResponse(oracleAddr, methodChallengePeriod, rpcblock.Latest, + []interface{}{}, + []interface{}{big.NewInt(123)}, + ) + challengePeriod, err := oracle.ChallengePeriod(context.Background()) + require.NoError(t, err) + require.Equal(t, uint64(123), challengePeriod) + + // Should cache responses + stubRpc.ClearResponses() + challengePeriod, err = oracle.ChallengePeriod(context.Background()) + require.NoError(t, err) + require.Equal(t, uint64(123), challengePeriod) +} + +func TestPreimageOracleContract_MinLargePreimageSize(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + stubRpc.SetResponse(oracleAddr, methodMinProposalSize, rpcblock.Latest, + []interface{}{}, + []interface{}{big.NewInt(123)}, + ) + minProposalSize, err := oracle.MinLargePreimageSize(context.Background()) + require.NoError(t, err) + require.Equal(t, uint64(123), minProposalSize) +} + +func TestPreimageOracleContract_MinBondSizeLPP(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + stubRpc.SetResponse(oracleAddr, methodMinBondSizeLPP, rpcblock.Latest, + []interface{}{}, + []interface{}{big.NewInt(123)}, + ) + minBond, err := oracle.GetMinBondLPP(context.Background()) + require.NoError(t, err) + require.Equal(t, 
big.NewInt(123), minBond) + + // Should cache responses + stubRpc.ClearResponses() + minBond, err = oracle.GetMinBondLPP(context.Background()) + require.NoError(t, err) + require.Equal(t, big.NewInt(123), minBond) +} + +func TestPreimageOracleContract_PreimageDataExists(t *testing.T) { + t.Run("exists", func(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + data := types.NewPreimageOracleData(common.Hash{0xcc}.Bytes(), make([]byte, 20), 545) + stubRpc.SetResponse(oracleAddr, methodPreimagePartOk, rpcblock.Latest, + []interface{}{common.Hash(data.OracleKey), new(big.Int).SetUint64(uint64(data.OracleOffset))}, + []interface{}{true}, + ) + exists, err := oracle.GlobalDataExists(context.Background(), data) + require.NoError(t, err) + require.True(t, exists) + }) + t.Run("does not exist", func(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + data := types.NewPreimageOracleData(common.Hash{0xcc}.Bytes(), make([]byte, 20), 545) + stubRpc.SetResponse(oracleAddr, methodPreimagePartOk, rpcblock.Latest, + []interface{}{common.Hash(data.OracleKey), new(big.Int).SetUint64(uint64(data.OracleOffset))}, + []interface{}{false}, + ) + exists, err := oracle.GlobalDataExists(context.Background(), data) + require.NoError(t, err) + require.False(t, exists) + }) +} + +func TestPreimageOracleContract_InitLargePreimage(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + + uuid := big.NewInt(123) + partOffset := uint32(1) + claimedSize := uint32(2) + stubRpc.SetResponse(oracleAddr, methodInitLPP, rpcblock.Latest, []interface{}{ + uuid, + partOffset, + claimedSize, + }, nil) + + tx, err := oracle.InitLargePreimage(uuid, partOffset, claimedSize) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) +} + +func TestPreimageOracleContract_AddLeaves(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + + uuid := big.NewInt(123) + startingBlockIndex := big.NewInt(0) + input := []byte{0x12} + commitments := []common.Hash{{0x34}} + 
finalize := true + stubRpc.SetResponse(oracleAddr, methodAddLeavesLPP, rpcblock.Latest, []interface{}{ + uuid, + startingBlockIndex, + input, + commitments, + finalize, + }, nil) + + tx, err := oracle.AddLeaves(uuid, startingBlockIndex, input, commitments, finalize) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) +} + +func TestPreimageOracleContract_Squeeze(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + + claimant := common.Address{0x12} + uuid := big.NewInt(123) + preStateMatrix := keccakTypes.StateSnapshot{0, 1, 2, 3, 4} + preState := keccakTypes.Leaf{ + Input: [keccakTypes.BlockSize]byte{0x12}, + Index: 123, + StateCommitment: common.Hash{0x34}, + } + preStateProof := merkle.Proof{{0x34}} + postState := keccakTypes.Leaf{ + Input: [keccakTypes.BlockSize]byte{0x34}, + Index: 456, + StateCommitment: common.Hash{0x56}, + } + postStateProof := merkle.Proof{{0x56}} + stubRpc.SetResponse(oracleAddr, methodSqueezeLPP, rpcblock.Latest, []interface{}{ + claimant, + uuid, + abiEncodeSnapshot(preStateMatrix), + toPreimageOracleLeaf(preState), + preStateProof, + toPreimageOracleLeaf(postState), + postStateProof, + }, nil) + + tx, err := oracle.Squeeze(claimant, uuid, preStateMatrix, preState, preStateProof, postState, postStateProof) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) +} + +func TestGetActivePreimages(t *testing.T) { + blockHash := common.Hash{0xaa} + _, oracle, proposals := setupPreimageOracleTestWithProposals(t, rpcblock.ByHash(blockHash)) + preimages, err := oracle.GetActivePreimages(context.Background(), blockHash) + require.NoError(t, err) + require.Equal(t, proposals, preimages) +} + +func TestGetProposalMetadata(t *testing.T) { + blockHash := common.Hash{0xaa} + block := rpcblock.ByHash(blockHash) + stubRpc, oracle, proposals := setupPreimageOracleTestWithProposals(t, block) + preimages, err := oracle.GetProposalMetadata( + context.Background(), + block, + proposals[0].LargePreimageIdent, + 
proposals[1].LargePreimageIdent, + proposals[2].LargePreimageIdent, + ) + require.NoError(t, err) + require.Equal(t, proposals, preimages) + + // Fetching a proposal that doesn't exist should return an empty metadata object. + ident := keccakTypes.LargePreimageIdent{Claimant: common.Address{0x12}, UUID: big.NewInt(123)} + meta := new(metadata) + stubRpc.SetResponse( + oracleAddr, + methodProposalMetadata, + block, + []interface{}{ident.Claimant, ident.UUID}, + []interface{}{meta}) + preimages, err = oracle.GetProposalMetadata(context.Background(), rpcblock.ByHash(blockHash), ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.LargePreimageMetaData{{LargePreimageIdent: ident}}, preimages) +} + +func TestGetProposalTreeRoot(t *testing.T) { + blockHash := common.Hash{0xaa} + expectedRoot := common.Hash{0xbb} + ident := keccakTypes.LargePreimageIdent{Claimant: common.Address{0x12}, UUID: big.NewInt(123)} + stubRpc, oracle := setupPreimageOracleTest(t) + stubRpc.SetResponse(oracleAddr, methodGetTreeRootLPP, rpcblock.ByHash(blockHash), + []interface{}{ident.Claimant, ident.UUID}, + []interface{}{expectedRoot}) + actualRoot, err := oracle.GetProposalTreeRoot(context.Background(), rpcblock.ByHash(blockHash), ident) + require.NoError(t, err) + require.Equal(t, expectedRoot, actualRoot) +} + +func setupPreimageOracleTestWithProposals(t *testing.T, block rpcblock.Block) (*batchingTest.AbiBasedRpc, *PreimageOracleContract, []keccakTypes.LargePreimageMetaData) { + stubRpc, oracle := setupPreimageOracleTest(t) + stubRpc.SetResponse( + oracleAddr, + methodProposalCount, + block, + []interface{}{}, + []interface{}{big.NewInt(3)}) + + preimage1 := keccakTypes.LargePreimageMetaData{ + LargePreimageIdent: keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xaa}, + UUID: big.NewInt(1111), + }, + Timestamp: 1234, + PartOffset: 1, + ClaimedSize: 100, + BlocksProcessed: 10, + BytesProcessed: 100, + Countered: false, + } + preimage2 := 
keccakTypes.LargePreimageMetaData{ + LargePreimageIdent: keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xbb}, + UUID: big.NewInt(2222), + }, + Timestamp: 2345, + PartOffset: 2, + ClaimedSize: 200, + BlocksProcessed: 20, + BytesProcessed: 200, + Countered: true, + } + preimage3 := keccakTypes.LargePreimageMetaData{ + LargePreimageIdent: keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xcc}, + UUID: big.NewInt(3333), + }, + Timestamp: 0, + PartOffset: 3, + ClaimedSize: 300, + BlocksProcessed: 30, + BytesProcessed: 233, + Countered: false, + } + + proposals := []keccakTypes.LargePreimageMetaData{preimage1, preimage2, preimage3} + + for i, proposal := range proposals { + stubRpc.SetResponse( + oracleAddr, + methodProposals, + block, + []interface{}{big.NewInt(int64(i))}, + []interface{}{ + proposal.Claimant, + proposal.UUID, + }) + meta := new(metadata) + meta.setTimestamp(proposal.Timestamp) + meta.setPartOffset(proposal.PartOffset) + meta.setClaimedSize(proposal.ClaimedSize) + meta.setBlocksProcessed(proposal.BlocksProcessed) + meta.setBytesProcessed(proposal.BytesProcessed) + meta.setCountered(proposal.Countered) + stubRpc.SetResponse( + oracleAddr, + methodProposalMetadata, + block, + []interface{}{proposal.Claimant, proposal.UUID}, + []interface{}{meta}) + } + + return stubRpc, oracle, proposals +} + +func setupPreimageOracleTest(t *testing.T) (*batchingTest.AbiBasedRpc, *PreimageOracleContract) { + oracleAbi := snapshots.LoadPreimageOracleABI() + + stubRpc := batchingTest.NewAbiBasedRpc(t, oracleAddr, oracleAbi) + oracleContract := NewPreimageOracleContract(oracleAddr, batching.NewMultiCaller(stubRpc, batching.DefaultBatchSize)) + + return stubRpc, oracleContract +} + +func TestMetadata(t *testing.T) { + uint32Values := []uint32{0, 1, 2, 3252354, math.MaxUint32} + tests := []struct { + name string + setter func(meta *metadata, val uint32) + getter func(meta *metadata) uint32 + }{ + { + name: "partOffset", + setter: 
(*metadata).setPartOffset, + getter: (*metadata).partOffset, + }, + { + name: "claimedSize", + setter: (*metadata).setClaimedSize, + getter: (*metadata).claimedSize, + }, + { + name: "blocksProcessed", + setter: (*metadata).setBlocksProcessed, + getter: (*metadata).blocksProcessed, + }, + { + name: "bytesProcessed", + setter: (*metadata).setBytesProcessed, + getter: (*metadata).bytesProcessed, + }, + } + for _, test := range tests { + test := test + for _, value := range uint32Values { + value := value + t.Run(fmt.Sprintf("%v-%v", test.name, value), func(t *testing.T) { + meta := new(metadata) + require.Zero(t, test.getter(meta)) + test.setter(meta, value) + require.Equal(t, value, test.getter(meta)) + }) + } + } +} + +func TestMetadata_Timestamp(t *testing.T) { + values := []uint64{0, 1, 2, 3252354, math.MaxUint32, math.MaxUint32 + 1, math.MaxUint64} + var meta metadata + require.Zero(t, meta.timestamp()) + for _, value := range values { + meta.setTimestamp(value) + require.Equal(t, value, meta.timestamp()) + } +} + +func TestMetadata_Countered(t *testing.T) { + var meta metadata + require.False(t, meta.countered()) + meta.setCountered(true) + require.True(t, meta.countered()) + meta.setCountered(false) + require.False(t, meta.countered()) +} + +func TestGetInputDataBlocks(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + block := rpcblock.ByHash(common.Hash{0xaa}) + + preimage := keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xbb}, + UUID: big.NewInt(2222), + } + + stubRpc.SetResponse( + oracleAddr, + methodProposalBlocksLen, + block, + []interface{}{preimage.Claimant, preimage.UUID}, + []interface{}{big.NewInt(3)}) + + blockNums := []uint64{10, 35, 67} + + for i, blockNum := range blockNums { + stubRpc.SetResponse( + oracleAddr, + methodProposalBlocks, + block, + []interface{}{preimage.Claimant, preimage.UUID, big.NewInt(int64(i))}, + []interface{}{blockNum}) + } + + actual, err := oracle.GetInputDataBlocks(context.Background(), 
block, preimage) + require.NoError(t, err) + require.Len(t, actual, 3) + require.Equal(t, blockNums, actual) +} + +func TestDecodeInputData(t *testing.T) { + dataOfLength := func(len int) []byte { + data := make([]byte, len) + for i := range data { + data[i] = byte(i) + } + return data + } + ident := keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xaa}, + UUID: big.NewInt(1111), + } + _, oracle := setupPreimageOracleTest(t) + + tests := []struct { + name string + input []byte + inputData keccakTypes.InputData + expectedTxData string + expectedErr error + }{ + { + name: "UnknownMethod", + input: []byte{0xaa, 0xbb, 0xcc, 0xdd}, + expectedTxData: "aabbccdd", + expectedErr: ErrInvalidAddLeavesCall, + }, + { + name: "SingleInput", + inputData: keccakTypes.InputData{ + Input: dataOfLength(keccakTypes.BlockSize), + Commitments: []common.Hash{{0xaa}}, + Finalize: false, + }, + expectedTxData: "7917de1d0000000000000000000000000000000000000000000000000000000000000457000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000088000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f80818283848586870000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001aa00000000000000000000000000000000000000000000000000000000000000", + }, + { + name: "MultipleInputs", + inputData: keccakTypes.InputData{ + Input: dataOfLength(2 * keccakTypes.BlockSize), + Commitments: []common.Hash{{0xaa}, {0xbb}}, + Finalize: false, + }, + expectedTxData: 
"7917de1d0000000000000000000000000000000000000000000000000000000000000457000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000110000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff000102030405060708090a0b0c0d0e0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002aa00000000000000000000000000000000000000000000000000000000000000bb00000000000000000000000000000000000000000000000000000000000000", + }, + { + name: "MultipleInputs-InputTooShort", + inputData: keccakTypes.InputData{ + Input: dataOfLength(2*keccakTypes.BlockSize - 10), + Commitments: []common.Hash{{0xaa}, {0xbb}}, + Finalize: false, + }, + expectedTxData: 
"7917de1d0000000000000000000000000000000000000000000000000000000000000457000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000106000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff00010203040500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002aa00000000000000000000000000000000000000000000000000000000000000bb00000000000000000000000000000000000000000000000000000000000000", + }, + { + name: "MultipleInputs-FinalizeDoesNotPadInput", + inputData: keccakTypes.InputData{ + Input: dataOfLength(2*keccakTypes.BlockSize - 15), + Commitments: []common.Hash{{0xaa}, {0xbb}}, + Finalize: true, + }, + expectedTxData: 
"7917de1d0000000000000000000000000000000000000000000000000000000000000457000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000101000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002aa00000000000000000000000000000000000000000000000000000000000000bb00000000000000000000000000000000000000000000000000000000000000", + }, + { + name: "MultipleInputs-FinalizePadding-FullBlock", + inputData: keccakTypes.InputData{ + Input: dataOfLength(2 * keccakTypes.BlockSize), + Commitments: []common.Hash{{0xaa}, {0xbb}}, + Finalize: true, + }, + expectedTxData: 
"7917de1d0000000000000000000000000000000000000000000000000000000000000457000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000110000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff000102030405060708090a0b0c0d0e0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002aa00000000000000000000000000000000000000000000000000000000000000bb00000000000000000000000000000000000000000000000000000000000000", + }, + { + name: "MultipleInputs-FinalizePadding-TrailingZeros", + inputData: keccakTypes.InputData{ + Input: make([]byte, 2*keccakTypes.BlockSize), + Commitments: []common.Hash{{0xaa}, {0xbb}}, + Finalize: true, + }, + expectedTxData: 
"7917de1d0000000000000000000000000000000000000000000000000000000000000457000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002aa00000000000000000000000000000000000000000000000000000000000000bb00000000000000000000000000000000000000000000000000000000000000", + }, + { + name: "MultipleInputs-FinalizePadding-ShorterThanSingleBlock", + inputData: keccakTypes.InputData{ + Input: dataOfLength(keccakTypes.BlockSize - 5), + Commitments: []common.Hash{{0xaa}, {0xbb}}, + Finalize: true, + }, + expectedTxData: 
"7917de1d0000000000000000000000000000000000000000000000000000000000000457000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000083000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f80818200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002aa00000000000000000000000000000000000000000000000000000000000000bb00000000000000000000000000000000000000000000000000000000000000", + }, + } + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + var input []byte + if len(test.input) > 0 { + input = test.input + } else { + input = toAddLeavesTxData(t, oracle, ident.UUID, test.inputData) + } + require.Equal(t, test.expectedTxData, common.Bytes2Hex(input), + "ABI has been changed. 
Add tests to ensure historic transactions can be parsed before updating expectedTxData") + uuid, leaves, err := oracle.DecodeInputData(input) + if test.expectedErr != nil { + require.ErrorIs(t, err, test.expectedErr) + } else { + require.NoError(t, err) + require.Equal(t, ident.UUID, uuid) + require.Equal(t, test.inputData, leaves) + } + }) + } +} + +func TestChallenge_First(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + + ident := keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xab}, + UUID: big.NewInt(4829), + } + challenge := keccakTypes.Challenge{ + StateMatrix: keccakTypes.StateSnapshot{1, 2, 3, 4, 5}, + Prestate: keccakTypes.Leaf{}, + Poststate: keccakTypes.Leaf{ + Input: [136]byte{5, 4, 3, 2, 1}, + Index: 0, + StateCommitment: common.Hash{0xbb}, + }, + PoststateProof: merkle.Proof{common.Hash{0x01}, common.Hash{0x02}}, + } + stubRpc.SetResponse(oracleAddr, methodChallengeFirstLPP, rpcblock.Latest, + []interface{}{ + ident.Claimant, ident.UUID, + preimageOracleLeaf{ + Input: challenge.Poststate.Input[:], + Index: new(big.Int).SetUint64(challenge.Poststate.Index), + StateCommitment: challenge.Poststate.StateCommitment, + }, + challenge.PoststateProof, + }, + nil) + tx, err := oracle.ChallengeTx(ident, challenge) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) +} + +func TestChallenge_NotFirst(t *testing.T) { + stubRpc, oracle := setupPreimageOracleTest(t) + + ident := keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xab}, + UUID: big.NewInt(4829), + } + challenge := keccakTypes.Challenge{ + StateMatrix: keccakTypes.StateSnapshot{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}, + Prestate: keccakTypes.Leaf{ + Input: [136]byte{9, 8, 7, 6, 5}, + Index: 3, + StateCommitment: common.Hash{0xcc}, + }, + PrestateProof: merkle.Proof{common.Hash{0x01}, common.Hash{0x02}}, + Poststate: keccakTypes.Leaf{ + Input: [136]byte{5, 4, 3, 2, 1}, + Index: 4, + StateCommitment: 
common.Hash{0xbb}, + }, + PoststateProof: merkle.Proof{common.Hash{0x03}, common.Hash{0x04}}, + } + stubRpc.SetResponse(oracleAddr, methodChallengeLPP, rpcblock.Latest, + []interface{}{ + ident.Claimant, ident.UUID, + libKeccakStateMatrix{State: challenge.StateMatrix}, + preimageOracleLeaf{ + Input: challenge.Prestate.Input[:], + Index: new(big.Int).SetUint64(challenge.Prestate.Index), + StateCommitment: challenge.Prestate.StateCommitment, + }, + challenge.PrestateProof, + preimageOracleLeaf{ + Input: challenge.Poststate.Input[:], + Index: new(big.Int).SetUint64(challenge.Poststate.Index), + StateCommitment: challenge.Poststate.StateCommitment, + }, + challenge.PoststateProof, + }, + nil) + tx, err := oracle.ChallengeTx(ident, challenge) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) +} + +func toAddLeavesTxData(t *testing.T, oracle *PreimageOracleContract, uuid *big.Int, inputData keccakTypes.InputData) []byte { + tx, err := oracle.AddLeaves(uuid, big.NewInt(1), inputData.Input, inputData.Commitments, inputData.Finalize) + require.NoError(t, err) + return tx.TxData +} diff --git a/op-challenger2/game/fault/contracts/vm.go b/op-challenger2/game/fault/contracts/vm.go new file mode 100644 index 000000000000..1d1e22632cfe --- /dev/null +++ b/op-challenger2/game/fault/contracts/vm.go @@ -0,0 +1,38 @@ +package contracts + +import ( + "context" + "fmt" + + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" + "github.com/ethereum/go-ethereum/common" +) + +const ( + methodOracle = "oracle" +) + +// VMContract is a binding that works with contracts implementing the IBigStepper interface +type VMContract struct { + multiCaller *batching.MultiCaller + contract *batching.BoundContract +} + +func NewVMContract(addr common.Address, caller *batching.MultiCaller) *VMContract { + mipsAbi := 
snapshots.LoadMIPSABI() + + return &VMContract{ + multiCaller: caller, + contract: batching.NewBoundContract(mipsAbi, addr), + } +} + +func (c *VMContract) Oracle(ctx context.Context) (*PreimageOracleContract, error) { + results, err := c.multiCaller.SingleCall(ctx, rpcblock.Latest, c.contract.Call(methodOracle)) + if err != nil { + return nil, fmt.Errorf("failed to load oracle address: %w", err) + } + return NewPreimageOracleContract(results.GetAddress(0), c.multiCaller), nil +} diff --git a/op-challenger2/game/fault/contracts/vm_test.go b/op-challenger2/game/fault/contracts/vm_test.go new file mode 100644 index 000000000000..80fb7b42995b --- /dev/null +++ b/op-challenger2/game/fault/contracts/vm_test.go @@ -0,0 +1,32 @@ +package contracts + +import ( + "context" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + batchingTest "github.com/ethereum-optimism/optimism/op-service/sources/batching/test" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestVMContract_Oracle(t *testing.T) { + vmAbi := snapshots.LoadMIPSABI() + + stubRpc := batchingTest.NewAbiBasedRpc(t, vmAddr, vmAbi) + vmContract := NewVMContract(vmAddr, batching.NewMultiCaller(stubRpc, batching.DefaultBatchSize)) + + stubRpc.SetResponse(vmAddr, methodOracle, rpcblock.Latest, nil, []interface{}{oracleAddr}) + + oracleContract, err := vmContract.Oracle(context.Background()) + require.NoError(t, err) + tx, err := oracleContract.AddGlobalDataTx(types.NewPreimageOracleData(common.Hash{byte(preimage.Keccak256KeyType)}.Bytes(), make([]byte, 20), 0)) + require.NoError(t, err) + // This test doesn't care about all the tx details, we just want 
to confirm the contract binding is using the + // correct address + require.Equal(t, &oracleAddr, tx.To) +} diff --git a/op-challenger2/game/fault/player.go b/op-challenger2/game/fault/player.go new file mode 100644 index 000000000000..1b8c163aac26 --- /dev/null +++ b/op-challenger2/game/fault/player.go @@ -0,0 +1,212 @@ +package fault + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/claims" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/preimages" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/responder" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +type actor func(ctx context.Context) error + +type GameInfo interface { + GetStatus(context.Context) (gameTypes.GameStatus, error) + GetClaimCount(context.Context) (uint64, error) +} + +type SyncValidator interface { + ValidateNodeSynced(ctx context.Context, gameL1Head eth.BlockID) error +} + +type L1HeaderSource interface { + HeaderByHash(context.Context, common.Hash) (*gethTypes.Header, error) +} + +type TxSender interface { + From() common.Address + SendAndWaitSimple(txPurpose string, txs ...txmgr.TxCandidate) error +} + +type GamePlayer struct { + act actor + loader GameInfo + logger log.Logger + syncValidator SyncValidator + prestateValidators []Validator + status gameTypes.GameStatus + gameL1Head eth.BlockID +} + +type GameContract interface { 
// GameContract is the combined contract surface the player needs: preimage
// publication, responding to claims, bond handling, claim loading and the
// game's static parameters.
type GameContract interface {
	preimages.PreimageGameContract
	responder.GameContract
	claims.BondContract
	GameInfo
	ClaimLoader
	GetStatus(ctx context.Context) (gameTypes.GameStatus, error)
	GetMaxGameDepth(ctx context.Context) (types.Depth, error)
	GetMaxClockDuration(ctx context.Context) (time.Duration, error)
	GetOracle(ctx context.Context) (*contracts.PreimageOracleContract, error)
	GetL1Head(ctx context.Context) (common.Hash, error)
}

// resourceCreator builds the trace accessor for a game of the given depth,
// storing any required data under dir.
type resourceCreator func(ctx context.Context, logger log.Logger, gameDepth types.Depth, dir string) (types.TraceAccessor, error)

// NewGamePlayer creates a GamePlayer for the game deployed at addr.
// If the game is already resolved the returned player is a no-op; otherwise it
// wires up the trace accessor, preimage uploaders, responder and agent needed
// to act on the game.
func NewGamePlayer(
	ctx context.Context,
	systemClock clock.Clock,
	l1Clock types.ClockReader,
	logger log.Logger,
	m metrics.Metricer,
	dir string,
	addr common.Address,
	txSender TxSender,
	loader GameContract,
	syncValidator SyncValidator,
	validators []Validator,
	creator resourceCreator,
	l1HeaderSource L1HeaderSource,
	selective bool,
	claimants []common.Address,
) (*GamePlayer, error) {
	logger = logger.New("game", addr)

	status, err := loader.GetStatus(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch game status: %w", err)
	}
	if status != gameTypes.GameStatusInProgress {
		logger.Info("Game already resolved", "status", status)
		// Game is already complete so skip creating the trace provider, loading game inputs etc.
		return &GamePlayer{
			logger:             logger,
			loader:             loader,
			prestateValidators: validators,
			status:             status,
			// Act function does nothing because the game is already complete
			act: func(ctx context.Context) error {
				return nil
			},
		}, nil
	}

	maxClockDuration, err := loader.GetMaxClockDuration(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch the game duration: %w", err)
	}

	gameDepth, err := loader.GetMaxGameDepth(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch the game depth: %w", err)
	}

	accessor, err := creator(ctx, logger, gameDepth, dir)
	if err != nil {
		return nil, fmt.Errorf("failed to create trace accessor: %w", err)
	}

	oracle, err := loader.GetOracle(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to load oracle: %w", err)
	}

	// Resolve the game's L1 head hash to a full block ID so sync checks can
	// compare against the block number.
	l1HeadHash, err := loader.GetL1Head(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to load game L1 head: %w", err)
	}
	l1Header, err := l1HeaderSource.HeaderByHash(ctx, l1HeadHash)
	if err != nil {
		return nil, fmt.Errorf("failed to load L1 header %v: %w", l1HeadHash, err)
	}
	l1Head := eth.HeaderBlockID(l1Header)

	// Preimages below the threshold go directly on-chain; larger ones are
	// streamed via the large preimage proposal flow.
	minLargePreimageSize, err := oracle.MinLargePreimageSize(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to load min large preimage size: %w", err)
	}
	direct := preimages.NewDirectPreimageUploader(logger, txSender, loader)
	large := preimages.NewLargePreimageUploader(logger, l1Clock, txSender, oracle)
	uploader := preimages.NewSplitPreimageUploader(direct, large, minLargePreimageSize)
	responder, err := responder.NewFaultResponder(logger, txSender, loader, uploader, oracle)
	if err != nil {
		return nil, fmt.Errorf("failed to create the responder: %w", err)
	}

	agent := NewAgent(m, systemClock, l1Clock, loader, gameDepth, maxClockDuration, accessor, responder, logger, selective, claimants)
	return &GamePlayer{
		act:                agent.Act,
		loader:             loader,
		logger:             logger,
		status:             status,
		gameL1Head:         l1Head,
		syncValidator:      syncValidator,
		prestateValidators: validators,
	}, nil
}

// ValidatePrestate runs every configured prestate validator, returning the
// first validation failure encountered.
func (g *GamePlayer) ValidatePrestate(ctx context.Context) error {
	for _, validator := range g.prestateValidators {
		if err := validator.Validate(ctx); err != nil {
			return fmt.Errorf("failed to validate prestate: %w", err)
		}
	}
	return nil
}

// Status returns the game status most recently observed by ProgressGame
// (or the status loaded at construction time).
func (g *GamePlayer) Status() gameTypes.GameStatus {
	return g.status
}

// ProgressGame checks the current state of the game and performs any required
// actions, returning the (possibly updated) game status. It skips acting if
// the game is complete or the local node has not synced to the game's L1 head.
func (g *GamePlayer) ProgressGame(ctx context.Context) gameTypes.GameStatus {
	if g.status != gameTypes.GameStatusInProgress {
		// Game is already complete so don't try to perform further actions.
		g.logger.Trace("Skipping completed game")
		return g.status
	}
	if err := g.syncValidator.ValidateNodeSynced(ctx, g.gameL1Head); errors.Is(err, ErrNotInSync) {
		g.logger.Warn("Local node not sufficiently up to date", "err", err)
		return g.status
	} else if err != nil {
		g.logger.Error("Could not check local node was in sync", "err", err)
		return g.status
	}
	g.logger.Trace("Checking if actions are required")
	if err := g.act(ctx); err != nil {
		g.logger.Error("Error when acting on game", "err", err)
	}
	// Refresh the status even if acting failed so a resolved game is noticed.
	status, err := g.loader.GetStatus(ctx)
	if err != nil {
		g.logger.Error("Unable to retrieve game status", "err", err)
		return gameTypes.GameStatusInProgress
	}
	g.logGameStatus(ctx, status)
	g.status = status
	return status
}

// logGameStatus logs a summary of the game: the claim count while in
// progress, otherwise the resolved status.
func (g *GamePlayer) logGameStatus(ctx context.Context, status gameTypes.GameStatus) {
	if status == gameTypes.GameStatusInProgress {
		claimCount, err := g.loader.GetClaimCount(ctx)
		if err != nil {
			g.logger.Error("Failed to get claim count for in progress game", "err", err)
			return
		}
		g.logger.Info("Game info", "claims", claimCount, "status", status)
		return
	}
	g.logger.Info("Game resolved", "status", status)
}
// mockValidatorError is the sentinel error returned by mockValidator when
// configured to fail.
var mockValidatorError = fmt.Errorf("mock validator error")

// TestProgressGame_LogErrorFromAct verifies that an error from the act
// function is logged but does not prevent the status from being refreshed.
func TestProgressGame_LogErrorFromAct(t *testing.T) {
	handler, game, actor, _ := setupProgressGameTest(t)
	actor.actErr = errors.New("boom")
	status := game.ProgressGame(context.Background())
	require.Equal(t, types.GameStatusInProgress, status)
	require.Equal(t, 1, actor.callCount, "should perform next actions")
	levelFilter := testlog.NewLevelFilter(log.LevelError)
	msgFilter := testlog.NewMessageFilter("Error when acting on game")
	errLog := handler.FindLog(levelFilter, msgFilter)
	require.NotNil(t, errLog, "should log error")
	require.Equal(t, actor.actErr, errLog.AttrValue("err"))

	// Should still log game status
	levelFilter = testlog.NewLevelFilter(log.LevelInfo)
	msgFilter = testlog.NewMessageFilter("Game info")
	msg := handler.FindLog(levelFilter, msgFilter)
	require.NotNil(t, msg)
	require.Equal(t, uint64(1), msg.AttrValue("claims"))
}

// TestProgressGame_LogGameStatus verifies the log message emitted for each
// possible game status after progressing the game.
func TestProgressGame_LogGameStatus(t *testing.T) {
	tests := []struct {
		name   string
		status types.GameStatus
		logMsg string
	}{
		{
			name:   "ChallengerWon",
			status: types.GameStatusChallengerWon,
			logMsg: "Game resolved",
		},
		{
			name:   "DefenderWon",
			status: types.GameStatusDefenderWon,
			logMsg: "Game resolved",
		},
		{
			name:   "GameInProgress",
			status: types.GameStatusInProgress,
			logMsg: "Game info",
		},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			handler, game, gameState, _ := setupProgressGameTest(t)
			gameState.status = test.status

			status := game.ProgressGame(context.Background())
			require.Equal(t, 1, gameState.callCount, "should perform next actions")
			require.Equal(t, test.status, status)
			levelFilter := testlog.NewLevelFilter(log.LevelInfo)
			msgFilter := testlog.NewMessageFilter(test.logMsg)
			errLog := handler.FindLog(levelFilter, msgFilter)
			require.NotNil(t, errLog, "should log game result")
			require.Equal(t, test.status, errLog.AttrValue("status"))
		})
	}
}

// TestDoNotActOnCompleteGame verifies the player stops acting once it has
// observed a resolved game status.
func TestDoNotActOnCompleteGame(t *testing.T) {
	for _, status := range []types.GameStatus{types.GameStatusChallengerWon, types.GameStatusDefenderWon} {
		t.Run(status.String(), func(t *testing.T) {
			_, game, gameState, _ := setupProgressGameTest(t)
			gameState.status = status

			fetched := game.ProgressGame(context.Background())
			require.Equal(t, 1, gameState.callCount, "acts the first time")
			require.Equal(t, status, fetched)

			// Should not act when it knows the game is already complete
			fetched = game.ProgressGame(context.Background())
			require.Equal(t, 1, gameState.callCount, "does not act after game is complete")
			require.Equal(t, status, fetched)
		})
	}
}

// TestValidateLocalNodeSync verifies the player only acts when the sync
// validator reports the node is in sync.
func TestValidateLocalNodeSync(t *testing.T) {
	_, game, gameState, syncValidator := setupProgressGameTest(t)

	game.ProgressGame(context.Background())
	require.Equal(t, 1, gameState.callCount, "acts when in sync")

	syncValidator.result = errors.New("boom")
	game.ProgressGame(context.Background())
	require.Equal(t, 1, gameState.callCount, "does not act when not in sync")
}

// TestValidatePrestate verifies that all configured validators run and the
// first validator error is propagated.
func TestValidatePrestate(t *testing.T) {
	tests := []struct {
		name       string
		validators []Validator
		errors     bool
	}{
		{
			name:       "SingleValidator",
			validators: []Validator{&mockValidator{}},
			errors:     false,
		},
		{
			name:       "MultipleValidators",
			validators: []Validator{&mockValidator{}, &mockValidator{}},
			errors:     false,
		},
		{
			name:       "SingleValidator_Errors",
			validators: []Validator{&mockValidator{true}},
			errors:     true,
		},
		{
			name:       "MultipleValidators_Errors",
			validators: []Validator{&mockValidator{}, &mockValidator{true}},
			errors:     true,
		},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			player := &GamePlayer{
				prestateValidators: test.validators,
			}
			err := player.ValidatePrestate(context.Background())
			if test.errors {
				require.ErrorIs(t, err, mockValidatorError)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

var _ Validator = (*mockValidator)(nil)

// mockValidator fails validation with mockValidatorError when err is true.
type mockValidator struct {
	err bool
}

func (m *mockValidator) Validate(_ context.Context) error {
	if m.err {
		return mockValidatorError
	}
	return nil
}

// setupProgressGameTest builds a GamePlayer wired to capturing logs, a stub
// game state and a stub sync validator for use in the tests above.
func setupProgressGameTest(t *testing.T) (*testlog.CapturingHandler, *GamePlayer, *stubGameState, *stubSyncValidator) {
	logger, logs := testlog.CaptureLogger(t, log.LevelDebug)
	gameState := &stubGameState{claimCount: 1}
	syncValidator := &stubSyncValidator{}
	game := &GamePlayer{
		act:           gameState.Act,
		loader:        gameState,
		logger:        logger,
		syncValidator: syncValidator,
		gameL1Head: eth.BlockID{
			Hash:   common.Hash{0x1a},
			Number: 32,
		},
	}
	return logs, game, gameState, syncValidator
}

// stubSyncValidator returns a fixed result from ValidateNodeSynced.
type stubSyncValidator struct {
	result error
}

func (s *stubSyncValidator) ValidateNodeSynced(_ context.Context, _ eth.BlockID) error {
	return s.result
}

// stubGameState acts as both the actor and the game loader, recording how
// many times Act is invoked.
type stubGameState struct {
	status     types.GameStatus
	claimCount uint64
	callCount  int
	actErr     error
	Err        error
}

func (s *stubGameState) Act(ctx context.Context) error {
	s.callCount++
	return s.actErr
}

func (s *stubGameState) GetStatus(ctx context.Context) (types.GameStatus, error) {
	return s.status, nil
}

func (s *stubGameState) GetClaimCount(ctx context.Context) (uint64, error) {
	return s.claimCount, nil
}

func (s *stubGameState) GetAbsolutePrestateHash(ctx context.Context) (common.Hash, error) {
	return common.Hash{}, s.Err
}
b/op-challenger2/game/fault/preimages/direct.go new file mode 100644 index 000000000000..91bb9737039d --- /dev/null +++ b/op-challenger2/game/fault/preimages/direct.go @@ -0,0 +1,44 @@ +package preimages + +import ( + "context" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/log" +) + +var _ PreimageUploader = (*DirectPreimageUploader)(nil) + +type PreimageGameContract interface { + UpdateOracleTx(ctx context.Context, claimIdx uint64, data *types.PreimageOracleData) (txmgr.TxCandidate, error) +} + +// DirectPreimageUploader uploads the provided [types.PreimageOracleData] +// directly to the PreimageOracle contract in a single transaction. +type DirectPreimageUploader struct { + log log.Logger + + txSender TxSender + contract PreimageGameContract +} + +func NewDirectPreimageUploader(logger log.Logger, txSender TxSender, contract PreimageGameContract) *DirectPreimageUploader { + return &DirectPreimageUploader{logger, txSender, contract} +} + +func (d *DirectPreimageUploader) UploadPreimage(ctx context.Context, claimIdx uint64, data *types.PreimageOracleData) error { + if data == nil { + return ErrNilPreimageData + } + d.log.Info("Updating oracle data", "key", fmt.Sprintf("%x", data.OracleKey)) + candidate, err := d.contract.UpdateOracleTx(ctx, claimIdx, data) + if err != nil { + return fmt.Errorf("failed to create pre-image oracle tx: %w", err) + } + if err := d.txSender.SendAndWaitSimple("populate pre-image oracle", candidate); err != nil { + return fmt.Errorf("failed to populate pre-image oracle: %w", err) + } + return nil +} diff --git a/op-challenger2/game/fault/preimages/direct_test.go b/op-challenger2/game/fault/preimages/direct_test.go new file mode 100644 index 000000000000..7592e54f017d --- /dev/null +++ b/op-challenger2/game/fault/preimages/direct_test.go @@ -0,0 +1,94 @@ +package preimages + +import ( + "context" + "errors" + 
// Sentinel errors returned by the mocks below so tests can assert on the
// exact failure with errors.Is.
var (
	mockUpdateOracleTxError = errors.New("mock update oracle tx error")
	mockTxMgrSendError      = errors.New("mock tx mgr send error")
	mockInitLPPError        = errors.New("mock init LPP error")
)

// TestDirectPreimageUploader_UploadPreimage covers tx-creation failure,
// send failure, nil data rejection and the happy path.
func TestDirectPreimageUploader_UploadPreimage(t *testing.T) {
	t.Run("UpdateOracleTxFails", func(t *testing.T) {
		oracle, txMgr, contract := newTestDirectPreimageUploader(t)
		contract.updateFails = true
		err := oracle.UploadPreimage(context.Background(), 0, &types.PreimageOracleData{})
		require.ErrorIs(t, err, mockUpdateOracleTxError)
		require.Equal(t, 1, contract.updateCalls)
		require.Equal(t, 0, txMgr.sends) // verify that the tx was not sent
	})

	t.Run("SendFails", func(t *testing.T) {
		oracle, txMgr, contract := newTestDirectPreimageUploader(t)
		txMgr.sendFails = true
		err := oracle.UploadPreimage(context.Background(), 0, &types.PreimageOracleData{})
		require.ErrorIs(t, err, mockTxMgrSendError)
		require.Equal(t, 1, contract.updateCalls)
		require.Equal(t, 1, txMgr.sends)
	})

	t.Run("NilPreimageData", func(t *testing.T) {
		oracle, _, _ := newTestDirectPreimageUploader(t)
		err := oracle.UploadPreimage(context.Background(), 0, nil)
		require.ErrorIs(t, err, ErrNilPreimageData)
	})

	t.Run("Success", func(t *testing.T) {
		oracle, _, contract := newTestDirectPreimageUploader(t)
		err := oracle.UploadPreimage(context.Background(), 0, &types.PreimageOracleData{})
		require.NoError(t, err)
		require.Equal(t, 1, contract.updateCalls)
	})
}

// newTestDirectPreimageUploader builds an uploader with mock tx sender and
// mock game contract for the tests above.
func newTestDirectPreimageUploader(t *testing.T) (*DirectPreimageUploader, *mockTxSender, *mockPreimageGameContract) {
	logger := testlog.Logger(t, log.LevelError)
	txMgr := &mockTxSender{}
	contract := &mockPreimageGameContract{}
	return NewDirectPreimageUploader(logger, txMgr, contract), txMgr, contract
}

// mockPreimageGameContract counts UpdateOracleTx calls and can be configured
// to fail.
type mockPreimageGameContract struct {
	updateCalls int
	updateFails bool
}

func (s *mockPreimageGameContract) UpdateOracleTx(_ context.Context, _ uint64, _ *types.PreimageOracleData) (txmgr.TxCandidate, error) {
	s.updateCalls++
	if s.updateFails {
		return txmgr.TxCandidate{}, mockUpdateOracleTxError
	}
	return txmgr.TxCandidate{}, nil
}

// mockTxSender counts sends and can simulate send or status failures.
type mockTxSender struct {
	sends     int
	sendFails bool
	// NOTE(review): statusFail has no writer in the visible tests — confirm
	// whether it is still needed or dead configuration.
	statusFail bool
}

func (s *mockTxSender) From() common.Address {
	return common.Address{}
}

func (s *mockTxSender) SendAndWaitSimple(_ string, _ ...txmgr.TxCandidate) error {
	s.sends++
	if s.sendFails {
		return mockTxMgrSendError
	}
	if s.statusFail {
		return errors.New("transaction reverted")
	}
	return nil
}
// ErrChallengePeriodNotOver is returned when the challenge period is not over.
var ErrChallengePeriodNotOver = errors.New("challenge period not over")

// MaxBlocksPerChunk is the maximum number of keccak blocks per chunk.
const MaxBlocksPerChunk = 300

// MaxChunkSize is the maximum size of a preimage chunk in bytes.
// Notice, the max chunk size must be a multiple of the leaf size.
// The max chunk size is roughly 0.04MB to avoid memory expansion.
const MaxChunkSize = MaxBlocksPerChunk * keccakTypes.BlockSize

// LargePreimageUploader handles uploading large preimages by
// streaming the merkleized preimage to the PreimageOracle contract,
// tightly packed across multiple transactions.
type LargePreimageUploader struct {
	log log.Logger

	clock    types.ClockReader
	txSender TxSender
	contract PreimageOracleContract
}

// NewLargePreimageUploader creates a LargePreimageUploader that proposes and
// finalizes large preimages through the given oracle contract.
func NewLargePreimageUploader(logger log.Logger, cl types.ClockReader, txSender TxSender, contract PreimageOracleContract) *LargePreimageUploader {
	return &LargePreimageUploader{logger, cl, txSender, contract}
}

// UploadPreimage initializes (if needed) the large preimage proposal for
// data, uploads any chunks not yet processed on-chain, and attempts to
// squeeze the proposal. It is safe to call repeatedly: already-uploaded
// chunks are skipped based on the on-chain metadata.
func (p *LargePreimageUploader) UploadPreimage(ctx context.Context, parent uint64, data *types.PreimageOracleData) error {
	p.log.Debug("Upload large preimage", "key", hexutil.Bytes(data.OracleKey))
	stateMatrix, calls, err := p.splitCalls(data)
	if err != nil {
		return fmt.Errorf("failed to split preimage into chunks for data with oracle offset %d: %w", data.OracleOffset, err)
	}

	uuid := NewUUID(p.txSender.From(), data)

	// Fetch the current metadata for this preimage data, if it exists.
	ident := keccakTypes.LargePreimageIdent{Claimant: p.txSender.From(), UUID: uuid}
	metadata, err := p.contract.GetProposalMetadata(ctx, rpcblock.Latest, ident)
	if err != nil {
		return fmt.Errorf("failed to get pre-image oracle metadata: %w", err)
	}

	// The proposal is not initialized if the queried metadata has a claimed size of 0.
	if len(metadata) == 1 && metadata[0].ClaimedSize == 0 {
		err = p.initLargePreimage(uuid, data.OracleOffset, uint32(len(data.GetPreimageWithoutSize())))
		if err != nil {
			return fmt.Errorf("failed to initialize large preimage with uuid: %s: %w", uuid, err)
		}
	}

	// Filter out any chunks that have already been uploaded to the Preimage Oracle.
	if len(metadata) > 0 {
		numSkip := metadata[0].BytesProcessed / MaxChunkSize
		calls = calls[numSkip:]
		// If the timestamp is non-zero, the preimage has been finalized.
		// Nothing is left to upload in that case.
		if metadata[0].Timestamp != 0 {
			calls = calls[len(calls):]
		}
	}

	err = p.addLargePreimageData(uuid, calls)
	if err != nil {
		return fmt.Errorf("failed to add leaves to large preimage with uuid: %s: %w", uuid, err)
	}

	return p.Squeeze(ctx, uuid, stateMatrix)
}

// NewUUID generates a new unique identifier for the preimage by hashing the
// concatenated preimage data, preimage offset, and sender address.
func NewUUID(sender common.Address, data *types.PreimageOracleData) *big.Int {
	offset := make([]byte, 4)
	binary.LittleEndian.PutUint32(offset, data.OracleOffset)
	concatenated := append(data.GetPreimageWithoutSize(), offset...)
	concatenated = append(concatenated, sender.Bytes()...)
	hash := crypto.Keccak256Hash(concatenated)
	return hash.Big()
}

// splitCalls splits the preimage data into chunks of size [MaxChunkSize] (except the last chunk).
// It also returns the state matrix and the data for the squeeze call if possible.
func (p *LargePreimageUploader) splitCalls(data *types.PreimageOracleData) (*matrix.StateMatrix, []keccakTypes.InputData, error) {
	// Split the preimage data into chunks of size [MaxChunkSize] (except the last chunk).
	stateMatrix := matrix.NewStateMatrix()
	var calls []keccakTypes.InputData
	in := bytes.NewReader(data.GetPreimageWithoutSize())
	for {
		call, err := stateMatrix.AbsorbUpTo(in, MaxChunkSize)
		if errors.Is(err, io.EOF) {
			// Final (possibly partial) chunk absorbed.
			calls = append(calls, call)
			break
		} else if err != nil {
			return nil, nil, fmt.Errorf("failed to absorb data: %w", err)
		}
		calls = append(calls, call)
	}
	return stateMatrix, calls, nil
}

// Squeeze finalizes the large preimage proposal identified by uuid once the
// challenge period has elapsed. It first simulates the squeeze via
// CallSqueeze and only submits the transaction if the simulation succeeds.
func (p *LargePreimageUploader) Squeeze(ctx context.Context, uuid *big.Int, stateMatrix *matrix.StateMatrix) error {
	prestateMatrix := stateMatrix.PrestateMatrix()
	prestate, prestateProof := stateMatrix.PrestateWithProof()
	poststate, poststateProof := stateMatrix.PoststateWithProof()
	challengePeriod, err := p.contract.ChallengePeriod(ctx)
	if err != nil {
		return fmt.Errorf("failed to get challenge period: %w", err)
	}
	currentTimestamp := p.clock.Now().Unix()
	ident := keccakTypes.LargePreimageIdent{Claimant: p.txSender.From(), UUID: uuid}
	metadata, err := p.contract.GetProposalMetadata(ctx, rpcblock.Latest, ident)
	if err != nil {
		return fmt.Errorf("failed to get pre-image oracle metadata: %w", err)
	}
	if len(metadata) == 0 || metadata[0].ClaimedSize == 0 {
		return fmt.Errorf("no metadata found for pre-image oracle with uuid: %s", uuid)
	}
	if uint64(currentTimestamp) < metadata[0].Timestamp+challengePeriod {
		return ErrChallengePeriodNotOver
	}
	if err := p.contract.CallSqueeze(ctx, p.txSender.From(), uuid, prestateMatrix, prestate, prestateProof, poststate, poststateProof); err != nil {
		p.log.Warn("Expected a successful squeeze call", "metadataTimestamp", metadata[0].Timestamp, "currentTimestamp", currentTimestamp, "err", err)
		return fmt.Errorf("failed to call squeeze: %w", err)
	}
	p.log.Info("Squeezing large preimage", "uuid", uuid)
	tx, err := p.contract.Squeeze(p.txSender.From(), uuid, prestateMatrix, prestate, prestateProof, poststate, poststateProof)
	if err != nil {
		return fmt.Errorf("failed to create pre-image oracle tx: %w", err)
	}
	if err := p.txSender.SendAndWaitSimple("squeeze large preimage", tx); err != nil {
		return fmt.Errorf("failed to populate pre-image oracle: %w", err)
	}
	return nil
}

// initLargePreimage initializes the large preimage proposal.
// This method *must* be called before adding any leaves.
func (p *LargePreimageUploader) initLargePreimage(uuid *big.Int, partOffset uint32, claimedSize uint32) error {
	p.log.Info("Init large preimage upload", "uuid", uuid, "partOffset", partOffset, "size", claimedSize)
	candidate, err := p.contract.InitLargePreimage(uuid, partOffset, claimedSize)
	if err != nil {
		return fmt.Errorf("failed to create pre-image oracle tx: %w", err)
	}
	// The proposal requires posting the minimum LPP bond with the init tx.
	bond, err := p.contract.GetMinBondLPP(context.Background())
	if err != nil {
		return fmt.Errorf("failed to get min bond for large preimage proposal: %w", err)
	}
	candidate.Value = bond
	if err := p.txSender.SendAndWaitSimple("init large preimage", candidate); err != nil {
		return fmt.Errorf("failed to populate pre-image oracle: %w", err)
	}
	return nil
}

// addLargePreimageData adds data to the large preimage proposal.
// This method **must** be called after calling [initLargePreimage].
// SAFETY: submits transactions in a [Queue] for latency while preserving submission order.
+func (p *LargePreimageUploader) addLargePreimageData(uuid *big.Int, chunks []keccakTypes.InputData) error { + txs := make([]txmgr.TxCandidate, len(chunks)) + blocksProcessed := int64(0) + for i, chunk := range chunks { + tx, err := p.contract.AddLeaves(uuid, big.NewInt(blocksProcessed), chunk.Input, chunk.Commitments, chunk.Finalize) + if err != nil { + return fmt.Errorf("failed to create pre-image oracle tx: %w", err) + } + blocksProcessed += int64(len(chunk.Input) / keccakTypes.BlockSize) + txs[i] = tx + } + p.log.Info("Adding large preimage leaves", "uuid", uuid, "blocksProcessed", blocksProcessed, "txs", len(txs)) + return p.txSender.SendAndWaitSimple("add leaf to large preimage", txs...) +} diff --git a/op-challenger2/game/fault/preimages/large_test.go b/op-challenger2/game/fault/preimages/large_test.go new file mode 100644 index 000000000000..7a06d09333ec --- /dev/null +++ b/op-challenger2/game/fault/preimages/large_test.go @@ -0,0 +1,351 @@ +package preimages + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "io" + "math/big" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/matrix" + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/merkle" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +var ( + mockChallengePeriod = uint64(10000000) + mockAddLeavesError = errors.New("mock add leaves 
// Sentinel errors returned by the large-preimage mocks so tests can assert
// on the exact failure with errors.Is.
var (
	mockChallengePeriod  = uint64(10000000)
	mockAddLeavesError   = errors.New("mock add leaves error")
	mockSqueezeError     = errors.New("mock squeeze error")
	mockSqueezeCallError = errors.New("mock squeeze call error")
)

// TestLargePreimageUploader_NewUUID pins the UUID derivation: the UUID must
// change when the data, the offset, or the sender changes.
func TestLargePreimageUploader_NewUUID(t *testing.T) {
	tests := []struct {
		name         string
		data         *types.PreimageOracleData
		expectedUUID *big.Int
	}{
		{
			name:         "EmptyOracleData",
			data:         makePreimageData([]byte{}, 0),
			expectedUUID: new(big.Int).SetBytes(common.FromHex("827b659bbda2a0bdecce2c91b8b68462545758f3eba2dbefef18e0daf84f5ccd")),
		},
		{
			name:         "OracleDataAndOffset_Control",
			data:         makePreimageData([]byte{1, 2, 3}, 0x010203),
			expectedUUID: new(big.Int).SetBytes(common.FromHex("641e230bcf3ade8c71b7e591d210184cdb190e853f61ba59a1411c3b7aca9890")),
		},
		{
			name:         "OracleDataAndOffset_DifferentOffset",
			data:         makePreimageData([]byte{1, 2, 3}, 0x010204),
			expectedUUID: new(big.Int).SetBytes(common.FromHex("aec56de44401325420e5793f72b777e3e547778de7d8344004b31be086a3136d")),
		},
		{
			name:         "OracleDataAndOffset_DifferentData",
			data:         makePreimageData([]byte{1, 2, 3, 4}, 0x010203),
			expectedUUID: new(big.Int).SetBytes(common.FromHex("ca38aa17d56805cf26376a050c2c7b15b6be4e709bc422a1c679fe21aa6aa8c7")),
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			oracle, _, _, _ := newTestLargePreimageUploader(t)
			uuid := NewUUID(oracle.txSender.From(), test.data)
			require.Equal(t, test.expectedUUID, uuid)
		})
	}
}

// TestLargePreimageUploader_UploadPreimage_EdgeCases exercises init/add/squeeze
// failures, resumption from partially-processed metadata, and the challenge
// period gating of squeeze.
func TestLargePreimageUploader_UploadPreimage_EdgeCases(t *testing.T) {
	t.Run("InitFails", func(t *testing.T) {
		oracle, _, _, contract := newTestLargePreimageUploader(t)
		contract.initFails = true
		data := mockPreimageOracleData()
		err := oracle.UploadPreimage(context.Background(), 0, data)
		require.ErrorIs(t, err, mockInitLPPError)
		require.Equal(t, 1, contract.initCalls)
	})

	t.Run("AddLeavesFails", func(t *testing.T) {
		oracle, _, _, contract := newTestLargePreimageUploader(t)
		contract.addFails = true
		data := mockPreimageOracleData()
		err := oracle.UploadPreimage(context.Background(), 0, data)
		require.ErrorIs(t, err, mockAddLeavesError)
		require.Equal(t, 1, contract.addCalls)
	})

	t.Run("NoBytesProcessed", func(t *testing.T) {
		oracle, _, _, contract := newTestLargePreimageUploader(t)
		data := mockPreimageOracleData()
		contract.claimedSize = uint32(len(data.GetPreimageWithoutSize()))
		err := oracle.UploadPreimage(context.Background(), 0, data)
		require.NoError(t, err)
		require.Equal(t, 1, contract.initCalls)
		require.Equal(t, 6, contract.addCalls)
		require.Equal(t, data.GetPreimageWithoutSize(), contract.addData)
	})

	t.Run("AlreadyInitialized", func(t *testing.T) {
		oracle, _, _, contract := newTestLargePreimageUploader(t)
		data := mockPreimageOracleData()
		contract.initialized = true
		contract.claimedSize = uint32(len(data.GetPreimageWithoutSize()))
		err := oracle.UploadPreimage(context.Background(), 0, data)
		require.NoError(t, err)
		require.Equal(t, 0, contract.initCalls)
		require.Equal(t, 6, contract.addCalls)
	})

	t.Run("ChallengePeriodNotElapsed", func(t *testing.T) {
		oracle, cl, _, contract := newTestLargePreimageUploader(t)
		data := mockPreimageOracleData()
		contract.bytesProcessed = 5*MaxChunkSize + 1
		contract.claimedSize = uint32(len(data.GetPreimageWithoutSize()))
		contract.timestamp = uint64(cl.Now().Unix())
		err := oracle.UploadPreimage(context.Background(), 0, data)
		require.ErrorIs(t, err, ErrChallengePeriodNotOver)
		require.Equal(t, 0, contract.squeezeCalls)
		// Squeeze should be called once the challenge period has elapsed.
		cl.AdvanceTime(time.Duration(mockChallengePeriod) * time.Second)
		err = oracle.UploadPreimage(context.Background(), 0, data)
		require.NoError(t, err)
		require.Equal(t, 1, contract.squeezeCalls)
	})

	t.Run("SqueezeCallFails", func(t *testing.T) {
		oracle, _, _, contract := newTestLargePreimageUploader(t)
		data := mockPreimageOracleData()
		contract.bytesProcessed = 5*MaxChunkSize + 1
		contract.timestamp = 123
		contract.claimedSize = uint32(len(data.GetPreimageWithoutSize()))
		contract.squeezeCallFails = true
		err := oracle.UploadPreimage(context.Background(), 0, data)
		require.ErrorIs(t, err, mockSqueezeCallError)
		require.Equal(t, 0, contract.squeezeCalls)
	})

	t.Run("SqueezeFails", func(t *testing.T) {
		oracle, _, _, contract := newTestLargePreimageUploader(t)
		data := mockPreimageOracleData()
		contract.bytesProcessed = 5*MaxChunkSize + 1
		contract.timestamp = 123
		contract.claimedSize = uint32(len(data.GetPreimageWithoutSize()))
		contract.squeezeFails = true
		err := oracle.UploadPreimage(context.Background(), 0, data)
		require.ErrorIs(t, err, mockSqueezeError)
		require.Equal(t, 1, contract.squeezeCalls)
	})

	t.Run("AllBytesProcessed", func(t *testing.T) {
		oracle, _, _, contract := newTestLargePreimageUploader(t)
		data := mockPreimageOracleData()
		contract.bytesProcessed = 5*MaxChunkSize + 1
		contract.timestamp = 123
		contract.claimedSize = uint32(len(data.GetPreimageWithoutSize()))
		err := oracle.UploadPreimage(context.Background(), 0, data)
		require.NoError(t, err)
		require.Equal(t, 0, contract.initCalls)
		require.Equal(t, 0, contract.addCalls)
		require.Empty(t, contract.addData)
	})
}
+ } + // Add a single byte to the end to make sure the last leaf is not processed. + oracleData = append(oracleData, byte(1)) + return makePreimageData(oracleData, 0) +} + +func makePreimageData(pre []byte, offset uint32) *types.PreimageOracleData { + key := preimage.Keccak256Key(crypto.Keccak256Hash(pre)).PreimageKey() + // add the length prefix + preimage := make([]byte, 0, 8+len(pre)) + preimage = binary.BigEndian.AppendUint64(preimage, uint64(len(pre))) + preimage = append(preimage, pre...) + return types.NewPreimageOracleData(key[:], preimage, offset) +} + +func TestLargePreimageUploader_UploadPreimage_Succeeds(t *testing.T) { + fullLeaf := new([keccakTypes.BlockSize]byte) + for i := 0; i < keccakTypes.BlockSize; i++ { + fullLeaf[i] = byte(i) + } + chunk := make([]byte, 0, MaxChunkSize) + for i := 0; i < MaxBlocksPerChunk; i++ { + chunk = append(chunk, fullLeaf[:]...) + } + tests := []struct { + name string + input []byte + addCalls int + prestateLeaf keccakTypes.Leaf + poststateLeaf keccakTypes.Leaf + }{ + { + name: "FullLeaf", + input: fullLeaf[:], + addCalls: 1, + }, + { + name: "MultipleLeaves", + input: append(fullLeaf[:], append(fullLeaf[:], fullLeaf[:]...)...), + addCalls: 1, + }, + { + name: "MultipleLeavesUnaligned", + input: append(fullLeaf[:], append(fullLeaf[:], byte(9))...), + addCalls: 1, + }, + { + name: "MultipleChunks", + input: append(chunk, append(fullLeaf[:], fullLeaf[:]...)...), + addCalls: 2, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + oracle, _, _, contract := newTestLargePreimageUploader(t) + data := makePreimageData(test.input, 0) + err := oracle.UploadPreimage(context.Background(), 0, data) + require.NoError(t, err) + require.Equal(t, test.addCalls, contract.addCalls) + // There must always be at least one init and squeeze call + // for successful large preimage upload calls. 
+ require.Equal(t, 1, contract.initCalls) + require.Equal(t, 1, contract.squeezeCalls) + + // Use the StateMatrix to determine the expected leaves so it includes padding correctly. + // We rely on the unit tests for StateMatrix to confirm that it does the right thing. + s := matrix.NewStateMatrix() + _, err = s.AbsorbUpTo(bytes.NewReader(test.input), keccakTypes.BlockSize*10000) + require.ErrorIs(t, err, io.EOF) + prestate, _ := s.PrestateWithProof() + poststate, _ := s.PoststateWithProof() + require.Equal(t, prestate, contract.squeezePrestate) + require.Equal(t, poststate, contract.squeezePoststate) + }) + } +} + +func newTestLargePreimageUploader(t *testing.T) (*LargePreimageUploader, *clock.AdvancingClock, *mockTxSender, *mockPreimageOracleContract) { + logger := testlog.Logger(t, log.LevelError) + cl := clock.NewAdvancingClock(time.Second) + cl.Start() + txSender := &mockTxSender{} + contract := &mockPreimageOracleContract{ + addData: make([]byte, 0), + } + return NewLargePreimageUploader(logger, cl, txSender, contract), cl, txSender, contract +} + +type mockPreimageOracleContract struct { + initCalls int + initFails bool + initialized bool + claimedSize uint32 + bytesProcessed int + timestamp uint64 + addCalls int + addFails bool + addData []byte + squeezeCalls int + squeezeFails bool + squeezeCallFails bool + squeezeCallClaimSize uint32 + squeezePrestate keccakTypes.Leaf + squeezePoststate keccakTypes.Leaf +} + +func (s *mockPreimageOracleContract) InitLargePreimage(_ *big.Int, _ uint32, _ uint32) (txmgr.TxCandidate, error) { + s.initCalls++ + if s.initFails { + return txmgr.TxCandidate{}, mockInitLPPError + } + return txmgr.TxCandidate{}, nil +} + +func (s *mockPreimageOracleContract) AddLeaves(_ *big.Int, _ *big.Int, input []byte, _ []common.Hash, _ bool) (txmgr.TxCandidate, error) { + s.addCalls++ + s.addData = append(s.addData, input...) 
+ if s.addFails { + return txmgr.TxCandidate{}, mockAddLeavesError + } + return txmgr.TxCandidate{}, nil +} + +func (s *mockPreimageOracleContract) Squeeze(_ common.Address, _ *big.Int, _ keccakTypes.StateSnapshot, prestate keccakTypes.Leaf, _ merkle.Proof, poststate keccakTypes.Leaf, _ merkle.Proof) (txmgr.TxCandidate, error) { + s.squeezeCalls++ + s.squeezePrestate = prestate + s.squeezePoststate = poststate + if s.squeezeFails { + return txmgr.TxCandidate{}, mockSqueezeError + } + return txmgr.TxCandidate{}, nil +} + +func (s *mockPreimageOracleContract) ChallengePeriod(_ context.Context) (uint64, error) { + return mockChallengePeriod, nil +} + +func (s *mockPreimageOracleContract) GetProposalMetadata(_ context.Context, _ rpcblock.Block, idents ...keccakTypes.LargePreimageIdent) ([]keccakTypes.LargePreimageMetaData, error) { + if s.squeezeCallClaimSize > 0 { + metadata := make([]keccakTypes.LargePreimageMetaData, 0) + for _, ident := range idents { + metadata = append(metadata, keccakTypes.LargePreimageMetaData{ + LargePreimageIdent: ident, + ClaimedSize: s.squeezeCallClaimSize, + BytesProcessed: uint32(s.bytesProcessed), + Timestamp: s.timestamp, + }) + } + return metadata, nil + } + if s.initialized || s.bytesProcessed > 0 { + metadata := make([]keccakTypes.LargePreimageMetaData, 0) + for _, ident := range idents { + metadata = append(metadata, keccakTypes.LargePreimageMetaData{ + LargePreimageIdent: ident, + ClaimedSize: s.claimedSize, + BytesProcessed: uint32(s.bytesProcessed), + Timestamp: s.timestamp, + }) + } + return metadata, nil + } + s.squeezeCallClaimSize = 1 + return []keccakTypes.LargePreimageMetaData{{LargePreimageIdent: idents[0]}}, nil +} + +func (s *mockPreimageOracleContract) GetMinBondLPP(_ context.Context) (*big.Int, error) { + return big.NewInt(0), nil +} + +func (s *mockPreimageOracleContract) CallSqueeze(_ context.Context, _ common.Address, _ *big.Int, _ keccakTypes.StateSnapshot, _ keccakTypes.Leaf, _ merkle.Proof, _ keccakTypes.Leaf, _ 
merkle.Proof) error { + if s.squeezeCallFails { + return mockSqueezeCallError + } + return nil +} diff --git a/op-challenger2/game/fault/preimages/split.go b/op-challenger2/game/fault/preimages/split.go new file mode 100644 index 000000000000..fd92f46f3997 --- /dev/null +++ b/op-challenger2/game/fault/preimages/split.go @@ -0,0 +1,33 @@ +package preimages + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" +) + +var _ PreimageUploader = (*SplitPreimageUploader)(nil) + +// SplitPreimageUploader routes preimage uploads to the appropriate uploader +// based on the size of the preimage. +type SplitPreimageUploader struct { + largePreimageSizeThreshold uint64 + directUploader PreimageUploader + largeUploader PreimageUploader +} + +func NewSplitPreimageUploader(directUploader PreimageUploader, largeUploader PreimageUploader, minLargePreimageSize uint64) *SplitPreimageUploader { + return &SplitPreimageUploader{minLargePreimageSize, directUploader, largeUploader} +} + +func (s *SplitPreimageUploader) UploadPreimage(ctx context.Context, parent uint64, data *types.PreimageOracleData) error { + if data == nil { + return ErrNilPreimageData + } + // Always route local preimage uploads to the direct uploader. 
+ if data.IsLocal || uint64(len(data.GetPreimageWithoutSize())) < s.largePreimageSizeThreshold { + return s.directUploader.UploadPreimage(ctx, parent, data) + } else { + return s.largeUploader.UploadPreimage(ctx, parent, data) + } +} diff --git a/op-challenger2/game/fault/preimages/split_test.go b/op-challenger2/game/fault/preimages/split_test.go new file mode 100644 index 000000000000..bd2426e12e49 --- /dev/null +++ b/op-challenger2/game/fault/preimages/split_test.go @@ -0,0 +1,70 @@ +package preimages + +import ( + "context" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/stretchr/testify/require" +) + +var mockLargePreimageSizeThreshold = uint64(100) + +func TestSplitPreimageUploader_UploadPreimage(t *testing.T) { + t.Run("DirectUploadSucceeds", func(t *testing.T) { + oracle, direct, large := newTestSplitPreimageUploader(t, mockLargePreimageSizeThreshold) + err := oracle.UploadPreimage(context.Background(), 0, makePreimageData(nil, 0)) + require.NoError(t, err) + require.Equal(t, 1, direct.updates) + require.Equal(t, 0, large.updates) + }) + + t.Run("LocalDataUploadSucceeds", func(t *testing.T) { + oracle, direct, large := newTestSplitPreimageUploader(t, mockLargePreimageSizeThreshold) + err := oracle.UploadPreimage(context.Background(), 0, &types.PreimageOracleData{IsLocal: true}) + require.NoError(t, err) + require.Equal(t, 1, direct.updates) + require.Equal(t, 0, large.updates) + }) + + t.Run("MaxSizeDirectUploadSucceeds", func(t *testing.T) { + oracle, direct, large := newTestSplitPreimageUploader(t, mockLargePreimageSizeThreshold) + err := oracle.UploadPreimage(context.Background(), 0, makePreimageData(make([]byte, mockLargePreimageSizeThreshold-1), 0)) + require.NoError(t, err) + require.Equal(t, 1, direct.updates) + require.Equal(t, 0, large.updates) + }) + + t.Run("LargeUploadSucceeds", func(t *testing.T) { + oracle, direct, large := newTestSplitPreimageUploader(t, mockLargePreimageSizeThreshold) + 
err := oracle.UploadPreimage(context.Background(), 0, makePreimageData(make([]byte, mockLargePreimageSizeThreshold), 0)) + require.NoError(t, err) + require.Equal(t, 1, large.updates) + require.Equal(t, 0, direct.updates) + }) + + t.Run("NilPreimageOracleData", func(t *testing.T) { + oracle, _, _ := newTestSplitPreimageUploader(t, mockLargePreimageSizeThreshold) + err := oracle.UploadPreimage(context.Background(), 0, nil) + require.ErrorIs(t, err, ErrNilPreimageData) + }) +} + +type mockPreimageUploader struct { + updates int + uploadFails bool +} + +func (s *mockPreimageUploader) UploadPreimage(ctx context.Context, parent uint64, data *types.PreimageOracleData) error { + s.updates++ + if s.uploadFails { + return mockUpdateOracleTxError + } + return nil +} + +func newTestSplitPreimageUploader(t *testing.T, threshold uint64) (*SplitPreimageUploader, *mockPreimageUploader, *mockPreimageUploader) { + direct := &mockPreimageUploader{} + large := &mockPreimageUploader{} + return NewSplitPreimageUploader(direct, large, threshold), direct, large +} diff --git a/op-challenger2/game/fault/preimages/types.go b/op-challenger2/game/fault/preimages/types.go new file mode 100644 index 000000000000..872be288a9fd --- /dev/null +++ b/op-challenger2/game/fault/preimages/types.go @@ -0,0 +1,38 @@ +package preimages + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/merkle" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" +) + +var ErrNilPreimageData = fmt.Errorf("cannot upload nil preimage data") + +// PreimageUploader is responsible for posting preimages. 
+type PreimageUploader interface { + // UploadPreimage uploads the provided preimage. + UploadPreimage(ctx context.Context, claimIdx uint64, data *types.PreimageOracleData) error +} + +type TxSender interface { + From() common.Address + SendAndWaitSimple(txPurpose string, txs ...txmgr.TxCandidate) error +} + +// PreimageOracleContract is the interface for interacting with the PreimageOracle contract. +type PreimageOracleContract interface { + InitLargePreimage(uuid *big.Int, partOffset uint32, claimedSize uint32) (txmgr.TxCandidate, error) + AddLeaves(uuid *big.Int, startingBlockIndex *big.Int, input []byte, commitments []common.Hash, finalize bool) (txmgr.TxCandidate, error) + Squeeze(claimant common.Address, uuid *big.Int, prestateMatrix keccakTypes.StateSnapshot, preState keccakTypes.Leaf, preStateProof merkle.Proof, postState keccakTypes.Leaf, postStateProof merkle.Proof) (txmgr.TxCandidate, error) + CallSqueeze(ctx context.Context, claimant common.Address, uuid *big.Int, prestateMatrix keccakTypes.StateSnapshot, preState keccakTypes.Leaf, preStateProof merkle.Proof, postState keccakTypes.Leaf, postStateProof merkle.Proof) error + GetProposalMetadata(ctx context.Context, block rpcblock.Block, idents ...keccakTypes.LargePreimageIdent) ([]keccakTypes.LargePreimageMetaData, error) + ChallengePeriod(ctx context.Context) (uint64, error) + GetMinBondLPP(ctx context.Context) (*big.Int, error) +} diff --git a/op-challenger2/game/fault/register.go b/op-challenger2/game/fault/register.go new file mode 100644 index 000000000000..124b2668f482 --- /dev/null +++ b/op-challenger2/game/fault/register.go @@ -0,0 +1,379 @@ +package fault + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/claims" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + 
"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/alphabet" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/asterisc" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/cannon" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/outputs" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/prestates" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils" + faultTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-challenger2/game/scheduler" + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" +) + +type CloseFunc func() + +type Registry interface { + RegisterGameType(gameType uint32, creator scheduler.PlayerCreator) + RegisterBondContract(gameType uint32, creator claims.BondContractCreator) +} + +type OracleRegistry interface { + RegisterOracle(oracle keccakTypes.LargePreimageOracle) +} + +type PrestateSource interface { + // PrestatePath returns the path to the prestate file to use for the game. + // The provided prestateHash may be used to differentiate between different states but no guarantee is made that + // the returned prestate matches the supplied hash. 
+ PrestatePath(prestateHash common.Hash) (string, error) +} + +type RollupClient interface { + outputs.OutputRollupClient + SyncStatusProvider +} + +func RegisterGameTypes( + ctx context.Context, + systemClock clock.Clock, + l1Clock faultTypes.ClockReader, + logger log.Logger, + m metrics.Metricer, + cfg *config.Config, + registry Registry, + oracles OracleRegistry, + rollupClient RollupClient, + txSender TxSender, + gameFactory *contracts.DisputeGameFactoryContract, + caller *batching.MultiCaller, + l1HeaderSource L1HeaderSource, + selective bool, + claimants []common.Address, +) (CloseFunc, error) { + l2Client, err := ethclient.DialContext(ctx, cfg.L2Rpc) + if err != nil { + return nil, fmt.Errorf("dial l2 client %v: %w", cfg.L2Rpc, err) + } + syncValidator := newSyncStatusValidator(rollupClient) + + if cfg.TraceTypeEnabled(config.TraceTypeCannon) { + if err := registerCannon(faultTypes.CannonGameType, registry, oracles, ctx, systemClock, l1Clock, logger, m, cfg, syncValidator, rollupClient, txSender, gameFactory, caller, l2Client, l1HeaderSource, selective, claimants); err != nil { + return nil, fmt.Errorf("failed to register cannon game type: %w", err) + } + } + if cfg.TraceTypeEnabled(config.TraceTypePermissioned) { + if err := registerCannon(faultTypes.PermissionedGameType, registry, oracles, ctx, systemClock, l1Clock, logger, m, cfg, syncValidator, rollupClient, txSender, gameFactory, caller, l2Client, l1HeaderSource, selective, claimants); err != nil { + return nil, fmt.Errorf("failed to register permissioned cannon game type: %w", err) + } + } + if cfg.TraceTypeEnabled(config.TraceTypeAsterisc) { + if err := registerAsterisc(faultTypes.AsteriscGameType, registry, oracles, ctx, systemClock, l1Clock, logger, m, cfg, syncValidator, rollupClient, txSender, gameFactory, caller, l2Client, l1HeaderSource, selective, claimants); err != nil { + return nil, fmt.Errorf("failed to register asterisc game type: %w", err) + } + } + if 
cfg.TraceTypeEnabled(config.TraceTypeAlphabet) { + if err := registerAlphabet(registry, oracles, ctx, systemClock, l1Clock, logger, m, syncValidator, rollupClient, l2Client, txSender, gameFactory, caller, l1HeaderSource, selective, claimants); err != nil { + return nil, fmt.Errorf("failed to register alphabet game type: %w", err) + } + } + return l2Client.Close, nil +} + +func registerAlphabet( + registry Registry, + oracles OracleRegistry, + ctx context.Context, + systemClock clock.Clock, + l1Clock faultTypes.ClockReader, + logger log.Logger, + m metrics.Metricer, + syncValidator SyncValidator, + rollupClient RollupClient, + l2Client utils.L2HeaderSource, + txSender TxSender, + gameFactory *contracts.DisputeGameFactoryContract, + caller *batching.MultiCaller, + l1HeaderSource L1HeaderSource, + selective bool, + claimants []common.Address, +) error { + playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { + contract, err := contracts.NewFaultDisputeGameContract(ctx, m, game.Proxy, caller) + if err != nil { + return nil, fmt.Errorf("failed to create fault dispute game contract: %w", err) + } + oracle, err := contract.GetOracle(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load oracle for game %v: %w", game.Proxy, err) + } + oracles.RegisterOracle(oracle) + prestateBlock, poststateBlock, err := contract.GetBlockRange(ctx) + if err != nil { + return nil, err + } + splitDepth, err := contract.GetSplitDepth(ctx) + if err != nil { + return nil, err + } + l1Head, err := loadL1Head(contract, ctx, l1HeaderSource) + if err != nil { + return nil, err + } + prestateProvider := outputs.NewPrestateProvider(rollupClient, prestateBlock) + creator := func(ctx context.Context, logger log.Logger, gameDepth faultTypes.Depth, dir string) (faultTypes.TraceAccessor, error) { + accessor, err := outputs.NewOutputAlphabetTraceAccessor(logger, m, prestateProvider, rollupClient, l2Client, l1Head, splitDepth, prestateBlock, poststateBlock) + if 
err != nil { + return nil, err + } + return accessor, nil + } + prestateValidator := NewPrestateValidator("alphabet", contract.GetAbsolutePrestateHash, alphabet.PrestateProvider) + startingValidator := NewPrestateValidator("output root", contract.GetStartingRootHash, prestateProvider) + return NewGamePlayer(ctx, systemClock, l1Clock, logger, m, dir, game.Proxy, txSender, contract, syncValidator, []Validator{prestateValidator, startingValidator}, creator, l1HeaderSource, selective, claimants) + } + err := registerOracle(ctx, m, oracles, gameFactory, caller, faultTypes.AlphabetGameType) + if err != nil { + return err + } + registry.RegisterGameType(faultTypes.AlphabetGameType, playerCreator) + + contractCreator := func(game types.GameMetadata) (claims.BondContract, error) { + return contracts.NewFaultDisputeGameContract(ctx, m, game.Proxy, caller) + } + registry.RegisterBondContract(faultTypes.AlphabetGameType, contractCreator) + return nil +} + +func registerOracle(ctx context.Context, m metrics.Metricer, oracles OracleRegistry, gameFactory *contracts.DisputeGameFactoryContract, caller *batching.MultiCaller, gameType uint32) error { + implAddr, err := gameFactory.GetGameImpl(ctx, gameType) + if err != nil { + return fmt.Errorf("failed to load implementation for game type %v: %w", gameType, err) + } + contract, err := contracts.NewFaultDisputeGameContract(ctx, m, implAddr, caller) + if err != nil { + return fmt.Errorf("failed to create fault dispute game contracts: %w", err) + } + oracle, err := contract.GetOracle(ctx) + if err != nil { + return fmt.Errorf("failed to load oracle address: %w", err) + } + oracles.RegisterOracle(oracle) + return nil +} + +func registerAsterisc( + gameType uint32, + registry Registry, + oracles OracleRegistry, + ctx context.Context, + systemClock clock.Clock, + l1Clock faultTypes.ClockReader, + logger log.Logger, + m metrics.Metricer, + cfg *config.Config, + syncValidator SyncValidator, + rollupClient outputs.OutputRollupClient, + 
txSender TxSender, + gameFactory *contracts.DisputeGameFactoryContract, + caller *batching.MultiCaller, + l2Client utils.L2HeaderSource, + l1HeaderSource L1HeaderSource, + selective bool, + claimants []common.Address, +) error { + var prestateSource PrestateSource + if cfg.AsteriscAbsolutePreStateBaseURL != nil { + prestateSource = prestates.NewMultiPrestateProvider(cfg.AsteriscAbsolutePreStateBaseURL, filepath.Join(cfg.Datadir, "asterisc-prestates")) + } else { + prestateSource = prestates.NewSinglePrestateSource(cfg.AsteriscAbsolutePreState) + } + prestateProviderCache := prestates.NewPrestateProviderCache(m, fmt.Sprintf("prestates-%v", gameType), func(prestateHash common.Hash) (faultTypes.PrestateProvider, error) { + prestatePath, err := prestateSource.PrestatePath(prestateHash) + if err != nil { + return nil, fmt.Errorf("required prestate %v not available: %w", prestateHash, err) + } + return asterisc.NewPrestateProvider(prestatePath), nil + }) + playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { + contract, err := contracts.NewFaultDisputeGameContract(ctx, m, game.Proxy, caller) + if err != nil { + return nil, fmt.Errorf("failed to create fault dispute game contracts: %w", err) + } + requiredPrestatehash, err := contract.GetAbsolutePrestateHash(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load prestate hash for game %v: %w", game.Proxy, err) + } + asteriscPrestateProvider, err := prestateProviderCache.GetOrCreate(requiredPrestatehash) + if err != nil { + return nil, fmt.Errorf("required prestate %v not available for game %v: %w", requiredPrestatehash, game.Proxy, err) + } + + oracle, err := contract.GetOracle(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load oracle for game %v: %w", game.Proxy, err) + } + oracles.RegisterOracle(oracle) + prestateBlock, poststateBlock, err := contract.GetBlockRange(ctx) + if err != nil { + return nil, err + } + splitDepth, err := contract.GetSplitDepth(ctx) + 
if err != nil { + return nil, fmt.Errorf("failed to load split depth: %w", err) + } + l1HeadID, err := loadL1Head(contract, ctx, l1HeaderSource) + if err != nil { + return nil, err + } + prestateProvider := outputs.NewPrestateProvider(rollupClient, prestateBlock) + creator := func(ctx context.Context, logger log.Logger, gameDepth faultTypes.Depth, dir string) (faultTypes.TraceAccessor, error) { + asteriscPrestate, err := prestateSource.PrestatePath(requiredPrestatehash) + if err != nil { + return nil, fmt.Errorf("failed to get asterisc prestate: %w", err) + } + accessor, err := outputs.NewOutputAsteriscTraceAccessor(logger, m, cfg, l2Client, prestateProvider, asteriscPrestate, rollupClient, dir, l1HeadID, splitDepth, prestateBlock, poststateBlock) + if err != nil { + return nil, err + } + return accessor, nil + } + prestateValidator := NewPrestateValidator("asterisc", contract.GetAbsolutePrestateHash, asteriscPrestateProvider) + genesisValidator := NewPrestateValidator("output root", contract.GetStartingRootHash, prestateProvider) + return NewGamePlayer(ctx, systemClock, l1Clock, logger, m, dir, game.Proxy, txSender, contract, syncValidator, []Validator{prestateValidator, genesisValidator}, creator, l1HeaderSource, selective, claimants) + } + err := registerOracle(ctx, m, oracles, gameFactory, caller, gameType) + if err != nil { + return err + } + registry.RegisterGameType(gameType, playerCreator) + + contractCreator := func(game types.GameMetadata) (claims.BondContract, error) { + return contracts.NewFaultDisputeGameContract(ctx, m, game.Proxy, caller) + } + registry.RegisterBondContract(gameType, contractCreator) + return nil +} + +func registerCannon( + gameType uint32, + registry Registry, + oracles OracleRegistry, + ctx context.Context, + systemClock clock.Clock, + l1Clock faultTypes.ClockReader, + logger log.Logger, + m metrics.Metricer, + cfg *config.Config, + syncValidator SyncValidator, + rollupClient outputs.OutputRollupClient, + txSender TxSender, + 
gameFactory *contracts.DisputeGameFactoryContract, + caller *batching.MultiCaller, + l2Client utils.L2HeaderSource, + l1HeaderSource L1HeaderSource, + selective bool, + claimants []common.Address, +) error { + var prestateSource PrestateSource + if cfg.CannonAbsolutePreStateBaseURL != nil { + prestateSource = prestates.NewMultiPrestateProvider(cfg.CannonAbsolutePreStateBaseURL, filepath.Join(cfg.Datadir, "cannon-prestates")) + } else { + prestateSource = prestates.NewSinglePrestateSource(cfg.CannonAbsolutePreState) + } + prestateProviderCache := prestates.NewPrestateProviderCache(m, fmt.Sprintf("prestates-%v", gameType), func(prestateHash common.Hash) (faultTypes.PrestateProvider, error) { + prestatePath, err := prestateSource.PrestatePath(prestateHash) + if err != nil { + return nil, fmt.Errorf("required prestate %v not available: %w", prestateHash, err) + } + return cannon.NewPrestateProvider(prestatePath), nil + }) + playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { + contract, err := contracts.NewFaultDisputeGameContract(ctx, m, game.Proxy, caller) + if err != nil { + return nil, fmt.Errorf("failed to create fault dispute game contracts: %w", err) + } + requiredPrestatehash, err := contract.GetAbsolutePrestateHash(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load prestate hash for game %v: %w", game.Proxy, err) + } + + cannonPrestateProvider, err := prestateProviderCache.GetOrCreate(requiredPrestatehash) + + if err != nil { + return nil, fmt.Errorf("required prestate %v not available for game %v: %w", requiredPrestatehash, game.Proxy, err) + } + + oracle, err := contract.GetOracle(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load oracle for game %v: %w", game.Proxy, err) + } + oracles.RegisterOracle(oracle) + prestateBlock, poststateBlock, err := contract.GetBlockRange(ctx) + if err != nil { + return nil, err + } + splitDepth, err := contract.GetSplitDepth(ctx) + if err != nil { + return nil, 
fmt.Errorf("failed to load split depth: %w", err) + } + l1HeadID, err := loadL1Head(contract, ctx, l1HeaderSource) + if err != nil { + return nil, err + } + prestateProvider := outputs.NewPrestateProvider(rollupClient, prestateBlock) + creator := func(ctx context.Context, logger log.Logger, gameDepth faultTypes.Depth, dir string) (faultTypes.TraceAccessor, error) { + cannonPrestate, err := prestateSource.PrestatePath(requiredPrestatehash) + if err != nil { + return nil, fmt.Errorf("failed to get cannon prestate: %w", err) + } + accessor, err := outputs.NewOutputCannonTraceAccessor(logger, m, cfg, l2Client, prestateProvider, cannonPrestate, rollupClient, dir, l1HeadID, splitDepth, prestateBlock, poststateBlock) + if err != nil { + return nil, err + } + return accessor, nil + } + prestateValidator := NewPrestateValidator("cannon", contract.GetAbsolutePrestateHash, cannonPrestateProvider) + startingValidator := NewPrestateValidator("output root", contract.GetStartingRootHash, prestateProvider) + return NewGamePlayer(ctx, systemClock, l1Clock, logger, m, dir, game.Proxy, txSender, contract, syncValidator, []Validator{prestateValidator, startingValidator}, creator, l1HeaderSource, selective, claimants) + } + err := registerOracle(ctx, m, oracles, gameFactory, caller, gameType) + if err != nil { + return err + } + registry.RegisterGameType(gameType, playerCreator) + + contractCreator := func(game types.GameMetadata) (claims.BondContract, error) { + return contracts.NewFaultDisputeGameContract(ctx, m, game.Proxy, caller) + } + registry.RegisterBondContract(gameType, contractCreator) + return nil +} + +func loadL1Head(contract contracts.FaultDisputeGameContract, ctx context.Context, l1HeaderSource L1HeaderSource) (eth.BlockID, error) { + l1Head, err := contract.GetL1Head(ctx) + if err != nil { + return eth.BlockID{}, fmt.Errorf("failed to load L1 head: %w", err) + } + l1Header, err := l1HeaderSource.HeaderByHash(ctx, l1Head) + if err != nil { + return eth.BlockID{}, 
fmt.Errorf("failed to load L1 header: %w", err) + } + return eth.HeaderBlockID(l1Header), nil +} diff --git a/op-challenger2/game/fault/responder/responder.go b/op-challenger2/game/fault/responder/responder.go new file mode 100644 index 000000000000..d12a6b9f6558 --- /dev/null +++ b/op-challenger2/game/fault/responder/responder.go @@ -0,0 +1,129 @@ +package responder + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/preimages" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +type GameContract interface { + CallResolve(ctx context.Context) (gameTypes.GameStatus, error) + ResolveTx() (txmgr.TxCandidate, error) + CallResolveClaim(ctx context.Context, claimIdx uint64) error + ResolveClaimTx(claimIdx uint64) (txmgr.TxCandidate, error) + AttackTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) + DefendTx(ctx context.Context, parent types.Claim, pivot common.Hash) (txmgr.TxCandidate, error) + StepTx(claimIdx uint64, isAttack bool, stateData []byte, proof []byte) (txmgr.TxCandidate, error) + ChallengeL2BlockNumberTx(challenge *types.InvalidL2BlockNumberChallenge) (txmgr.TxCandidate, error) +} + +type Oracle interface { + GlobalDataExists(ctx context.Context, data *types.PreimageOracleData) (bool, error) +} + +type TxSender interface { + SendAndWaitSimple(txPurpose string, txs ...txmgr.TxCandidate) error +} + +// FaultResponder implements the [Responder] interface to send onchain transactions. +type FaultResponder struct { + log log.Logger + sender TxSender + contract GameContract + uploader preimages.PreimageUploader + oracle Oracle +} + +// NewFaultResponder returns a new [FaultResponder]. 
+func NewFaultResponder(logger log.Logger, sender TxSender, contract GameContract, uploader preimages.PreimageUploader, oracle Oracle) (*FaultResponder, error) { + return &FaultResponder{ + log: logger, + sender: sender, + contract: contract, + uploader: uploader, + oracle: oracle, + }, nil +} + +// CallResolve determines if the resolve function on the fault dispute game contract +// would succeed. Returns the game status if the call would succeed, errors otherwise. +func (r *FaultResponder) CallResolve(ctx context.Context) (gameTypes.GameStatus, error) { + return r.contract.CallResolve(ctx) +} + +// Resolve executes a resolve transaction to resolve a fault dispute game. +func (r *FaultResponder) Resolve() error { + candidate, err := r.contract.ResolveTx() + if err != nil { + return err + } + + return r.sender.SendAndWaitSimple("resolve game", candidate) +} + +// CallResolveClaim determines if the resolveClaim function on the fault dispute game contract +// would succeed. +func (r *FaultResponder) CallResolveClaim(ctx context.Context, claimIdx uint64) error { + return r.contract.CallResolveClaim(ctx, claimIdx) +} + +// ResolveClaims executes resolveClaim transactions to resolve claims in a dispute game. +func (r *FaultResponder) ResolveClaims(claimIdxs ...uint64) error { + txs := make([]txmgr.TxCandidate, 0, len(claimIdxs)) + for _, claimIdx := range claimIdxs { + candidate, err := r.contract.ResolveClaimTx(claimIdx) + if err != nil { + return err + } + txs = append(txs, candidate) + } + return r.sender.SendAndWaitSimple("resolve claim", txs...) 
+} + +func (r *FaultResponder) PerformAction(ctx context.Context, action types.Action) error { + if action.OracleData != nil { + var preimageExists bool + var err error + if !action.OracleData.IsLocal { + preimageExists, err = r.oracle.GlobalDataExists(ctx, action.OracleData) + if err != nil { + return fmt.Errorf("failed to check if preimage exists: %w", err) + } + } + // Always upload local preimages + if !preimageExists { + err := r.uploader.UploadPreimage(ctx, uint64(action.ParentClaim.ContractIndex), action.OracleData) + if errors.Is(err, preimages.ErrChallengePeriodNotOver) { + r.log.Debug("Large Preimage Squeeze failed, challenge period not over") + return nil + } else if err != nil { + return fmt.Errorf("failed to upload preimage: %w", err) + } + } + } + var candidate txmgr.TxCandidate + var err error + switch action.Type { + case types.ActionTypeMove: + if action.IsAttack { + candidate, err = r.contract.AttackTx(ctx, action.ParentClaim, action.Value) + } else { + candidate, err = r.contract.DefendTx(ctx, action.ParentClaim, action.Value) + } + case types.ActionTypeStep: + candidate, err = r.contract.StepTx(uint64(action.ParentClaim.ContractIndex), action.IsAttack, action.PreState, action.ProofData) + case types.ActionTypeChallengeL2BlockNumber: + candidate, err = r.contract.ChallengeL2BlockNumberTx(action.InvalidL2BlockNumberChallenge) + } + if err != nil { + return err + } + return r.sender.SendAndWaitSimple("perform action", candidate) +} diff --git a/op-challenger2/game/fault/responder/responder_test.go b/op-challenger2/game/fault/responder/responder_test.go new file mode 100644 index 000000000000..65517bc188fd --- /dev/null +++ b/op-challenger2/game/fault/responder/responder_test.go @@ -0,0 +1,436 @@ +package responder + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + 
"github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + + "github.com/stretchr/testify/require" +) + +var ( + mockPreimageUploadErr = errors.New("mock preimage upload error") + mockSendError = errors.New("mock send error") + mockCallError = errors.New("mock call error") + mockOracleExistsError = errors.New("mock oracle exists error") +) + +// TestCallResolve tests the [Responder.CallResolve]. +func TestCallResolve(t *testing.T) { + t.Run("SendFails", func(t *testing.T) { + responder, _, contract, _, _ := newTestFaultResponder(t) + contract.callFails = true + status, err := responder.CallResolve(context.Background()) + require.ErrorIs(t, err, mockCallError) + require.Equal(t, gameTypes.GameStatusInProgress, status) + require.Equal(t, 0, contract.calls) + }) + + t.Run("Success", func(t *testing.T) { + responder, _, contract, _, _ := newTestFaultResponder(t) + status, err := responder.CallResolve(context.Background()) + require.NoError(t, err) + require.Equal(t, gameTypes.GameStatusInProgress, status) + require.Equal(t, 1, contract.calls) + }) +} + +// TestResolve tests the [Responder.Resolve] method. 
+func TestResolve(t *testing.T) { + t.Run("SendFails", func(t *testing.T) { + responder, mockTxMgr, _, _, _ := newTestFaultResponder(t) + mockTxMgr.sendFails = true + err := responder.Resolve() + require.ErrorIs(t, err, mockSendError) + require.Equal(t, 0, mockTxMgr.sends) + }) + + t.Run("Success", func(t *testing.T) { + responder, mockTxMgr, _, _, _ := newTestFaultResponder(t) + err := responder.Resolve() + require.NoError(t, err) + require.Equal(t, 1, mockTxMgr.sends) + }) +} + +func TestCallResolveClaim(t *testing.T) { + t.Run("SendFails", func(t *testing.T) { + responder, _, contract, _, _ := newTestFaultResponder(t) + contract.callFails = true + err := responder.CallResolveClaim(context.Background(), 0) + require.ErrorIs(t, err, mockCallError) + require.Equal(t, 0, contract.calls) + }) + + t.Run("Success", func(t *testing.T) { + responder, _, contract, _, _ := newTestFaultResponder(t) + err := responder.CallResolveClaim(context.Background(), 0) + require.NoError(t, err) + require.Equal(t, 1, contract.calls) + }) +} + +func TestResolveClaim(t *testing.T) { + t.Run("SendFails", func(t *testing.T) { + responder, mockTxMgr, _, _, _ := newTestFaultResponder(t) + mockTxMgr.sendFails = true + err := responder.ResolveClaims(0) + require.ErrorIs(t, err, mockSendError) + require.Equal(t, 0, mockTxMgr.sends) + }) + + t.Run("Success", func(t *testing.T) { + responder, mockTxMgr, _, _, _ := newTestFaultResponder(t) + err := responder.ResolveClaims(0) + require.NoError(t, err) + require.Equal(t, 1, mockTxMgr.sends) + }) + + t.Run("Multiple", func(t *testing.T) { + responder, mockTxMgr, _, _, _ := newTestFaultResponder(t) + err := responder.ResolveClaims(0, 1, 2, 3) + require.NoError(t, err) + require.Equal(t, 4, mockTxMgr.sends) + }) +} + +// TestRespond tests the [Responder.Respond] method. 
func TestPerformAction(t *testing.T) {
	t.Run("send fails", func(t *testing.T) {
		responder, mockTxMgr, _, _, _ := newTestFaultResponder(t)
		mockTxMgr.sendFails = true
		err := responder.PerformAction(context.Background(), types.Action{
			Type:        types.ActionTypeMove,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    true,
			Value:       common.Hash{0xaa},
		})
		require.ErrorIs(t, err, mockSendError)
		require.Equal(t, 0, mockTxMgr.sends)
	})

	t.Run("sends response", func(t *testing.T) {
		responder, mockTxMgr, _, _, _ := newTestFaultResponder(t)
		err := responder.PerformAction(context.Background(), types.Action{
			Type:        types.ActionTypeMove,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    true,
			Value:       common.Hash{0xaa},
		})
		require.NoError(t, err)
		require.Equal(t, 1, mockTxMgr.sends)
	})

	t.Run("attack", func(t *testing.T) {
		// An attack move must call AttackTx with the parent claim and value.
		responder, mockTxMgr, contract, _, _ := newTestFaultResponder(t)
		action := types.Action{
			Type:        types.ActionTypeMove,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    true,
			Value:       common.Hash{0xaa},
		}
		err := responder.PerformAction(context.Background(), action)
		require.NoError(t, err)

		require.Len(t, mockTxMgr.sent, 1)
		require.EqualValues(t, []interface{}{action.ParentClaim, action.Value}, contract.attackArgs)
		require.Equal(t, ([]byte)("attack"), mockTxMgr.sent[0].TxData)
	})

	t.Run("defend", func(t *testing.T) {
		// A non-attack move must call DefendTx instead.
		responder, mockTxMgr, contract, _, _ := newTestFaultResponder(t)
		action := types.Action{
			Type:        types.ActionTypeMove,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    false,
			Value:       common.Hash{0xaa},
		}
		err := responder.PerformAction(context.Background(), action)
		require.NoError(t, err)

		require.Len(t, mockTxMgr.sent, 1)
		require.EqualValues(t, []interface{}{action.ParentClaim, action.Value}, contract.defendArgs)
		require.Equal(t, ([]byte)("defend"), mockTxMgr.sent[0].TxData)
	})

	t.Run("step", func(t *testing.T) {
		responder, mockTxMgr, contract, _, _ := newTestFaultResponder(t)
		action := types.Action{
			Type:        types.ActionTypeStep,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    true,
			PreState:    []byte{1, 2, 3},
			ProofData:   []byte{4, 5, 6},
		}
		err := responder.PerformAction(context.Background(), action)
		require.NoError(t, err)

		require.Len(t, mockTxMgr.sent, 1)
		require.EqualValues(t, []interface{}{uint64(123), action.IsAttack, action.PreState, action.ProofData}, contract.stepArgs)
		require.Equal(t, ([]byte)("step"), mockTxMgr.sent[0].TxData)
	})

	t.Run("stepWithLocalOracleData", func(t *testing.T) {
		// Local preimages are always uploaded; no existence check is performed.
		responder, mockTxMgr, contract, uploader, oracle := newTestFaultResponder(t)
		action := types.Action{
			Type:        types.ActionTypeStep,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    true,
			PreState:    []byte{1, 2, 3},
			ProofData:   []byte{4, 5, 6},
			OracleData: &types.PreimageOracleData{
				IsLocal: true,
			},
		}
		err := responder.PerformAction(context.Background(), action)
		require.NoError(t, err)

		require.Len(t, mockTxMgr.sent, 1)
		require.Nil(t, contract.updateOracleArgs) // mock uploader returns nil
		require.Equal(t, ([]byte)("step"), mockTxMgr.sent[0].TxData)
		require.Equal(t, 1, uploader.updates)
		require.Equal(t, 0, oracle.existCalls)
	})

	t.Run("stepWithGlobalOracleData", func(t *testing.T) {
		// Global preimages trigger an existence check before uploading.
		responder, mockTxMgr, contract, uploader, oracle := newTestFaultResponder(t)
		action := types.Action{
			Type:        types.ActionTypeStep,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    true,
			PreState:    []byte{1, 2, 3},
			ProofData:   []byte{4, 5, 6},
			OracleData: &types.PreimageOracleData{
				IsLocal: false,
			},
		}
		err := responder.PerformAction(context.Background(), action)
		require.NoError(t, err)

		require.Len(t, mockTxMgr.sent, 1)
		require.Nil(t, contract.updateOracleArgs) // mock uploader returns nil
		require.Equal(t, ([]byte)("step"), mockTxMgr.sent[0].TxData)
		require.Equal(t, 1, uploader.updates)
		require.Equal(t, 1, oracle.existCalls)
	})

	t.Run("stepWithOracleDataAndUploadFails", func(t *testing.T) {
		// A failed upload must abort the action - nothing is sent.
		responder, mockTxMgr, contract, uploader, _ := newTestFaultResponder(t)
		uploader.uploadFails = true
		action := types.Action{
			Type:        types.ActionTypeStep,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    true,
			PreState:    []byte{1, 2, 3},
			ProofData:   []byte{4, 5, 6},
			OracleData: &types.PreimageOracleData{
				IsLocal: true,
			},
		}
		err := responder.PerformAction(context.Background(), action)
		require.ErrorIs(t, err, mockPreimageUploadErr)
		require.Len(t, mockTxMgr.sent, 0)
		require.Nil(t, contract.updateOracleArgs) // mock uploader returns nil
		require.Equal(t, 1, uploader.updates)
	})

	t.Run("stepWithOracleDataAndGlobalPreimageAlreadyExists", func(t *testing.T) {
		// When the global preimage already exists the upload is skipped.
		responder, mockTxMgr, contract, uploader, oracle := newTestFaultResponder(t)
		oracle.existsResult = true
		action := types.Action{
			Type:        types.ActionTypeStep,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    true,
			PreState:    []byte{1, 2, 3},
			ProofData:   []byte{4, 5, 6},
			OracleData: &types.PreimageOracleData{
				IsLocal: false,
			},
		}
		err := responder.PerformAction(context.Background(), action)
		require.Nil(t, err)
		require.Len(t, mockTxMgr.sent, 1)
		require.Nil(t, contract.updateOracleArgs) // mock uploader returns nil
		require.Equal(t, 0, uploader.updates)
		require.Equal(t, 1, oracle.existCalls)
	})

	t.Run("stepWithOracleDataAndGlobalPreimageExistsFails", func(t *testing.T) {
		// A failed existence check must abort the action - nothing is sent.
		responder, mockTxMgr, contract, uploader, oracle := newTestFaultResponder(t)
		oracle.existsFails = true
		action := types.Action{
			Type:        types.ActionTypeStep,
			ParentClaim: types.Claim{ContractIndex: 123},
			IsAttack:    true,
			PreState:    []byte{1, 2, 3},
			ProofData:   []byte{4, 5, 6},
			OracleData: &types.PreimageOracleData{
				IsLocal: false,
			},
		}
		err := responder.PerformAction(context.Background(), action)
		require.ErrorIs(t, err, mockOracleExistsError)
		require.Len(t, mockTxMgr.sent, 0)
		require.Nil(t, contract.updateOracleArgs) // mock uploader returns nil
		require.Equal(t, 0, uploader.updates)
		require.Equal(t, 1, oracle.existCalls)
	})

	t.Run("challengeL2Block", func(t *testing.T) {
		responder, mockTxMgr, contract, _, _ := newTestFaultResponder(t)
		challenge := &types.InvalidL2BlockNumberChallenge{}
		action := types.Action{
			Type:                          types.ActionTypeChallengeL2BlockNumber,
			InvalidL2BlockNumberChallenge: challenge,
		}
		err := responder.PerformAction(context.Background(), action)
		require.NoError(t, err)
		require.Len(t, mockTxMgr.sent, 1)
		require.Equal(t, []interface{}{challenge}, contract.challengeArgs)
	})
}

// newTestFaultResponder wires a FaultResponder to fresh mocks and returns all of them
// so tests can configure failures and inspect recorded calls.
func newTestFaultResponder(t *testing.T) (*FaultResponder, *mockTxManager, *mockContract, *mockPreimageUploader, *mockOracle) {
	log := testlog.Logger(t, log.LevelError)
	mockTxMgr := &mockTxManager{}
	contract := &mockContract{}
	uploader := &mockPreimageUploader{}
	oracle := &mockOracle{}
	responder, err := NewFaultResponder(log, mockTxMgr, contract, uploader, oracle)
	require.NoError(t, err)
	return responder, mockTxMgr, contract, uploader, oracle
}

// mockPreimageUploader counts uploads and optionally fails them.
type mockPreimageUploader struct {
	updates     int  // number of UploadPreimage calls (counted even when failing)
	uploadFails bool // when true, UploadPreimage returns mockPreimageUploadErr
}

func (m *mockPreimageUploader) UploadPreimage(ctx context.Context, parent uint64, data *types.PreimageOracleData) error {
	m.updates++
	if m.uploadFails {
		return mockPreimageUploadErr
	}
	return nil
}

// mockOracle stubs the global preimage existence check.
type mockOracle struct {
	existCalls   int  // number of GlobalDataExists calls
	existsResult bool // result returned on success
	existsFails  bool // when true, GlobalDataExists returns mockOracleExistsError
}

func (m *mockOracle) GlobalDataExists(ctx context.Context, data *types.PreimageOracleData) (bool, error) {
	m.existCalls++
	if m.existsFails {
		return false, mockOracleExistsError
	}
	return m.existsResult, nil
}

// mockTxManager records sent transaction candidates and optionally fails sends.
type mockTxManager struct {
	from      common.Address
	sends     int // successful sends only
	sent      []txmgr.TxCandidate
	sendFails bool
}

func (m *mockTxManager) SendAndWaitSimple(_ string, txs ...txmgr.TxCandidate) error {
	for _, tx := range txs {
		if m.sendFails {
			return mockSendError
		}
		m.sends++
		m.sent = append(m.sent, tx)
	}
	return nil
}

func (m *mockTxManager) BlockNumber(_ context.Context) (uint64, error) {
	panic("not implemented")
}

func (m *mockTxManager) From() common.Address {
	return m.from
}

func (m *mockTxManager) Close() {
}

// mockContract records the arguments of every transaction-building call and can be
// configured to fail the read-only Call* methods.
type mockContract struct {
	calls                int  // successful CallResolve/CallResolveClaim invocations
	callFails            bool // when true, Call* methods return mockCallError
	attackArgs           []interface{}
	defendArgs           []interface{}
	stepArgs             []interface{}
	challengeArgs        []interface{}
	updateOracleClaimIdx uint64
	updateOracleArgs     *types.PreimageOracleData
}

func (m *mockContract) CallResolve(_ context.Context) (gameTypes.GameStatus, error) {
	if m.callFails {
		return gameTypes.GameStatusInProgress, mockCallError
	}
	m.calls++
	return gameTypes.GameStatusInProgress, nil
}

func (m *mockContract) ResolveTx() (txmgr.TxCandidate, error) {
	return txmgr.TxCandidate{}, nil
}

func (m *mockContract) CallResolveClaim(_ context.Context, _ uint64) error {
	if m.callFails {
		return mockCallError
	}
	m.calls++
	return nil
}

func (m *mockContract) ResolveClaimTx(_ uint64) (txmgr.TxCandidate, error) {
	return txmgr.TxCandidate{}, nil
}

func (m *mockContract) ChallengeL2BlockNumberTx(challenge *types.InvalidL2BlockNumberChallenge) (txmgr.TxCandidate, error) {
	m.challengeArgs = []interface{}{challenge}
	return txmgr.TxCandidate{TxData: ([]byte)("challenge")}, nil
}

func (m *mockContract) AttackTx(_ context.Context, parent types.Claim, claim common.Hash) (txmgr.TxCandidate, error) {
	m.attackArgs = []interface{}{parent, claim}
	return txmgr.TxCandidate{TxData: ([]byte)("attack")}, nil
}

func (m *mockContract) DefendTx(_ context.Context, parent types.Claim, claim common.Hash) (txmgr.TxCandidate, error) {
	m.defendArgs = []interface{}{parent, claim}
	return txmgr.TxCandidate{TxData: ([]byte)("defend")}, nil
}

func (m *mockContract) StepTx(claimIdx uint64, isAttack bool, stateData []byte, proofData []byte) (txmgr.TxCandidate, error) {
	m.stepArgs = []interface{}{claimIdx, isAttack, stateData, proofData}
	return txmgr.TxCandidate{TxData: ([]byte)("step")}, nil
}

func (m *mockContract) UpdateOracleTx(_ context.Context, claimIdx uint64, data *types.PreimageOracleData) (txmgr.TxCandidate, error) {
	m.updateOracleClaimIdx = claimIdx
	m.updateOracleArgs = data
	return txmgr.TxCandidate{TxData: ([]byte)("updateOracle")}, nil
}

func (m *mockContract) GetCredit(_ context.Context, _ common.Address) (*big.Int, error) {
	return big.NewInt(5), nil
}

func (m *mockContract) ClaimCredit(_ common.Address) (txmgr.TxCandidate, error) {
	return txmgr.TxCandidate{TxData: ([]byte)("claimCredit")}, nil
}
diff --git a/op-challenger2/game/fault/solver/actors.go b/op-challenger2/game/fault/solver/actors.go
new file mode 100644
index 000000000000..9ba26732544e
--- /dev/null
+++ b/op-challenger2/game/fault/solver/actors.go
@@ -0,0 +1,136 @@
package solver

import (
	"testing"

	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/test"
	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types"
	"github.com/ethereum/go-ethereum/common"
)

// actor applies one round of moves to a test game. The returned bool reports
// whether the actor has no further moves to make (the game state is final for it).
type actor interface {
	Apply(t *testing.T, game types.Game, correctTrace types.TraceProvider) (types.Game, bool)
}

// actorFn adapts a plain function to the actor interface.
type actorFn func(t *testing.T, game types.Game, correctTrace types.TraceProvider) (types.Game, bool)

func (a actorFn) Apply(t *testing.T, game types.Game, correctTrace types.TraceProvider) (types.Game, bool) {
	return a(t, game, correctTrace)
}

// builderFn adapts a GameBuilder-based function to the actor interface.
type builderFn func(builder *test.GameBuilder) bool

func (a builderFn) Apply(t *testing.T, game types.Game, correctTrace types.TraceProvider) (types.Game, bool) {
	builder := test.NewGameBuilderFromGame(t, correctTrace, game)
	done := a(builder)
	return builder.Game, done
}

// combineActors runs all actors in sequence each round; the combination is done
// only when every component actor reports done.
func combineActors(actors ...actor) actor {
	return actorFn(func(t *testing.T, game types.Game, correctTrace types.TraceProvider) (types.Game, bool) {
		done := true
		for _, actor := range actors {
			newGame, actorDone := actor.Apply(t, game, correctTrace)
			game = newGame
			done = done && actorDone
		}
		return game, done
	})
}

// doNothingActor never makes a move and is immediately done.
var doNothingActor builderFn = func(builder *test.GameBuilder) bool {
	return true
}

var correctAttackLastClaim = respondLastClaim(func(seq *test.GameBuilderSeq) {
	seq.Attack()
})

var correctDefendLastClaim = respondLastClaim(func(seq *test.GameBuilderSeq) {
	if seq.IsRoot() {
		// Must attack the root
		seq.Attack()
	} else {
		seq.Defend()
	}
})

var incorrectAttackLastClaim = respondLastClaim(func(seq *test.GameBuilderSeq) {
	seq.Attack(test.WithValue(common.Hash{0xaa}))
})

var incorrectDefendLastClaim = respondLastClaim(func(seq *test.GameBuilderSeq) {
	if seq.IsRoot() {
		// Must attack the root
		seq.Attack(test.WithValue(common.Hash{0xdd}))
	} else {
		seq.Defend(test.WithValue(common.Hash{0xdd}))
	}
})

var attackEverythingCorrect = respondAllClaims(func(seq *test.GameBuilderSeq) {
	seq.Attack()
})

var defendEverythingCorrect = respondAllClaims(func(seq *test.GameBuilderSeq) {
	if seq.IsRoot() {
		// Must attack root
		seq.Attack()
	} else {
		seq.Defend()
	}
})

var attackEverythingIncorrect = respondAllClaims(func(seq *test.GameBuilderSeq) {
	seq.Attack(test.WithValue(common.Hash{0xaa}))
})

var defendEverythingIncorrect = respondAllClaims(func(seq *test.GameBuilderSeq) {
	if seq.IsRoot() {
		// Must attack root
		seq.Attack(test.WithValue(common.Hash{0xbb}))
	} else {
		seq.Defend(test.WithValue(common.Hash{0xbb}))
	}
})

// exhaustive responds to every claim with both correct and incorrect attacks
// (and defenses, below root) to explore the full move space.
var exhaustive = respondAllClaims(func(seq *test.GameBuilderSeq) {
	seq.Attack()
	seq.Attack(test.WithValue(common.Hash{0xaa}))
	if !seq.IsRoot() {
		seq.Defend()
		seq.Defend(test.WithValue(common.Hash{0xdd}))
	}
})

// respondLastClaim counters only the most recently posted claim each round.
func respondLastClaim(respond func(seq *test.GameBuilderSeq)) builderFn {
	return func(builder *test.GameBuilder) bool {
		seq := seqFromLastClaim(builder)
		if seq.IsMaxDepth() {
			// Can't counter the leaf claim
			return true
		}
		respond(seq)
		return false
	}
}

// respondAllClaims counters every non-leaf claim each round; done when no new
// claims were added (claim count unchanged).
func respondAllClaims(respond func(seq *test.GameBuilderSeq)) builderFn {
	return func(builder *test.GameBuilder) bool {
		startingCount := len(builder.Game.Claims())
		for _, claim := range builder.Game.Claims() {
			if claim.Depth() == builder.Game.MaxDepth() {
				continue
			}
			respond(builder.SeqFrom(claim))
		}
		finalCount := len(builder.Game.Claims())
		return finalCount == startingCount
	}
}

// seqFromLastClaim returns a builder sequence positioned at the newest claim.
func seqFromLastClaim(builder *test.GameBuilder) *test.GameBuilderSeq {
	claims := builder.Game.Claims()
	claim := claims[len(claims)-1]
	return builder.SeqFrom(claim)
}
diff --git a/op-challenger2/game/fault/solver/game_rules_test.go b/op-challenger2/game/fault/solver/game_rules_test.go
new file mode 100644
index 000000000000..af6845ccf5eb
--- /dev/null
+++ b/op-challenger2/game/fault/solver/game_rules_test.go
@@ -0,0 +1,130 @@
package solver

import (
	"testing"

	faultTypes "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types"
	gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types"
	"github.com/ethereum-optimism/optimism/op-dispute-mon/mon"
	"github.com/ethereum-optimism/optimism/op-dispute-mon/mon/transform"
	disputeTypes "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/require"
)

// verifyGameRules resolves the game and asserts the incentive-compatibility
// invariants that every finished test game must satisfy.
func verifyGameRules(t *testing.T, game types.Game, rootClaimCorrect bool) {
	actualResult, claimTree, resolvedGame := gameResult(game)

	verifyExpectedGameResult(t, rootClaimCorrect, actualResult)

	verifyNoChallengerClaimsWereSuccessfullyCountered(t, resolvedGame)
	verifyChallengerAlwaysWinsParentBond(t, resolvedGame)
	verifyChallengerNeverCountersAClaimTwice(t, claimTree)
}

// verifyExpectedGameResult verifies that valid output roots are successfully defended and invalid roots are challenged
// Rationale: Ensures
the game always and only allows valid output roots to be finalized.
func verifyExpectedGameResult(t *testing.T, rootClaimCorrect bool, actualResult gameTypes.GameStatus) {
	expectedResult := gameTypes.GameStatusChallengerWon
	if rootClaimCorrect {
		expectedResult = gameTypes.GameStatusDefenderWon
	}
	require.Equalf(t, expectedResult, actualResult, "Game should resolve correctly expected %v but was %v", expectedResult, actualResult)
}

// verifyNoChallengerClaimsWereSuccessfullyCountered verifies the challenger didn't lose any of its bonds
// Note that this also forbids the challenger losing a bond to itself since it shouldn't challenge its own claims
// Rationale: If honest actors lose their bond, it indicates that incentive compatibility is broken because honest actors
// lose money.
func verifyNoChallengerClaimsWereSuccessfullyCountered(t *testing.T, resolvedGame types.Game) {
	for _, claim := range resolvedGame.Claims() {
		if claim.Claimant != challengerAddr {
			continue
		}
		if claim.CounteredBy != (common.Address{}) {
			t.Fatalf("Challenger posted claim %v but it was countered by someone else:\n%v", claim.ContractIndex, printClaim(claim, resolvedGame))
		}
	}
}

// verifyChallengerAlwaysWinsParentBond verifies that the challenger is always allocated the bond of any parent claim it
// counters.
// Rationale: If an honest action does not win the bond for countering a claim, incentive compatibility is broken because
// honest actors are not being paid to perform their job (or the challenger is posting unnecessary claims)
func verifyChallengerAlwaysWinsParentBond(t *testing.T, resolvedGame types.Game) {
	for _, claim := range resolvedGame.Claims() {
		if claim.Claimant != challengerAddr {
			continue
		}
		parent, err := resolvedGame.GetParent(claim)
		require.NoErrorf(t, err, "Failed to get parent of claim %v", claim.ContractIndex)
		require.Equal(t, challengerAddr, parent.CounteredBy,
			"Expected claim %v to have challenger as its claimant because of counter claim %v", parent.ContractIndex, claim.ContractIndex)
	}
}

// verifyChallengerNeverCountersAClaimTwice verifies that the challenger never posts more than one counter to a claim
// Rationale: The parent claim bond is only intended to cover costs of a single counter claim so incentive compatibility
// is broken if the challenger needs to post multiple claims. Or if the claim wasn't required, the challenger is just
// wasting money posting unnecessary claims.
func verifyChallengerNeverCountersAClaimTwice(t *testing.T, tree *disputeTypes.BidirectionalTree) {
	for _, claim := range tree.Claims {
		challengerCounterCount := 0
		for _, child := range claim.Children {
			if child.Claim.Claimant != challengerAddr {
				continue
			}
			challengerCounterCount++
		}
		require.LessOrEqualf(t, challengerCounterCount, 1, "Found multiple honest counters to claim %v", claim.Claim.ContractIndex)
	}
}

// enrichClaims converts op-challenger2 claims into op-dispute-mon enriched claims
// (via the op-challenger claim type) so the dispute-mon resolver can process them.
func enrichClaims(claims []types.Claim) []disputeTypes.EnrichedClaim {
	enriched := make([]disputeTypes.EnrichedClaim, len(claims))
	for i, claim := range claims {
		castedClaim := faultTypes.Claim{
			ClaimData: faultTypes.ClaimData{
				Value:    claim.ClaimData.Value,
				Bond:     claim.ClaimData.Bond,
				Position: faultTypes.NewPosition(faultTypes.Depth(claim.ClaimData.Position.Depth()), claim.ClaimData.Position.IndexAtDepth()),
			},
			CounteredBy: claim.CounteredBy,
			Claimant:    claim.Claimant,
			Clock: faultTypes.Clock{
				Duration:  claim.Clock.Duration,
				Timestamp: claim.Clock.Timestamp,
			},
			ContractIndex:       claim.ContractIndex,
			ParentContractIndex: claim.ParentContractIndex,
		}
		enriched[i] = disputeTypes.EnrichedClaim{Claim: castedClaim}
	}
	return enriched
}

// gameResult resolves the game with the dispute-mon resolver and converts the
// resolved claims back into op-challenger2 types for the rule checks above.
func gameResult(game types.Game) (gameTypes.GameStatus, *disputeTypes.BidirectionalTree, types.Game) {
	tree := transform.CreateBidirectionalTree(enrichClaims(game.Claims()))
	result := mon.Resolve(tree)
	resolvedClaims := make([]types.Claim, 0, len(tree.Claims))
	for _, claim := range tree.Claims {
		castedClaim := types.Claim{
			ClaimData: types.ClaimData{
				Value:    claim.Claim.ClaimData.Value,
				Bond:     claim.Claim.ClaimData.Bond,
				Position: types.NewPosition(types.Depth(claim.Claim.ClaimData.Position.Depth()), claim.Claim.ClaimData.Position.IndexAtDepth()),
			},
			CounteredBy: claim.Claim.CounteredBy,
			Claimant:    claim.Claim.Claimant,
			Clock: types.Clock{
				Duration:  claim.Claim.Clock.Duration,
				Timestamp: claim.Claim.Clock.Timestamp,
			},
			ContractIndex:       claim.Claim.ContractIndex,
			ParentContractIndex: claim.Claim.ParentContractIndex,
		}
		resolvedClaims = append(resolvedClaims, castedClaim)
	}
	return gameTypes.GameStatusToOPChallenger2GameStatus(result), tree, types.NewGameState(resolvedClaims, game.MaxDepth())
}
diff --git a/op-challenger2/game/fault/solver/game_solver.go b/op-challenger2/game/fault/solver/game_solver.go
new file mode 100644
index 000000000000..60951c64af2c
--- /dev/null
+++ b/op-challenger2/game/fault/solver/game_solver.go
@@ -0,0 +1,115 @@
package solver

import (
	"context"
	"errors"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types"
	"github.com/ethereum/go-ethereum/common"
)

// GameSolver computes the set of actions the honest actor should take in a game.
type GameSolver struct {
	claimSolver *claimSolver
}

func NewGameSolver(gameDepth types.Depth, trace types.TraceAccessor) *GameSolver {
	return &GameSolver{
		claimSolver: newClaimSolver(gameDepth, trace),
	}
}

// AgreeWithRootClaim reports whether the honest trace agrees with the game's root claim.
func (s *GameSolver) AgreeWithRootClaim(ctx context.Context, game types.Game) (bool, error) {
	return s.claimSolver.agreeWithClaim(ctx, game, game.Claims()[0])
}

// CalculateNextActions returns the actions (moves, steps or an L2 block number
// challenge) the honest actor should perform in response to the current game state.
func (s *GameSolver) CalculateNextActions(ctx context.Context, game types.Game) ([]types.Action, error) {
	agreeWithRootClaim, err := s.AgreeWithRootClaim(ctx, game)
	if err != nil {
		return nil, fmt.Errorf("failed to determine if root claim is correct: %w", err)
	}

	// Challenging the L2 block number will only work if we have the same output root as the claim
	// Otherwise our output root preimage won't match. We can just proceed and invalidate the output root by disputing claims instead.
	if agreeWithRootClaim {
		if challenge, err := s.claimSolver.trace.GetL2BlockNumberChallenge(ctx, game); errors.Is(err, types.ErrL2BlockNumberValid) {
			// We agree with the L2 block number, proceed to processing claims
		} else if err != nil {
			// Failed to check L2 block validity
			return nil, fmt.Errorf("failed to determine L2 block validity: %w", err)
		} else {
			// The L2 block number is invalid: a single challenge action supersedes all claim responses.
			return []types.Action{
				{
					Type:                          types.ActionTypeChallengeL2BlockNumber,
					InvalidL2BlockNumberChallenge: challenge,
				},
			}, nil
		}
	}

	var actions []types.Action
	agreedClaims := newHonestClaimTracker()
	if agreeWithRootClaim {
		// Register the root claim as honest so descendants are evaluated against it.
		agreedClaims.AddHonestClaim(types.Claim{}, game.Claims()[0])
	}
	for _, claim := range game.Claims() {
		var action *types.Action
		// Max-depth claims can only be countered by a step; all others by a move.
		if claim.Depth() == game.MaxDepth() {
			action, err = s.calculateStep(ctx, game, claim, agreedClaims)
		} else {
			action, err = s.calculateMove(ctx, game, claim, agreedClaims)
		}
		if err != nil {
			// Unable to continue iterating claims safely because we may not have tracked the required honest moves
			// for this claim which affects the response to later claims.
			// Any actions we've already identified are still safe to apply.
			return actions, fmt.Errorf("failed to determine response to claim %v: %w", claim.ContractIndex, err)
		}
		if action == nil {
			continue
		}
		actions = append(actions, *action)
	}
	return actions, nil
}

// calculateStep returns the step action countering a max-depth claim, or nil when
// the claim is already countered or no step is required.
func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim types.Claim, agreedClaims *honestClaimTracker) (*types.Action, error) {
	if claim.CounteredBy != (common.Address{}) {
		return nil, nil
	}
	step, err := s.claimSolver.AttemptStep(ctx, game, claim, agreedClaims)
	if err != nil {
		return nil, err
	}
	if step == nil {
		return nil, nil
	}
	return &types.Action{
		Type:        types.ActionTypeStep,
		ParentClaim: step.LeafClaim,
		IsAttack:    step.IsAttack,
		PreState:    step.PreState,
		ProofData:   step.ProofData,
		OracleData:  step.OracleData,
	}, nil
}

// calculateMove returns the move action countering a claim, or nil when no move is
// required or the move would duplicate an existing claim.
func (s *GameSolver) calculateMove(ctx context.Context, game types.Game, claim types.Claim, honestClaims *honestClaimTracker) (*types.Action, error) {
	move, err := s.claimSolver.NextMove(ctx, claim, game, honestClaims)
	if err != nil {
		return nil, fmt.Errorf("failed to calculate next move for claim index %v: %w", claim.ContractIndex, err)
	}
	if move == nil {
		return nil, nil
	}
	// Track the move as honest even when it's a duplicate so later claims see it.
	honestClaims.AddHonestClaim(claim, *move)
	if game.IsDuplicate(*move) {
		return nil, nil
	}
	return &types.Action{
		Type:        types.ActionTypeMove,
		IsAttack:    !game.DefendsParent(*move),
		ParentClaim: game.Claims()[move.ParentContractIndex],
		Value:       move.Value,
	}, nil
}
diff --git a/op-challenger2/game/fault/solver/game_solver_test.go b/op-challenger2/game/fault/solver/game_solver_test.go
new file mode 100644
index 000000000000..e12dd305cce1
--- /dev/null
+++ b/op-challenger2/game/fault/solver/game_solver_test.go
@@ -0,0 +1,360 @@
package solver

import (
	"context"
	"encoding/hex"
	"fmt"
	"math/big"
	"testing"

	faulttest "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/test"
	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace"
	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types"
	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/require"
)

// TestCalculateNextActions_ChallengeL2BlockNumber verifies the solver emits an
// L2 block number challenge action exactly when the trace provider supplies one.
func TestCalculateNextActions_ChallengeL2BlockNumber(t *testing.T) {
	startingBlock := big.NewInt(5)
	maxDepth := types.Depth(6)
	challenge := &types.InvalidL2BlockNumberChallenge{
		Output: &eth.OutputResponse{OutputRoot: eth.Bytes32{0xbb}},
	}
	claimBuilder := faulttest.NewAlphabetClaimBuilder(t, startingBlock, maxDepth)
	traceProvider := faulttest.NewAlphabetWithProofProvider(t, startingBlock, maxDepth, nil)
	solver := NewGameSolver(maxDepth, trace.NewSimpleTraceAccessor(traceProvider))

	// Do not challenge when provider returns error indicating l2 block is valid
	actions, err := solver.CalculateNextActions(context.Background(), claimBuilder.GameBuilder().Game)
	require.NoError(t, err)
	require.Len(t, actions, 0)

	// Do challenge when the provider returns a challenge
	traceProvider.L2BlockChallenge = challenge
	actions, err = solver.CalculateNextActions(context.Background(), claimBuilder.GameBuilder().Game)
	require.NoError(t, err)
	require.Len(t, actions, 1)
	action := actions[0]
	require.Equal(t, types.ActionTypeChallengeL2BlockNumber, action.Type)
	require.Equal(t, challenge, action.InvalidL2BlockNumberChallenge)
}

// TestCalculateNextActions runs table-driven scenarios asserting the honest
// actor's expected responses to various game states.
func TestCalculateNextActions(t *testing.T) {
	maxDepth := types.Depth(6)
	startingL2BlockNumber := big.NewInt(0)
	claimBuilder := faulttest.NewAlphabetClaimBuilder(t, startingL2BlockNumber, maxDepth)

	tests := []struct {
		name             string
		rootClaimCorrect bool
		setupGame        func(builder *faulttest.GameBuilder)
	}{
		{
			name: "AttackRootClaim",
			setupGame: func(builder *faulttest.GameBuilder) {
				builder.Seq().ExpectAttack()
			},
		},
		{
			name:             "DoNotAttackCorrectRootClaim_AgreeWithOutputRoot",
			rootClaimCorrect: true,
			setupGame:        func(builder *faulttest.GameBuilder) {},
		},
		{
			name:
"DoNotPerformDuplicateMoves", + setupGame: func(builder *faulttest.GameBuilder) { + // Expected move has already been made. + builder.Seq().Attack() + }, + }, + { + name: "RespondToAllClaimsAtDisagreeingLevel", + setupGame: func(builder *faulttest.GameBuilder) { + honestClaim := builder.Seq().Attack() + honestClaim.Attack().ExpectDefend() + honestClaim.Defend().ExpectDefend() + honestClaim.Attack(faulttest.WithValue(common.Hash{0xaa})).ExpectAttack() + honestClaim.Attack(faulttest.WithValue(common.Hash{0xbb})).ExpectAttack() + honestClaim.Defend(faulttest.WithValue(common.Hash{0xcc})).ExpectAttack() + honestClaim.Defend(faulttest.WithValue(common.Hash{0xdd})).ExpectAttack() + }, + }, + { + name: "StepAtMaxDepth", + setupGame: func(builder *faulttest.GameBuilder) { + lastHonestClaim := builder.Seq(). + Attack(). + Attack(). + Defend(). + Defend(). + Defend() + lastHonestClaim.Attack().ExpectStepDefend() + lastHonestClaim.Attack(faulttest.WithValue(common.Hash{0xdd})).ExpectStepAttack() + }, + }, + { + name: "PoisonedPreState", + setupGame: func(builder *faulttest.GameBuilder) { + // A claim hash that has no pre-image + maliciousStateHash := common.Hash{0x01, 0xaa} + + // Dishonest actor counters their own claims to set up a situation with an invalid prestate + // The honest actor should ignore path created by the dishonest actor, only supporting its own attack on the root claim + honestMove := builder.Seq().Attack() // This expected action is the winning move. + dishonestMove := honestMove.Attack(faulttest.WithValue(maliciousStateHash)) + // The expected action by the honest actor + dishonestMove.ExpectAttack() + // The honest actor will ignore this poisoned path + dishonestMove. + Defend(faulttest.WithValue(maliciousStateHash)). + Attack(faulttest.WithValue(maliciousStateHash)) + }, + }, + { + name: "Freeloader-ValidClaimAtInvalidAttackPosition", + setupGame: func(builder *faulttest.GameBuilder) { + builder.Seq(). + Attack(). 
// Honest response to invalid root + Defend().ExpectDefend(). // Defender agrees at this point, we should defend + Attack().ExpectDefend() // Freeloader attacks instead of defends + }, + }, + { + name: "Freeloader-InvalidClaimAtInvalidAttackPosition", + setupGame: func(builder *faulttest.GameBuilder) { + builder.Seq(). + Attack(). // Honest response to invalid root + Defend().ExpectDefend(). // Defender agrees at this point, we should defend + Attack(faulttest.WithValue(common.Hash{0xbb})).ExpectAttack() // Freeloader attacks with wrong claim instead of defends + }, + }, + { + name: "Freeloader-InvalidClaimAtValidDefensePosition", + setupGame: func(builder *faulttest.GameBuilder) { + builder.Seq(). + Attack(). // Honest response to invalid root + Defend().ExpectDefend(). // Defender agrees at this point, we should defend + Defend(faulttest.WithValue(common.Hash{0xbb})).ExpectAttack() // Freeloader defends with wrong claim, we should attack + }, + }, + { + name: "Freeloader-InvalidClaimAtValidAttackPosition", + setupGame: func(builder *faulttest.GameBuilder) { + builder.Seq(). + Attack(). // Honest response to invalid root + Defend(faulttest.WithValue(common.Hash{0xaa})).ExpectAttack(). // Defender disagrees at this point, we should attack + Attack(faulttest.WithValue(common.Hash{0xbb})).ExpectAttack() // Freeloader attacks with wrong claim instead of defends + }, + }, + { + name: "Freeloader-InvalidClaimAtInvalidDefensePosition", + setupGame: func(builder *faulttest.GameBuilder) { + builder.Seq(). + Attack(). // Honest response to invalid root + Defend(faulttest.WithValue(common.Hash{0xaa})).ExpectAttack(). // Defender disagrees at this point, we should attack + Defend(faulttest.WithValue(common.Hash{0xbb})) // Freeloader defends with wrong claim but we must not respond to avoid poisoning + }, + }, + { + name: "Freeloader-ValidClaimAtInvalidAttackPosition-RespondingToDishonestButCorrectAttack", + setupGame: func(builder *faulttest.GameBuilder) { + builder.Seq(). 
+ Attack(). // Honest response to invalid root + Attack().ExpectDefend(). // Defender attacks with correct value, we should defend + Attack().ExpectDefend() // Freeloader attacks with wrong claim, we should defend + }, + }, + { + name: "Freeloader-DoNotCounterOwnClaim", + setupGame: func(builder *faulttest.GameBuilder) { + builder.Seq(). + Attack(). // Honest response to invalid root + Attack().ExpectDefend(). // Defender attacks with correct value, we should defend + Attack(). // Freeloader attacks instead, we should defend + Defend() // We do defend and we shouldn't counter our own claim + }, + }, + { + name: "Freeloader-ContinueDefendingAgainstFreeloader", + setupGame: func(builder *faulttest.GameBuilder) { + builder.Seq(). // invalid root + Attack(). // Honest response to invalid root + Attack().ExpectDefend(). // Defender attacks with correct value, we should defend + Attack(). // Freeloader attacks instead, we should defend + Defend(). // We do defend + Attack(faulttest.WithValue(common.Hash{0xaa})). // freeloader attacks our defense, we should attack + ExpectAttack() + }, + }, + { + name: "Freeloader-FreeloaderCountersRootClaim", + setupGame: func(builder *faulttest.GameBuilder) { + builder.Seq(). + ExpectAttack(). // Honest response to invalid root + Attack(faulttest.WithValue(common.Hash{0xaa})). 
// freeloader + ExpectAttack() // Honest response to freeloader + }, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + builder := claimBuilder.GameBuilder(faulttest.WithInvalidValue(!test.rootClaimCorrect)) + test.setupGame(builder) + game := builder.Game + + solver := NewGameSolver(maxDepth, trace.NewSimpleTraceAccessor(claimBuilder.CorrectTraceProvider())) + postState, actions := runStep(t, solver, game, claimBuilder.CorrectTraceProvider()) + for i, action := range builder.ExpectedActions { + t.Logf("Expect %v: Type: %v, ParentIdx: %v, Attack: %v, Value: %v, PreState: %v, ProofData: %v", + i, action.Type, action.ParentClaim.ContractIndex, action.IsAttack, action.Value, hex.EncodeToString(action.PreState), hex.EncodeToString(action.ProofData)) + require.Containsf(t, actions, action, "Expected claim %v missing", i) + } + require.Len(t, actions, len(builder.ExpectedActions), "Incorrect number of actions") + + verifyGameRules(t, postState, test.rootClaimCorrect) + }) + } +} + +func runStep(t *testing.T, solver *GameSolver, game types.Game, correctTraceProvider types.TraceProvider) (types.Game, []types.Action) { + actions, err := solver.CalculateNextActions(context.Background(), game) + require.NoError(t, err) + + postState := applyActions(game, challengerAddr, actions) + + for i, action := range actions { + t.Logf("Move %v: Type: %v, ParentIdx: %v, Attack: %v, Value: %v, PreState: %v, ProofData: %v", + i, action.Type, action.ParentClaim.ContractIndex, action.IsAttack, action.Value, hex.EncodeToString(action.PreState), hex.EncodeToString(action.ProofData)) + // Check that every move the solver returns meets the generic validation rules + require.NoError(t, checkRules(game, action, correctTraceProvider), "Attempting to perform invalid action") + } + return postState, actions +} + +func TestMultipleRounds(t *testing.T) { + t.Parallel() + tests := []struct { + name string + actor actor + }{ + { + name: "SingleRoot", + 
actor: doNothingActor, + }, + { + name: "LinearAttackCorrect", + actor: correctAttackLastClaim, + }, + { + name: "LinearDefendCorrect", + actor: correctDefendLastClaim, + }, + { + name: "LinearAttackIncorrect", + actor: incorrectAttackLastClaim, + }, + { + name: "LinearDefendInorrect", + actor: incorrectDefendLastClaim, + }, + { + name: "LinearDefendIncorrectDefendCorrect", + actor: combineActors(incorrectDefendLastClaim, correctDefendLastClaim), + }, + { + name: "LinearAttackIncorrectDefendCorrect", + actor: combineActors(incorrectAttackLastClaim, correctDefendLastClaim), + }, + { + name: "LinearDefendIncorrectDefendIncorrect", + actor: combineActors(incorrectDefendLastClaim, incorrectDefendLastClaim), + }, + { + name: "LinearAttackIncorrectDefendIncorrect", + actor: combineActors(incorrectAttackLastClaim, incorrectDefendLastClaim), + }, + { + name: "AttackEverythingCorrect", + actor: attackEverythingCorrect, + }, + { + name: "DefendEverythingCorrect", + actor: defendEverythingCorrect, + }, + { + name: "AttackEverythingIncorrect", + actor: attackEverythingIncorrect, + }, + { + name: "DefendEverythingIncorrect", + actor: defendEverythingIncorrect, + }, + { + name: "Exhaustive", + actor: exhaustive, + }, + } + for _, test := range tests { + test := test + for _, rootClaimCorrect := range []bool{true, false} { + rootClaimCorrect := rootClaimCorrect + t.Run(fmt.Sprintf("%v-%v", test.name, rootClaimCorrect), func(t *testing.T) { + t.Parallel() + + maxDepth := types.Depth(6) + startingL2BlockNumber := big.NewInt(50) + claimBuilder := faulttest.NewAlphabetClaimBuilder(t, startingL2BlockNumber, maxDepth) + builder := claimBuilder.GameBuilder(faulttest.WithInvalidValue(!rootClaimCorrect)) + game := builder.Game + + correctTrace := claimBuilder.CorrectTraceProvider() + solver := NewGameSolver(maxDepth, trace.NewSimpleTraceAccessor(correctTrace)) + + roundNum := 0 + done := false + for !done { + t.Logf("------ ROUND %v ------", roundNum) + game, _ = runStep(t, solver, game, 
correctTrace) + verifyGameRules(t, game, rootClaimCorrect) + + game, done = test.actor.Apply(t, game, correctTrace) + roundNum++ + } + }) + } + } +} + +func applyActions(game types.Game, claimant common.Address, actions []types.Action) types.Game { + claims := game.Claims() + for _, action := range actions { + switch action.Type { + case types.ActionTypeMove: + newPosition := action.ParentClaim.Position.Attack() + if !action.IsAttack { + newPosition = action.ParentClaim.Position.Defend() + } + claim := types.Claim{ + ClaimData: types.ClaimData{ + Value: action.Value, + Bond: big.NewInt(0), + Position: newPosition, + }, + Claimant: claimant, + ContractIndex: len(claims), + ParentContractIndex: action.ParentClaim.ContractIndex, + } + claims = append(claims, claim) + case types.ActionTypeStep: + counteredClaim := claims[action.ParentClaim.ContractIndex] + counteredClaim.CounteredBy = claimant + claims[action.ParentClaim.ContractIndex] = counteredClaim + default: + panic(fmt.Errorf("unknown move type: %v", action.Type)) + } + } + return types.NewGameState(claims, game.MaxDepth()) +} diff --git a/op-challenger2/game/fault/solver/honest_claims.go b/op-challenger2/game/fault/solver/honest_claims.go new file mode 100644 index 000000000000..2378c5f526d1 --- /dev/null +++ b/op-challenger2/game/fault/solver/honest_claims.go @@ -0,0 +1,36 @@ +package solver + +import "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + +type honestClaimTracker struct { + // agreed tracks the existing claims in the game that the honest actor would make + // The claims may not yet have been made so are tracked by ClaimID not ContractIndex + agreed map[types.ClaimID]bool + + // counters tracks the counter claim for a claim by contract index. 
+ // The counter claim may not yet be part of the game state (ie it may be a move the honest actor is planning to make) + counters map[types.ClaimID]types.Claim +} + +func newHonestClaimTracker() *honestClaimTracker { + return &honestClaimTracker{ + agreed: make(map[types.ClaimID]bool), + counters: make(map[types.ClaimID]types.Claim), + } +} + +func (a *honestClaimTracker) AddHonestClaim(parent types.Claim, claim types.Claim) { + a.agreed[claim.ID()] = true + if parent != (types.Claim{}) { + a.counters[parent.ID()] = claim + } +} + +func (a *honestClaimTracker) IsHonest(claim types.Claim) bool { + return a.agreed[claim.ID()] +} + +func (a *honestClaimTracker) HonestCounter(parent types.Claim) (types.Claim, bool) { + counter, ok := a.counters[parent.ID()] + return counter, ok +} diff --git a/op-challenger2/game/fault/solver/honest_claims_test.go b/op-challenger2/game/fault/solver/honest_claims_test.go new file mode 100644 index 000000000000..878f64cc2f90 --- /dev/null +++ b/op-challenger2/game/fault/solver/honest_claims_test.go @@ -0,0 +1,38 @@ +package solver + +import ( + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/test" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/stretchr/testify/require" +) + +func TestHonestClaimTracker_RootClaim(t *testing.T) { + tracker := newHonestClaimTracker() + builder := test.NewAlphabetClaimBuilder(t, big.NewInt(3), 4) + + claim := builder.Seq().Get() + require.False(t, tracker.IsHonest(claim)) + + tracker.AddHonestClaim(types.Claim{}, claim) + require.True(t, tracker.IsHonest(claim)) +} + +func TestHonestClaimTracker_ChildClaim(t *testing.T) { + tracker := newHonestClaimTracker() + builder := test.NewAlphabetClaimBuilder(t, big.NewInt(3), 4) + + seq := builder.Seq().Attack().Defend() + parent := seq.Get() + child := seq.Attack().Get() + require.Zero(t, child.ContractIndex, "should work for claims that are not in the game state yet") + + 
tracker.AddHonestClaim(parent, child) + require.False(t, tracker.IsHonest(parent)) + require.True(t, tracker.IsHonest(child)) + counter, ok := tracker.HonestCounter(parent) + require.True(t, ok) + require.Equal(t, child, counter) +} diff --git a/op-challenger2/game/fault/solver/rules.go b/op-challenger2/game/fault/solver/rules.go new file mode 100644 index 000000000000..88d790296a78 --- /dev/null +++ b/op-challenger2/game/fault/solver/rules.go @@ -0,0 +1,279 @@ +package solver + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/big" + "slices" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +var challengerAddr = common.Address(bytes.Repeat([]byte{0xaa}, 20)) + +type actionRule func(game types.Game, action types.Action, correctTrace types.TraceProvider) error + +var rules = []actionRule{ + parentMustExist, + onlyStepAtMaxDepth, + onlyMoveBeforeMaxDepth, + doNotDuplicateExistingMoves, + doNotStepAlreadyCounteredClaims, + doNotDefendRootClaim, + avoidPoisonedPrestate, + detectPoisonedStepPrestate, + detectFailedStep, + doNotCounterSelf, +} + +func printClaim(claim types.Claim, game types.Game) string { + return fmt.Sprintf("Claim %v: Pos: %v TraceIdx: %v Depth: %v IndexAtDepth: %v ParentIdx: %v Value: %v Claimant: %v CounteredBy: %v", + claim.ContractIndex, claim.Position.ToGIndex(), claim.Position.TraceIndex(game.MaxDepth()), claim.Position.Depth(), claim.Position.IndexAtDepth(), claim.ParentContractIndex, claim.Value, claim.Claimant, claim.CounteredBy) +} + +func checkRules(game types.Game, action types.Action, correctTrace types.TraceProvider) error { + var errs []error + for _, rule := range rules { + errs = append(errs, rule(game, action, correctTrace)) + } + return errors.Join(errs...) 
+} + +// parentMustExist checks that every action performed has a valid parent claim +// Rationale: The action would be rejected by the contracts +func parentMustExist(game types.Game, action types.Action, _ types.TraceProvider) error { + if len(game.Claims()) <= action.ParentClaim.ContractIndex || action.ParentClaim.ContractIndex < 0 { + return fmt.Errorf("parent claim %v does not exist in game with %v claims", action.ParentClaim.ContractIndex, len(game.Claims())) + } + return nil +} + +// onlyStepAtMaxDepth verifies that step actions are only performed against leaf claims +// Rationale: The action would be rejected by the contracts +func onlyStepAtMaxDepth(game types.Game, action types.Action, _ types.TraceProvider) error { + if action.Type == types.ActionTypeStep { + return nil + } + parentDepth := game.Claims()[action.ParentClaim.ContractIndex].Position.Depth() + if parentDepth >= game.MaxDepth() { + return fmt.Errorf("parent at max depth (%v) but attempting to perform %v action instead of step", + parentDepth, action.Type) + } + return nil +} + +// onlyMoveBeforeMaxDepth verifies that move actions are not performed against leaf claims +// Rationale: The action would be rejected by the contracts +func onlyMoveBeforeMaxDepth(game types.Game, action types.Action, _ types.TraceProvider) error { + if action.Type == types.ActionTypeMove { + return nil + } + parentDepth := game.Claims()[action.ParentClaim.ContractIndex].Position.Depth() + if parentDepth < game.MaxDepth() { + return fmt.Errorf("parent (%v) not at max depth (%v) but attempting to perform %v action instead of move", + parentDepth, game.MaxDepth(), action.Type) + } + return nil +} + +// doNotDuplicateExistingMoves verifies that the challenger doesn't attempt to post a duplicate claim +// Rationale: The action would be rejected by the contracts +func doNotDuplicateExistingMoves(game types.Game, action types.Action, _ types.TraceProvider) error { + newClaimData := types.ClaimData{ + Value: action.Value, + 
Position: resultingPosition(game, action), + } + if game.IsDuplicate(types.Claim{ClaimData: newClaimData, ParentContractIndex: action.ParentClaim.ContractIndex}) { + return fmt.Errorf("creating duplicate claim at %v with value %v", newClaimData.Position.ToGIndex(), newClaimData.Value) + } + return nil +} + +// doNotStepAlreadyCounteredClaims checks the challenger does not attempt to call step on already countered claims +// Rationale: The step call is redundant and a waste of gas +func doNotStepAlreadyCounteredClaims(game types.Game, action types.Action, _ types.TraceProvider) error { + claim := game.Claims()[action.ParentClaim.ContractIndex] + if claim.CounteredBy != (common.Address{}) { + return fmt.Errorf("attempting to step already countered claim: %v", claim.ContractIndex) + } + return nil +} + +// doNotDefendRootClaim checks the challenger doesn't attempt to defend the root claim +// Rationale: The action would be rejected by the contracts +func doNotDefendRootClaim(game types.Game, action types.Action, _ types.TraceProvider) error { + if game.Claims()[action.ParentClaim.ContractIndex].IsRootPosition() && !action.IsAttack { + return fmt.Errorf("defending the root claim at idx %v", action.ParentClaim.ContractIndex) + } + return nil +} + +// doNotCounterSelf checks the challenger doesn't counter its own claims +// Rationale: The challenger should not disagree with itself +func doNotCounterSelf(game types.Game, action types.Action, _ types.TraceProvider) error { + claim := game.Claims()[action.ParentClaim.ContractIndex] + if claim.Claimant == challengerAddr { + return fmt.Errorf("countering own claim at idx %v", action.ParentClaim.ContractIndex) + } + return nil +} + +// avoidPoisonedPrestate checks the challenger does not perform a move that results in a claim where the ancestor +// with the largest trace index less than the new claim's trace index is invalid. 
+// Rationale: If such a claim were posted, an attacker could attack with invalid values down to max depth and setup a +// step call which uses the invalid claim as the pre-state. The challenger could not call step because it does not have +// the preimage of the invalid state. If the attacker should call step, they could provide a carefully crafted state +// that allows it to successfully step against the challenger's claim. +func avoidPoisonedPrestate(game types.Game, action types.Action, correctTrace types.TraceProvider) error { + if action.Type == types.ActionTypeStep { + return nil + } + ancestors := "" + movePosition := resultingPosition(game, action) + honestTraceIndex := movePosition.TraceIndex(game.MaxDepth()) + // Walk back up the claims and find the claim with highest trace index < honestTraceIndex + claim := game.Claims()[action.ParentClaim.ContractIndex] + var preStateClaim types.Claim + for { + ancestors += printClaim(claim, game) + "\n" + claimTraceIdx := claim.TraceIndex(game.MaxDepth()) + if claimTraceIdx.Cmp(honestTraceIndex) < 0 { // Check it's left of the honest claim + if preStateClaim == (types.Claim{}) || claimTraceIdx.Cmp(preStateClaim.TraceIndex(game.MaxDepth())) > 0 { + preStateClaim = claim + } + } + if claim.IsRoot() { + break + } + parent, err := game.GetParent(claim) + if err != nil { + return fmt.Errorf("no parent of claim %v: %w", claim.ContractIndex, err) + } + claim = parent + } + if preStateClaim == (types.Claim{}) { + // No claim to the left of the honest claim, so can't have been poisoned + return nil + } + correctValue, err := correctTrace.Get(context.Background(), preStateClaim.Position) + if err != nil { + return fmt.Errorf("failed to get correct trace at position %v: %w", preStateClaim.Position, err) + } + if correctValue != preStateClaim.Value { + err = fmt.Errorf("prestate poisoned claim %v has invalid prestate and is left of honest claim countering %v at trace index %v", preStateClaim.ContractIndex, 
action.ParentClaim.ContractIndex, honestTraceIndex) + return err + } + return nil +} + +// detectFailedStep checks that step actions will succeed. +// Rationale: The action would be rejected by the contracts +// +// INVARIANT: If a step is an attack, the poststate is valid if the step produces +// +// the same poststate hash as the parent claim's value. +// If a step is a defense: +// 1. If the parent claim and the found post state agree with each other +// (depth diff % 2 == 0), the step is valid if it produces the same +// state hash as the post state's claim. +// 2. If the parent claim and the found post state disagree with each other +// (depth diff % 2 != 0), the parent cannot be countered unless the step +// produces the same state hash as `postState.claim`. +func detectFailedStep(game types.Game, action types.Action, correctTrace types.TraceProvider) error { + if action.Type != types.ActionTypeStep { + // An invalid post state is not an issue if we are moving, only if the honest challenger has to call step. 
+ return nil + } + position := resultingPosition(game, action) + if position.Depth() != game.MaxDepth() { + // Not at max depth yet + return nil + } + honestTraceIndex := position.TraceIndex(game.MaxDepth()) + poststateIndex := honestTraceIndex + if !action.IsAttack { + poststateIndex = new(big.Int).Add(honestTraceIndex, big.NewInt(1)) + } + // Walk back up the claims and find the claim required post state index + claim := game.Claims()[action.ParentClaim.ContractIndex] + poststateClaim, ok := game.AncestorWithTraceIndex(claim, poststateIndex) + if !ok { + return fmt.Errorf("did not find required poststate at %v to counter claim %v", poststateIndex, action.ParentClaim.ContractIndex) + } + correctValue, err := correctTrace.Get(context.Background(), poststateClaim.Position) + if err != nil { + return fmt.Errorf("failed to get correct trace at position %v: %w", poststateClaim.Position, err) + } + validStep := correctValue == poststateClaim.Value + parentPostAgree := (claim.Depth()-poststateClaim.Depth())%2 == 0 + if parentPostAgree == validStep { + return fmt.Errorf("failed step against claim at %v using poststate from claim %v post state is correct? %v parentPostAgree? %v", + action.ParentClaim.ContractIndex, poststateClaim.ContractIndex, validStep, parentPostAgree) + } + return nil +} + +// detectPoisonedStepPrestate checks that: +// 1. step actions performed by the challenger always have a valid prestate +// 2. move actions that create a claim a max depth would have a valid prestate if they are attacked +// 3. the actual prestate provided matches the prestate claim's commitment +// Rationale: A step against an invalid prestate will fail because the preimage of the prestate claim is unknown +// and claims at max depth with an invalid prestate could be stepped against because the prestate is invalid so a VM +// step will not result in the correct post-state. 
+func detectPoisonedStepPrestate(game types.Game, action types.Action, correctTrace types.TraceProvider) error { + position := resultingPosition(game, action) + if position.Depth() != game.MaxDepth() { + // Not at max depth yet + return nil + } + honestTraceIndex := position.TraceIndex(game.MaxDepth()) + prestateIndex := honestTraceIndex + // If we're performing a move to post a leaf claim, assume the attacker will try to attack it from their + // poisoned prestate + if action.IsAttack || action.Type == types.ActionTypeMove { + prestateIndex = new(big.Int).Sub(prestateIndex, big.NewInt(1)) + } + if prestateIndex.Cmp(big.NewInt(0)) < 0 { + // Absolute prestate is not poisoned + return nil + } + // Walk back up the claims and find the claim with highest trace index < honestTraceIndex + claim := game.Claims()[action.ParentClaim.ContractIndex] + preStateClaim, ok := game.AncestorWithTraceIndex(claim, prestateIndex) + if !ok { + return fmt.Errorf("performing step against claim %v with no prestate available at %v", claim.ContractIndex, prestateIndex) + } + correctValue, err := correctTrace.Get(context.Background(), preStateClaim.Position) + if err != nil { + return fmt.Errorf("failed to get correct trace at position %v: %w", preStateClaim.Position, err) + } + if correctValue != preStateClaim.Value { + if action.Type == types.ActionTypeStep { + return fmt.Errorf("stepping from poisoned prestate at claim %v when countering %v", preStateClaim.ContractIndex, action.ParentClaim.ContractIndex) + } else { + return fmt.Errorf("posting leaf claim with poisoned prestate from claim %v when countering %v", preStateClaim.ContractIndex, action.ParentClaim.ContractIndex) + } + } + if action.Type == types.ActionTypeStep { + prestateHash := crypto.Keccak256Hash(action.PreState) + if !slices.Equal(prestateHash[1:], preStateClaim.Value[1:]) { + return fmt.Errorf("prestate hash %v does not match expected prestate claim %v from claim %v", prestateHash, preStateClaim.Value, 
preStateClaim.ContractIndex) + } + } + return nil +} + +func resultingPosition(game types.Game, action types.Action) types.Position { + parentPos := game.Claims()[action.ParentClaim.ContractIndex].Position + if action.Type == types.ActionTypeStep { + return parentPos + } + if action.IsAttack { + return parentPos.Attack() + } + return parentPos.Defend() +} diff --git a/op-challenger2/game/fault/solver/solver.go b/op-challenger2/game/fault/solver/solver.go new file mode 100644 index 000000000000..ea6d6fdce4cf --- /dev/null +++ b/op-challenger2/game/fault/solver/solver.go @@ -0,0 +1,168 @@ +package solver + +import ( + "bytes" + "context" + "errors" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" +) + +var ( + ErrStepNonLeafNode = errors.New("cannot step on non-leaf claims") +) + +// claimSolver uses a [TraceProvider] to determine the moves to make in a dispute game. +type claimSolver struct { + trace types.TraceAccessor + gameDepth types.Depth +} + +// newClaimSolver creates a new [claimSolver] using the provided [TraceProvider]. 
+func newClaimSolver(gameDepth types.Depth, trace types.TraceAccessor) *claimSolver { + return &claimSolver{ + trace, + gameDepth, + } +} + +func (s *claimSolver) shouldCounter(game types.Game, claim types.Claim, honestClaims *honestClaimTracker) (bool, error) { + // Do not counter honest claims + if honestClaims.IsHonest(claim) { + return false, nil + } + + if claim.IsRoot() { + // Always counter the root claim if it is not honest + return true, nil + } + + parent, err := game.GetParent(claim) + if err != nil { + return false, fmt.Errorf("no parent for claim %v: %w", claim.ContractIndex, err) + } + + // Counter all claims that are countering an honest claim + if honestClaims.IsHonest(parent) { + return true, nil + } + + counter, hasCounter := honestClaims.HonestCounter(parent) + // Do not respond to any claim countering a claim the honest actor ignored + if !hasCounter { + return false, nil + } + + // Do not counter sibling to an honest claim that are right of the honest claim. + honestIdx := counter.TraceIndex(game.MaxDepth()) + claimIdx := claim.TraceIndex(game.MaxDepth()) + return claimIdx.Cmp(honestIdx) <= 0, nil +} + +// NextMove returns the next move to make given the current state of the game. 
+func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, game types.Game, honestClaims *honestClaimTracker) (*types.Claim, error) { + if claim.Depth() == s.gameDepth { + return nil, types.ErrGameDepthReached + } + + if counter, err := s.shouldCounter(game, claim, honestClaims); err != nil { + return nil, fmt.Errorf("failed to determine if claim should be countered: %w", err) + } else if !counter { + return nil, nil + } + + if agree, err := s.agreeWithClaim(ctx, game, claim); err != nil { + return nil, err + } else if agree { + return s.defend(ctx, game, claim) + } else { + return s.attack(ctx, game, claim) + } +} + +type StepData struct { + LeafClaim types.Claim + IsAttack bool + PreState []byte + ProofData []byte + OracleData *types.PreimageOracleData +} + +// AttemptStep determines what step, if any, should occur for a given leaf claim. +// An error will be returned if the claim is not at the max depth. +// Returns nil, nil if no step should be performed. +func (s *claimSolver) AttemptStep(ctx context.Context, game types.Game, claim types.Claim, honestClaims *honestClaimTracker) (*StepData, error) { + if claim.Depth() != s.gameDepth { + return nil, ErrStepNonLeafNode + } + + if counter, err := s.shouldCounter(game, claim, honestClaims); err != nil { + return nil, fmt.Errorf("failed to determine if claim should be countered: %w", err) + } else if !counter { + return nil, nil + } + + claimCorrect, err := s.agreeWithClaim(ctx, game, claim) + if err != nil { + return nil, err + } + + var position types.Position + if !claimCorrect { + // Attack the claim by executing step index, so we need to get the pre-state of that index + position = claim.Position + } else { + // Defend and use this claim as the starting point to execute the step after. + // Thus, we need the pre-state of the next step. 
+ position = claim.Position.MoveRight() + } + + preState, proofData, oracleData, err := s.trace.GetStepData(ctx, game, claim, position) + if err != nil { + return nil, err + } + + return &StepData{ + LeafClaim: claim, + IsAttack: !claimCorrect, + PreState: preState, + ProofData: proofData, + OracleData: oracleData, + }, nil +} + +// attack returns a response that attacks the claim. +func (s *claimSolver) attack(ctx context.Context, game types.Game, claim types.Claim) (*types.Claim, error) { + position := claim.Attack() + value, err := s.trace.Get(ctx, game, claim, position) + if err != nil { + return nil, fmt.Errorf("attack claim: %w", err) + } + return &types.Claim{ + ClaimData: types.ClaimData{Value: value, Position: position}, + ParentContractIndex: claim.ContractIndex, + }, nil +} + +// defend returns a response that defends the claim. +func (s *claimSolver) defend(ctx context.Context, game types.Game, claim types.Claim) (*types.Claim, error) { + if claim.IsRoot() { + return nil, nil + } + position := claim.Defend() + value, err := s.trace.Get(ctx, game, claim, position) + if err != nil { + return nil, fmt.Errorf("defend claim: %w", err) + } + return &types.Claim{ + ClaimData: types.ClaimData{Value: value, Position: position}, + ParentContractIndex: claim.ContractIndex, + }, nil +} + +// agreeWithClaim returns true if the claim is correct according to the internal [TraceProvider]. 
+func (s *claimSolver) agreeWithClaim(ctx context.Context, game types.Game, claim types.Claim) (bool, error) { + ourValue, err := s.trace.Get(ctx, game, claim, claim.Position) + return bytes.Equal(ourValue[:], claim.Value[:]), err +} diff --git a/op-challenger2/game/fault/solver/solver_test.go b/op-challenger2/game/fault/solver/solver_test.go new file mode 100644 index 000000000000..565aed3ecc08 --- /dev/null +++ b/op-challenger2/game/fault/solver/solver_test.go @@ -0,0 +1,197 @@ +package solver + +import ( + "context" + "math/big" + "testing" + + faulttest "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/test" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestAttemptStep(t *testing.T) { + maxDepth := types.Depth(3) + startingL2BlockNumber := big.NewInt(0) + claimBuilder := faulttest.NewAlphabetClaimBuilder(t, startingL2BlockNumber, maxDepth) + + // Last accessible leaf is the second last trace index + // The root node is used for the last trace index and can only be attacked. + lastLeafTraceIndex := big.NewInt(1< 0 { + return nil, nil, nil, fmt.Errorf("%w depth: %v index: %v max: %v", ErrIndexTooLarge, ap.depth, traceIndex, ap.maxLen) + } + initialTraceIndex := new(big.Int).Lsh(ap.startingBlockNumber, 4) + initialClaim := new(big.Int).Add(absolutePrestateInt, initialTraceIndex) + newTraceIndex := new(big.Int).Add(initialTraceIndex, traceIndex) + newClaim := new(big.Int).Add(initialClaim, traceIndex) + return BuildAlphabetPreimage(newTraceIndex, newClaim), []byte{}, preimageData, nil +} + +// Get returns the claim value at the given index in the trace. 
+func (ap *AlphabetTraceProvider) Get(ctx context.Context, i types.Position) (common.Hash, error) { + if i.Depth() > ap.depth { + return common.Hash{}, fmt.Errorf("%w depth: %v max: %v", ErrIndexTooLarge, i.Depth(), ap.depth) + } + // Step data returns the pre-state, so add 1 to get the state for index i + ti := i.TraceIndex(ap.depth) + postPosition := types.NewPosition(ap.depth, new(big.Int).Add(ti, big.NewInt(1))) + claimBytes, _, _, err := ap.GetStepData(ctx, postPosition) + if err != nil { + return common.Hash{}, err + } + return alphabetStateHash(claimBytes), nil +} + +func (ap *AlphabetTraceProvider) GetL2BlockNumberChallenge(_ context.Context) (*types.InvalidL2BlockNumberChallenge, error) { + return nil, types.ErrL2BlockNumberValid +} + +// BuildAlphabetPreimage constructs the claim bytes for the index and claim. +func BuildAlphabetPreimage(traceIndex *big.Int, claim *big.Int) []byte { + return append(traceIndex.FillBytes(make([]byte, 32)), claim.FillBytes(make([]byte, 32))...) +} + +func alphabetStateHash(state []byte) common.Hash { + h := crypto.Keccak256Hash(state) + h[0] = mipsevm.VMStatusInvalid + return h +} diff --git a/op-challenger2/game/fault/trace/alphabet/provider_test.go b/op-challenger2/game/fault/trace/alphabet/provider_test.go new file mode 100644 index 000000000000..6c785291ba1e --- /dev/null +++ b/op-challenger2/game/fault/trace/alphabet/provider_test.go @@ -0,0 +1,225 @@ +package alphabet + +import ( + "context" + "fmt" + "math/big" + "testing" + + preimage "github.com/ethereum-optimism/optimism/op-preimage" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func alphabetClaim(index *big.Int, claim *big.Int) common.Hash { + return alphabetStateHash(BuildAlphabetPreimage(index, claim)) +} + +func TestAlphabetProvider_Prestate(t *testing.T) { + depth := types.Depth(4) + startingL2BlockNumber := big.NewInt(2) + + // Actual 
preimage values generated by the solidity AlphabetVM at each step. + expectedPrestates := []string{ + "0000000000000000000000000000000000000000000000000000000000000060", + "00000000000000000000000000000000000000000000000000000000000000210000000000000000000000000000000000000000000000000000000000000081", + "00000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000000000000082", + "00000000000000000000000000000000000000000000000000000000000000230000000000000000000000000000000000000000000000000000000000000083", + "00000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000084", + "00000000000000000000000000000000000000000000000000000000000000250000000000000000000000000000000000000000000000000000000000000085", + } + + ap := NewTraceProvider(startingL2BlockNumber, depth) + + for i, expected := range expectedPrestates { + i, expected := i, expected + t.Run(fmt.Sprintf("Step_%v", i), func(t *testing.T) { + result, _, _, err := ap.GetStepData(context.Background(), types.NewPosition(4, big.NewInt(int64(i)))) + require.NoError(t, err) + require.Equalf(t, expected, common.Bytes2Hex(result), "Incorrect prestate at trace index %v", i) + }) + } +} + +func TestAlphabetProvider_GetStepData_MaxLen(t *testing.T) { + depth := types.Depth(4) + startingL2BlockNumber := big.NewInt(2) + ap := NewTraceProvider(startingL2BlockNumber, depth) + + // Step data for the max position is allowed + maxLen := int64(1 << depth) + maxPos := types.NewPosition(4, big.NewInt(maxLen)) + result, _, _, err := ap.GetStepData(context.Background(), maxPos) + require.NoError(t, err) + expected := "00000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000090" + require.Equal(t, expected, common.Bytes2Hex(result)) + + // Cannot step on a position greater than the max. 
+ oobPos := types.NewPosition(4, big.NewInt(int64(1< p.lastStep { + i = p.lastStep + } + path := filepath.Join(p.dir, proofsDir, fmt.Sprintf("%d.json.gz", i)) + file, err := ioutil.OpenDecompressed(path) + if errors.Is(err, os.ErrNotExist) { + if err := p.generator.GenerateProof(ctx, p.dir, i); err != nil { + return nil, fmt.Errorf("generate asterisc trace with proof at %v: %w", i, err) + } + // Try opening the file again now and it should exist. + file, err = ioutil.OpenDecompressed(path) + if errors.Is(err, os.ErrNotExist) { + // Expected proof wasn't generated, check if we reached the end of execution + state, err := p.finalState() + if err != nil { + return nil, err + } + if state.Exited && state.Step <= i { + p.logger.Warn("Requested proof was after the program exited", "proof", i, "last", state.Step) + // The final instruction has already been applied to this state, so the last step we can execute + // is one before its Step value. + p.lastStep = state.Step - 1 + // Extend the trace out to the full length using a no-op instruction that doesn't change any state + // No execution is done, so no proof-data or oracle values are required. 
+ proof := &utils.ProofData{ + ClaimValue: state.StateHash, + StateData: state.Witness, + ProofData: []byte{}, + OracleKey: nil, + OracleValue: nil, + OracleOffset: 0, + } + if err := utils.WriteLastStep(p.dir, proof, p.lastStep); err != nil { + p.logger.Warn("Failed to write last step to disk cache", "step", p.lastStep) + } + return proof, nil + } else { + return nil, fmt.Errorf("expected proof not generated but final state was not exited, requested step %v, final state at step %v", i, state.Step) + } + } + } + if err != nil { + return nil, fmt.Errorf("cannot open proof file (%v): %w", path, err) + } + defer file.Close() + var proof utils.ProofData + err = json.NewDecoder(file).Decode(&proof) + if err != nil { + return nil, fmt.Errorf("failed to read proof (%v): %w", path, err) + } + return &proof, nil +} + +func (c *AsteriscTraceProvider) finalState() (*VMState, error) { + state, err := parseState(filepath.Join(c.dir, utils.FinalState)) + if err != nil { + return nil, fmt.Errorf("cannot read final state: %w", err) + } + return state, nil +} + +// AsteriscTraceProviderForTest is a AsteriscTraceProvider that can find the step referencing the preimage read +// Only to be used for testing +type AsteriscTraceProviderForTest struct { + *AsteriscTraceProvider +} + +func NewTraceProviderForTest(logger log.Logger, m AsteriscMetricer, cfg *config.Config, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *AsteriscTraceProviderForTest { + p := &AsteriscTraceProvider{ + logger: logger, + dir: dir, + prestate: cfg.AsteriscAbsolutePreState, + generator: NewExecutor(logger, m, cfg, cfg.AsteriscNetwork, localInputs), + gameDepth: gameDepth, + preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(utils.PreimageDir(dir)).Get), + } + return &AsteriscTraceProviderForTest{p} +} + +func (p *AsteriscTraceProviderForTest) FindStep(ctx context.Context, start uint64, preimage utils.PreimageOpt) (uint64, error) { + // Run asterisc to find the step that meets the 
preimage conditions + if err := p.generator.(*Executor).generateProof(ctx, p.dir, start, math.MaxUint64, preimage()...); err != nil { + return 0, fmt.Errorf("generate asterisc trace (until preimage read): %w", err) + } + // Load the step from the state asterisc finished with + state, err := p.finalState() + if err != nil { + return 0, fmt.Errorf("failed to load final state: %w", err) + } + // Check we didn't get to the end of the trace without finding the preimage read we were looking for + if state.Exited { + return 0, fmt.Errorf("preimage read not found: %w", io.EOF) + } + // The state is the post-state so the step we want to execute to read the preimage is step - 1. + return state.Step - 1, nil +} diff --git a/op-challenger2/game/fault/trace/asterisc/provider_test.go b/op-challenger2/game/fault/trace/asterisc/provider_test.go new file mode 100644 index 000000000000..8b8d4c9eab00 --- /dev/null +++ b/op-challenger2/game/fault/trace/asterisc/provider_test.go @@ -0,0 +1,260 @@ +package asterisc + +import ( + "context" + "embed" + "encoding/json" + "fmt" + "math" + "math/big" + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +//go:embed test_data +var testData embed.FS + +func PositionFromTraceIndex(provider *AsteriscTraceProvider, idx *big.Int) types.Position { + return types.NewPosition(provider.gameDepth, idx) +} + +func TestGet(t *testing.T) { + dataDir, prestate := setupTestData(t) + t.Run("ExistingProof", func(t *testing.T) { + provider, generator := setupWithTestData(t, dataDir, prestate) + value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, 
common.Big0)) + require.NoError(t, err) + require.Equal(t, common.HexToHash("0x034689707b571db46b32c9e433def18e648f4e1fa9e5abd4012e7913031bfc10"), value) + require.Empty(t, generator.generated) + }) + + t.Run("ErrorsTraceIndexOutOfBounds", func(t *testing.T) { + provider, generator := setupWithTestData(t, dataDir, prestate) + largePosition := PositionFromTraceIndex(provider, new(big.Int).Mul(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(2))) + _, err := provider.Get(context.Background(), largePosition) + require.ErrorContains(t, err, "trace index out of bounds") + require.Empty(t, generator.generated) + }) + + t.Run("MissingPostHash", func(t *testing.T) { + provider, generator := setupWithTestData(t, dataDir, prestate) + _, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(1))) + require.ErrorContains(t, err, "missing post hash") + require.Empty(t, generator.generated) + }) + + t.Run("IgnoreUnknownFields", func(t *testing.T) { + provider, generator := setupWithTestData(t, dataDir, prestate) + value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(2))) + require.NoError(t, err) + expected := common.HexToHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + require.Equal(t, expected, value) + require.Empty(t, generator.generated) + }) +} + +func TestGetStepData(t *testing.T) { + t.Run("ExistingProof", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, common.Big0)) + require.NoError(t, err) + expected := 
common.FromHex("0x354cfaf28a5b60c3f64f22f9f171b64aa067f90c6de6c96f725f44c5cf9f8ac1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080e080000000000000000000000007f0000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") + require.Equal(t, expected, value) + expectedProof := common.FromHex("0x000000000000000003350100930581006f00800100000000970f000067800f01000000000000000097c2ffff938282676780020000000000032581009308e0050e1893682c323d6695396f1122b3cb562af8c65cab19978c9246434fda0536c90ca1cfabf684ebce3ad9fbd54000a2b258f8d0e447c1bb6f7e97de47aadfc12cd7b6f466bfd024daa905886c5f638f4692d843709e6c1c0d9eb2e251c626d53d15e04b59735fe0781bc4357a4243fbc28e6981902a8c2669a2d6456f7a964423db5d1585da978861f8b84067654b29490275c82b54083ee09c82eb7aa9ae693911226bb8297ad82c0963ae943f22d0c6086f4f14437e4d1c87ceb17e68caf5eaec77f14b46225b417d2191ca7b49564c896836a95ad4e9c383bd1c8ff9d8e888c64fb3836daa9535e58372e9646b7b144219980a4389aca5da241c3ec11fbc9297bd7a94ac671ccec288604c23a0072b0c1ed069198959cacdc2574aff65b7eceffc391e21778a1775deceb3ec0990836df98d98a4f3f0dc854587230fbf59e4daa60e8240d74caf90f7e2cd014c1d5d707b2e44269d9a9caf133882fe1ebb2f4237f6282abe89639b357e9231418d0c41373229ae9edfa6815bec484cb79772c9e2a7d80912123558f79b539bb45d435f2a4446970f1e2123494740285cec3491b0a41a9fd7403bdc8cd239a87508039a77b48ee39a951a8bd196b583de2b93444aafd456d0cd92050fa6a816d5183c1d75e96df540c8ac3bb8638b971f0cf3fb5b4a321487a1c8992b921de110f3d5bbb87369b25fe743ad7e789ca52d9f9fe62ccb103b78fe65eaa2cd47895022
c590639c8f0c6a3999d8a5c71ed94d355815851b479f8d93eae90822294c96b39724b33491f8497b0bf7e1b995b37e4d759ff8a7958d194da6e00c475a6ddcf6efcb5fb4bb383c9b273da18d01e000dbe9c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618db8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea32293237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d7358448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a927ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757bf558bebd2ceec7f3c5dce04a4782f88c2c6036ae78ee206d0bc5289d20461a2e21908c2968c0699040a6fd866a577a99a9d2ec88745c815fd4a472c789244daae824d72ddc272aab68a8c3022e36f10454437c1886f3ff9927b64f232df414f27e429a4bef3083bc31a671d046ea5c1f5b8c3094d72868d9dfdc12c7334ac5f743cc5c365a9a6a15c1f240ac25880c7a9d1de290696cb766074a1d83d9278164adcf616c3bfabf63999a01966c998b7bb572774035a63ead49da73b5987f34775786645d0c5dd7c04a2f8a75dcae085213652f5bce3ea8b9b9bedd1cab3c5e9b88b152c9b8a7b79637d35911848b0c41e7cc7cca2ab4fe9a15f9c38bb4bb9390c4e2d8ce834ffd7a6cd85d7113d4521abb857774845c4291e6f6d010d97e3185bc799d83e3bb31501b3da786680df30fbc18eb41cbce611e8c0e9c72f69571ca10d3ef857d04d9c03ead7c6317d797a090fa1271ad9c7addfbcb412e9643d4fb33b1809c42623f474055fa9400a2027a7a885c8dfa4efe20666b4ee27d7529c134d7f28d53f175f6bf4b62faa2110d5b76f0f770c15e628181c1fcc18f970a9c34d24b2fc8c50ca9c07a7156ef4e5ff4bdf002eda0b11c1d359d0b59a54680704dbb9db631457879b27e0dfdbe50158fd9cf9b4cf77605c4ac4c95bd65fc9f6f9295a686647cb999090819cda700820c282c613cedcd2
18540bbc6f37b01c6567c4a1ea624f092a3a5cca2d6f0f0db231972fce627f0ecca0dee60f17551c5f8fdaeb5ab560b2ceb781cdb339361a0fbee1b9dffad59115138c8d6a70dda9ccc1bf0bbdd7fee15764845db875f6432559ff8dbc9055324431bc34e5b93d15da307317849eccd90c0c7b98870b9317c15a5959dcfb84c76dcc908c4fe6ba92126339bf06e458f6646df5e83ba7c3d35bc263b3222c8e9040068847749ca8e8f95045e4342aeb521eb3a5587ec268ed3aa6faf32b62b0bc41a9d549521f406fc3ec7d4dabb75e0d3e144d7cc882372d13746b6dcd481b1b229bcaec9f7422cdfb84e35c5d92171376cae5c86300822d729cd3a8479583bef09527027dba5f11263c5cbbeb3834b7a5c1cba9aa5fee0c95ec3f17a33ec3d8047fff799187f5ae2040bbe913c226c34c9fbe4389dd728984257a816892b3cae3e43191dd291f0eb50000000000000000420000000000000035000000000000000000000000000000060000000000000000100000000000001900000000000000480000000000001050edbc06b4bfc3ee108b66f7a8f772ca4d90e1a085f4a8398505920f7465bb44b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d3021ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85e58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a193440eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968ffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f839867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756afcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0f9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5f8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf8923490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99cc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8beccda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d22733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981fe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0b46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36a
a0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618db8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea32293237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d7358448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a927ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757bf558bebd2ceec7f3c5dce04a4782f88c2c6036ae78ee206d0bc5289d20461a2e21908c2968c0699040a6fd866a577a99a9d2ec88745c815fd4a472c789244daae824d72ddc272aab68a8c3022e36f10454437c1886f3ff9927b64f232df414f27e429a4bef3083bc31a671d046ea5c1f5b8c3094d72868d9dfdc12c7334ac5f743cc5c365a9a6a15c1f240ac25880c7a9d1de290696cb766074a1d83d9278164adcf616c3bfabf63999a01966c998b7bb572774035a63ead49da73b5987f34775786645d0c5dd7c04a2f8a75dcae085213652f5bce3ea8b9b9bedd1cab3c5e9b88b152c9b8a7b79637d35911848b0c41e7cc7cca2ab4fe9a15f9c38bb4bb9390c4e2d8ce834ffd7a6cd85d7113d4521abb857774845c4291e6f6d010d97e3185bc799d83e3bb31501b3da786680df30fbc18eb41cbce611e8c0e9c72f69571ca10d3ef857d04d9c03ead7c6317d797a090fa1271ad9c7addfbcb412e9643d4fb33b1809c42623f474055fa9400a2027a7a885c8dfa4efe20666b4ee27d7529c134d7f28d53f175f6bf4b62faa2110d5b76f0f770c15e628181c1fcc18f970a9c34d24b2fc8c50ca9c07a7156ef4e5ff4bdf002eda0b11c1d359d0b59a54680704dbb9db631457879b27e0dfdbe50158fd9cf9b4cf77605c4ac4c95bd65fc9f6f9295a686647cb999090819cda700820c282c613cedcd218540bbc6f37b01c6567c4a1ea624f092a3a5cca2d6f0f0db231972fce627f0ecca0dee60f17551c5f8fdaeb5ab560b2ceb781cdb339361a0fbee1b9dffad59115138c8d6a70dda9ccc1bf0bbdd7fee1
5764845db875f6432559ff8dbc9055324431bc34e5b93d15da307317849eccd90c0c7b98870b9317c15a5959dcfb84c76dcc908c4fe6ba92126339bf06e458f6646df5e83ba7c3d35bc263b3222c8e9040068847749ca8e8f95045e4342aeb521eb3a5587ec268ed3aa6faf32b62b0bc41a9d549521f406fc30f3e39c5412c30550d1d07fb07ff0e546fbeea1988f6658f04a9b19693e5b99d84e35c5d92171376cae5c86300822d729cd3a8479583bef09527027dba5f11263c5cbbeb3834b7a5c1cba9aa5fee0c95ec3f17a33ec3d8047fff799187f5ae2040bbe913c226c34c9fbe4389dd728984257a816892b3cae3e43191dd291f0eb5") + require.Equal(t, expectedProof, proof) + // TODO: Need to add some oracle data + require.Nil(t, data) + require.Empty(t, generator.generated) + }) + + t.Run("ErrorsTraceIndexOutOfBounds", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + largePosition := PositionFromTraceIndex(provider, new(big.Int).Mul(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(2))) + _, _, _, err := provider.GetStepData(context.Background(), largePosition) + require.ErrorContains(t, err, "trace index out of bounds") + require.Empty(t, generator.generated) + }) + + t.Run("GenerateProof", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + generator.finalState = &VMState{ + Step: 10, + Exited: true, + Witness: make([]byte, asteriscWitnessLen), + } + generator.proof = &utils.ProofData{ + ClaimValue: common.Hash{0xaa}, + StateData: []byte{0xbb}, + ProofData: []byte{0xcc}, + OracleKey: common.Hash{0xdd}.Bytes(), + OracleValue: []byte{0xdd}, + OracleOffset: 10, + } + preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(4))) + require.NoError(t, err) + require.Contains(t, generator.generated, 4, "should have tried to generate the proof") + + require.EqualValues(t, generator.proof.StateData, preimage) + require.EqualValues(t, generator.proof.ProofData, proof) + expectedData := 
types.NewPreimageOracleData(generator.proof.OracleKey, generator.proof.OracleValue, generator.proof.OracleOffset) + require.EqualValues(t, expectedData, data) + }) + + t.Run("ProofAfterEndOfTrace", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + generator.finalState = &VMState{ + Step: 10, + Exited: true, + Witness: make([]byte, asteriscWitnessLen), + } + generator.proof = &utils.ProofData{ + ClaimValue: common.Hash{0xaa}, + StateData: []byte{0xbb}, + ProofData: []byte{0xcc}, + OracleKey: common.Hash{0xdd}.Bytes(), + OracleValue: []byte{0xdd}, + OracleOffset: 10, + } + preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) + require.NoError(t, err) + require.Contains(t, generator.generated, 7000, "should have tried to generate the proof") + + witness := generator.finalState.Witness + require.EqualValues(t, witness, preimage) + require.Equal(t, []byte{}, proof) + require.Nil(t, data) + }) + + t.Run("ReadLastStepFromDisk", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, initGenerator := setupWithTestData(t, dataDir, prestate) + initGenerator.finalState = &VMState{ + Step: 10, + Exited: true, + Witness: make([]byte, asteriscWitnessLen), + } + initGenerator.proof = &utils.ProofData{ + ClaimValue: common.Hash{0xaa}, + StateData: []byte{0xbb}, + ProofData: []byte{0xcc}, + OracleKey: common.Hash{0xdd}.Bytes(), + OracleValue: []byte{0xdd}, + OracleOffset: 10, + } + _, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) + require.NoError(t, err) + require.Contains(t, initGenerator.generated, 7000, "should have tried to generate the proof") + + provider, generator := setupWithTestData(t, dataDir, prestate) + generator.finalState = &VMState{ + Step: 10, + Exited: true, + Witness: make([]byte, asteriscWitnessLen), + } + generator.proof = 
&utils.ProofData{ + ClaimValue: common.Hash{0xaa}, + StateData: []byte{0xbb}, + ProofData: []byte{0xcc}, + } + preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) + require.NoError(t, err) + require.Empty(t, generator.generated, "should not have to generate the proof again") + + require.EqualValues(t, initGenerator.finalState.Witness, preimage) + require.Empty(t, proof) + require.Nil(t, data) + }) + + t.Run("MissingStateData", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + _, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(1))) + require.ErrorContains(t, err, "missing state data") + require.Empty(t, generator.generated) + }) + + t.Run("IgnoreUnknownFields", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(2))) + require.NoError(t, err) + expected := common.FromHex("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") + require.Equal(t, expected, value) + expectedProof := common.FromHex("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd") + require.Equal(t, expectedProof, proof) + require.Empty(t, generator.generated) + require.Nil(t, data) + }) +} + +func setupTestData(t *testing.T) (string, string) { + srcDir := filepath.Join("test_data", "proofs") + entries, err := testData.ReadDir(srcDir) + require.NoError(t, err) + dataDir := t.TempDir() + require.NoError(t, os.Mkdir(filepath.Join(dataDir, proofsDir), 0o777)) + for _, entry := range entries { + path := filepath.Join(srcDir, entry.Name()) + file, err := testData.ReadFile(path) + require.NoErrorf(t, err, "reading %v", path) + proofFile := filepath.Join(dataDir, proofsDir, entry.Name()+".gz") + 
err = ioutil.WriteCompressedBytes(proofFile, file, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) + require.NoErrorf(t, err, "writing %v", path) + } + return dataDir, "state.json" +} + +func setupWithTestData(t *testing.T, dataDir string, prestate string) (*AsteriscTraceProvider, *stubGenerator) { + generator := &stubGenerator{} + return &AsteriscTraceProvider{ + logger: testlog.Logger(t, log.LevelInfo), + dir: dataDir, + generator: generator, + prestate: filepath.Join(dataDir, prestate), + gameDepth: 63, + }, generator +} + +type stubGenerator struct { + generated []int // Using int makes assertions easier + finalState *VMState + proof *utils.ProofData +} + +func (e *stubGenerator) GenerateProof(ctx context.Context, dir string, i uint64) error { + e.generated = append(e.generated, int(i)) + var proofFile string + var data []byte + var err error + if e.finalState != nil && e.finalState.Step <= i { + // Requesting a trace index past the end of the trace + proofFile = filepath.Join(dir, utils.FinalState) + data, err = json.Marshal(e.finalState) + if err != nil { + return err + } + return ioutil.WriteCompressedBytes(proofFile, data, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) + } + if e.proof != nil { + proofFile = filepath.Join(dir, proofsDir, fmt.Sprintf("%d.json.gz", i)) + data, err = json.Marshal(e.proof) + if err != nil { + return err + } + return ioutil.WriteCompressedBytes(proofFile, data, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) + } + return nil +} diff --git a/op-challenger2/game/fault/trace/asterisc/state.go b/op-challenger2/game/fault/trace/asterisc/state.go new file mode 100644 index 000000000000..b766cda6f50c --- /dev/null +++ b/op-challenger2/game/fault/trace/asterisc/state.go @@ -0,0 +1,74 @@ +package asterisc + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/op-service/ioutil" +) + +var asteriscWitnessLen = 362 + +// The state struct will be read from json. 
+// other fields included in json are specific to FPVM implementation, and not required for trace provider. +type VMState struct { + PC uint64 `json:"pc"` + Exited bool `json:"exited"` + Step uint64 `json:"step"` + Witness []byte `json:"witness"` + StateHash [32]byte `json:"stateHash"` +} + +func (state *VMState) validateStateHash() error { + exitCode := state.StateHash[0] + if exitCode >= 4 { + return fmt.Errorf("invalid stateHash: unknown exitCode %d", exitCode) + } + if (state.Exited && exitCode == mipsevm.VMStatusUnfinished) || (!state.Exited && exitCode != mipsevm.VMStatusUnfinished) { + return fmt.Errorf("invalid stateHash: invalid exitCode %d", exitCode) + } + return nil +} + +func (state *VMState) validateWitness() error { + witnessLen := len(state.Witness) + if witnessLen != asteriscWitnessLen { + return fmt.Errorf("invalid witness: Length must be 362 but got %d", witnessLen) + } + return nil +} + +// validateState performs verification of state; it is not perfect. +// It does not recalculate whether witness nor stateHash is correctly set from state. 
+func (state *VMState) validateState() error { + if err := state.validateStateHash(); err != nil { + return err + } + if err := state.validateWitness(); err != nil { + return err + } + return nil +} + +// parseState parses state from json and goes on state validation +func parseState(path string) (*VMState, error) { + file, err := ioutil.OpenDecompressed(path) + if err != nil { + return nil, fmt.Errorf("cannot open state file (%v): %w", path, err) + } + return parseStateFromReader(file) +} + +func parseStateFromReader(in io.ReadCloser) (*VMState, error) { + defer in.Close() + var state VMState + if err := json.NewDecoder(in).Decode(&state); err != nil { + return nil, fmt.Errorf("invalid asterisc VM state %w", err) + } + if err := state.validateState(); err != nil { + return nil, fmt.Errorf("invalid asterisc VM state %w", err) + } + return &state, nil +} diff --git a/op-challenger2/game/fault/trace/asterisc/state_test.go b/op-challenger2/game/fault/trace/asterisc/state_test.go new file mode 100644 index 000000000000..02b38eb41565 --- /dev/null +++ b/op-challenger2/game/fault/trace/asterisc/state_test.go @@ -0,0 +1,83 @@ +package asterisc + +import ( + "compress/gzip" + _ "embed" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +//go:embed test_data/state.json +var testState []byte + +func TestLoadState(t *testing.T) { + t.Run("Uncompressed", func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "state.json") + require.NoError(t, os.WriteFile(path, testState, 0644)) + + state, err := parseState(path) + require.NoError(t, err) + + var expected VMState + require.NoError(t, json.Unmarshal(testState, &expected)) + require.Equal(t, &expected, state) + }) + + t.Run("Gzipped", func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "state.json.gz") + f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644) + require.NoError(t, err) + defer f.Close() + writer := 
gzip.NewWriter(f) + _, err = writer.Write(testState) + require.NoError(t, err) + require.NoError(t, writer.Close()) + + state, err := parseState(path) + require.NoError(t, err) + + var expected VMState + require.NoError(t, json.Unmarshal(testState, &expected)) + require.Equal(t, &expected, state) + }) + + t.Run("InvalidStateWitness", func(t *testing.T) { + invalidWitnessLen := asteriscWitnessLen - 1 + state := &VMState{ + Step: 10, + Exited: true, + Witness: make([]byte, invalidWitnessLen), + } + err := state.validateState() + require.ErrorContains(t, err, "invalid witness") + }) + + t.Run("InvalidStateHash", func(t *testing.T) { + state := &VMState{ + Step: 10, + Exited: true, + Witness: make([]byte, asteriscWitnessLen), + } + // Unknown exit code + state.StateHash[0] = 37 + err := state.validateState() + require.ErrorContains(t, err, "invalid stateHash: unknown exitCode") + // Exited but ExitCode is VMStatusUnfinished + state.StateHash[0] = 3 + err = state.validateState() + require.ErrorContains(t, err, "invalid stateHash: invalid exitCode") + // Not Exited but ExitCode is not VMStatusUnfinished + state.Exited = false + for exitCode := 0; exitCode < 3; exitCode++ { + state.StateHash[0] = byte(exitCode) + err = state.validateState() + require.ErrorContains(t, err, "invalid stateHash: invalid exitCode") + } + }) +} diff --git a/op-challenger2/game/fault/trace/asterisc/test_data/invalid.json b/op-challenger2/game/fault/trace/asterisc/test_data/invalid.json new file mode 100644 index 000000000000..06a76bf5b23d --- /dev/null +++ b/op-challenger2/game/fault/trace/asterisc/test_data/invalid.json @@ -0,0 +1,3 @@ +{ + "preimageKey": 1 +} diff --git a/op-challenger2/game/fault/trace/asterisc/test_data/proofs/0.json b/op-challenger2/game/fault/trace/asterisc/test_data/proofs/0.json new file mode 100644 index 000000000000..e5838ddfc5ab --- /dev/null +++ b/op-challenger2/game/fault/trace/asterisc/test_data/proofs/0.json @@ -0,0 +1,7 @@ +{ + "step": 0, + "pre": 
"0x03abd5c535c08bae7c4ad48fcae39b65f9c25239f65b4376c58638d262c97381", + "post": "0x034689707b571db46b32c9e433def18e648f4e1fa9e5abd4012e7913031bfc10", + "state-data": "0x354cfaf28a5b60c3f64f22f9f171b64aa067f90c6de6c96f725f44c5cf9f8ac1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080e080000000000000000000000007f0000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "proof-data": "0x000000000000000003350100930581006f00800100000000970f000067800f01000000000000000097c2ffff938282676780020000000000032581009308e0050e1893682c323d6695396f1122b3cb562af8c65cab19978c9246434fda0536c90ca1cfabf684ebce3ad9fbd54000a2b258f8d0e447c1bb6f7e97de47aadfc12cd7b6f466bfd024daa905886c5f638f4692d843709e6c1c0d9eb2e251c626d53d15e04b59735fe0781bc4357a4243fbc28e6981902a8c2669a2d6456f7a964423db5d1585da978861f8b84067654b29490275c82b54083ee09c82eb7aa9ae693911226bb8297ad82c0963ae943f22d0c6086f4f14437e4d1c87ceb17e68caf5eaec77f14b46225b417d2191ca7b49564c896836a95ad4e9c383bd1c8ff9d8e888c64fb3836daa9535e58372e9646b7b144219980a4389aca5da241c3ec11fbc9297bd7a94ac671ccec288604c23a0072b0c1ed069198959cacdc2574aff65b7eceffc391e21778a1775deceb3ec0990836df98d98a4f3f0dc854587230fbf59e4daa60e8240d74caf90f7e2cd014c1d5d707b2e44269d9a9caf133882fe1ebb2f4237f6282abe89639b357e9231418d0c41373229ae9edfa6815bec484cb79772c9e2a7d80912123558f79b539bb45d435f2a4446970f1e2123494740285cec3491b0a41a9fd7403bdc8cd239a87508039a77b48ee39a951a8bd196b583de2b93444aafd456d0cd92050fa6a816d5183c1d75e96df540c8ac3bb8638b971f0
cf3fb5b4a321487a1c8992b921de110f3d5bbb87369b25fe743ad7e789ca52d9f9fe62ccb103b78fe65eaa2cd47895022c590639c8f0c6a3999d8a5c71ed94d355815851b479f8d93eae90822294c96b39724b33491f8497b0bf7e1b995b37e4d759ff8a7958d194da6e00c475a6ddcf6efcb5fb4bb383c9b273da18d01e000dbe9c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618db8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea32293237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d7358448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a927ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757bf558bebd2ceec7f3c5dce04a4782f88c2c6036ae78ee206d0bc5289d20461a2e21908c2968c0699040a6fd866a577a99a9d2ec88745c815fd4a472c789244daae824d72ddc272aab68a8c3022e36f10454437c1886f3ff9927b64f232df414f27e429a4bef3083bc31a671d046ea5c1f5b8c3094d72868d9dfdc12c7334ac5f743cc5c365a9a6a15c1f240ac25880c7a9d1de290696cb766074a1d83d9278164adcf616c3bfabf63999a01966c998b7bb572774035a63ead49da73b5987f34775786645d0c5dd7c04a2f8a75dcae085213652f5bce3ea8b9b9bedd1cab3c5e9b88b152c9b8a7b79637d35911848b0c41e7cc7cca2ab4fe9a15f9c38bb4bb9390c4e2d8ce834ffd7a6cd85d7113d4521abb857774845c4291e6f6d010d97e3185bc799d83e3bb31501b3da786680df30fbc18eb41cbce611e8c0e9c72f69571ca10d3ef857d04d9c03ead7c6317d797a090fa1271ad9c7addfbcb412e9643d4fb33b1809c42623f474055fa9400a2027a7a885c8dfa4efe20666b4ee27d7529c134d7f28d53f175f6bf4b62faa2110d5b76f0f770c15e628181c1fcc18f970a9c34d24b2fc8c50ca9c07a7156ef4e5ff4bdf002eda0b11c1d359d0b59a54680704dbb9db63145
7879b27e0dfdbe50158fd9cf9b4cf77605c4ac4c95bd65fc9f6f9295a686647cb999090819cda700820c282c613cedcd218540bbc6f37b01c6567c4a1ea624f092a3a5cca2d6f0f0db231972fce627f0ecca0dee60f17551c5f8fdaeb5ab560b2ceb781cdb339361a0fbee1b9dffad59115138c8d6a70dda9ccc1bf0bbdd7fee15764845db875f6432559ff8dbc9055324431bc34e5b93d15da307317849eccd90c0c7b98870b9317c15a5959dcfb84c76dcc908c4fe6ba92126339bf06e458f6646df5e83ba7c3d35bc263b3222c8e9040068847749ca8e8f95045e4342aeb521eb3a5587ec268ed3aa6faf32b62b0bc41a9d549521f406fc3ec7d4dabb75e0d3e144d7cc882372d13746b6dcd481b1b229bcaec9f7422cdfb84e35c5d92171376cae5c86300822d729cd3a8479583bef09527027dba5f11263c5cbbeb3834b7a5c1cba9aa5fee0c95ec3f17a33ec3d8047fff799187f5ae2040bbe913c226c34c9fbe4389dd728984257a816892b3cae3e43191dd291f0eb50000000000000000420000000000000035000000000000000000000000000000060000000000000000100000000000001900000000000000480000000000001050edbc06b4bfc3ee108b66f7a8f772ca4d90e1a085f4a8398505920f7465bb44b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d3021ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85e58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a193440eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968ffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f839867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756afcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0f9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5f8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf8923490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99cc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8beccda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d22733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981fe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a5a2dce0a8a7f68bb74560f8f71837
c2c2ebbcbf7fffb42ae1896f13f7c7479a0b46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618db8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea32293237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d7358448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a927ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757bf558bebd2ceec7f3c5dce04a4782f88c2c6036ae78ee206d0bc5289d20461a2e21908c2968c0699040a6fd866a577a99a9d2ec88745c815fd4a472c789244daae824d72ddc272aab68a8c3022e36f10454437c1886f3ff9927b64f232df414f27e429a4bef3083bc31a671d046ea5c1f5b8c3094d72868d9dfdc12c7334ac5f743cc5c365a9a6a15c1f240ac25880c7a9d1de290696cb766074a1d83d9278164adcf616c3bfabf63999a01966c998b7bb572774035a63ead49da73b5987f34775786645d0c5dd7c04a2f8a75dcae085213652f5bce3ea8b9b9bedd1cab3c5e9b88b152c9b8a7b79637d35911848b0c41e7cc7cca2ab4fe9a15f9c38bb4bb9390c4e2d8ce834ffd7a6cd85d7113d4521abb857774845c4291e6f6d010d97e3185bc799d83e3bb31501b3da786680df30fbc18eb41cbce611e8c0e9c72f69571ca10d3ef857d04d9c03ead7c6317d797a090fa1271ad9c7addfbcb412e9643d4fb33b1809c42623f474055fa9400a2027a7a885c8dfa4efe20666b4ee27d7529c134d7f28d53f175f6bf4b62faa2110d5b76f0f770c15e628181c1fcc18f970a9c34d24b2fc8c50ca9c07a7156ef4e5ff4bdf002eda0b11c1d359d0b59a54680704dbb9db631457879b27e0dfdbe50158fd9cf9b4cf77605c4ac4c95bd65fc9f6f9295a686647cb999090819cda700820c282c613cedcd218540bbc6f37b01c6567c4a1ea624f092a3a5cca2d6f0f0db231972fce627f0
ecca0dee60f17551c5f8fdaeb5ab560b2ceb781cdb339361a0fbee1b9dffad59115138c8d6a70dda9ccc1bf0bbdd7fee15764845db875f6432559ff8dbc9055324431bc34e5b93d15da307317849eccd90c0c7b98870b9317c15a5959dcfb84c76dcc908c4fe6ba92126339bf06e458f6646df5e83ba7c3d35bc263b3222c8e9040068847749ca8e8f95045e4342aeb521eb3a5587ec268ed3aa6faf32b62b0bc41a9d549521f406fc30f3e39c5412c30550d1d07fb07ff0e546fbeea1988f6658f04a9b19693e5b99d84e35c5d92171376cae5c86300822d729cd3a8479583bef09527027dba5f11263c5cbbeb3834b7a5c1cba9aa5fee0c95ec3f17a33ec3d8047fff799187f5ae2040bbe913c226c34c9fbe4389dd728984257a816892b3cae3e43191dd291f0eb5" +} diff --git a/op-challenger2/game/fault/trace/asterisc/test_data/proofs/1.json b/op-challenger2/game/fault/trace/asterisc/test_data/proofs/1.json new file mode 100644 index 000000000000..311847daa5a0 --- /dev/null +++ b/op-challenger2/game/fault/trace/asterisc/test_data/proofs/1.json @@ -0,0 +1,2 @@ +{} + diff --git a/op-challenger2/game/fault/trace/asterisc/test_data/proofs/2.json b/op-challenger2/game/fault/trace/asterisc/test_data/proofs/2.json new file mode 100644 index 000000000000..96f58c8e8cb3 --- /dev/null +++ b/op-challenger2/game/fault/trace/asterisc/test_data/proofs/2.json @@ -0,0 +1,9 @@ +{ + "foo": 0, + "bar": "0x71f9eb93ff904e5c03c3425228ef75766db0c906ad239df9a7a7f0d9c6a89705", + "step": 0, + "pre": "0x03abd5c535c08bae7c4ad48fcae39b65f9c25239f65b4376c58638d262c97381", + "post": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "state-data": "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "proof-data": "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd" +} diff --git a/op-challenger2/game/fault/trace/asterisc/test_data/state.json b/op-challenger2/game/fault/trace/asterisc/test_data/state.json new file mode 100644 index 000000000000..a1bf2e5b412e --- /dev/null +++ b/op-challenger2/game/fault/trace/asterisc/test_data/state.json @@ -0,0 +1,40 @@ +{ + "pc": 0, + "exited": false, + "step": 0, + "witness": 
"wOSi8Cm62dDmKt1OGwxlLrSznk6zE4ghp7evP1rfrXYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIGCAAAAAAAAAAAAAAAAB/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", + "stateHash": [ + 3, + 33, + 111, + 220, + 74, + 123, + 253, + 76, + 113, + 96, + 250, + 148, + 109, + 27, + 254, + 69, + 29, + 19, + 255, + 50, + 218, + 73, + 102, + 9, + 254, + 24, + 53, + 82, + 130, + 185, + 16, + 198 + ] +} diff --git a/op-challenger2/game/fault/trace/cannon/executor.go b/op-challenger2/game/fault/trace/cannon/executor.go new file mode 100644 index 000000000000..d75eb61c75ba --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/executor.go @@ -0,0 +1,127 @@ +package cannon + +import ( + "context" + "fmt" + "math" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils" + "github.com/ethereum/go-ethereum/log" +) + +type Executor struct { + logger log.Logger + metrics CannonMetricer + l1 string + l1Beacon string + l2 string + inputs utils.LocalGameInputs + cannon string + server string + network string + rollupConfig string + l2Genesis string + absolutePreState string + snapshotFreq uint + infoFreq uint + selectSnapshot utils.SnapshotSelect + cmdExecutor utils.CmdExecutor +} + +func NewExecutor(logger log.Logger, m CannonMetricer, cfg *config.Config, prestate string, inputs utils.LocalGameInputs) *Executor { + return &Executor{ + logger: logger, + metrics: m, + l1: cfg.L1EthRpc, + l1Beacon: cfg.L1Beacon, + l2: cfg.L2Rpc, + inputs: inputs, + cannon: cfg.CannonBin, + server: cfg.CannonServer, + network: 
cfg.CannonNetwork, + rollupConfig: cfg.CannonRollupConfigPath, + l2Genesis: cfg.CannonL2GenesisPath, + absolutePreState: prestate, + snapshotFreq: cfg.CannonSnapshotFreq, + infoFreq: cfg.CannonInfoFreq, + selectSnapshot: utils.FindStartingSnapshot, + cmdExecutor: utils.RunCmd, + } +} + +// GenerateProof executes cannon to generate a proof at the specified trace index. +// The proof is stored at the specified directory. +func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) error { + return e.generateProof(ctx, dir, i, i) +} + +// generateProof executes cannon from the specified starting trace index until the end trace index. +// The proof is stored at the specified directory. +func (e *Executor) generateProof(ctx context.Context, dir string, begin uint64, end uint64, extraCannonArgs ...string) error { + snapshotDir := filepath.Join(dir, utils.SnapsDir) + start, err := e.selectSnapshot(e.logger, snapshotDir, e.absolutePreState, begin) + if err != nil { + return fmt.Errorf("find starting snapshot: %w", err) + } + proofDir := filepath.Join(dir, utils.ProofsDir) + dataDir := utils.PreimageDir(dir) + lastGeneratedState := filepath.Join(dir, utils.FinalState) + args := []string{ + "run", + "--input", start, + "--output", lastGeneratedState, + "--meta", "", + "--info-at", "%" + strconv.FormatUint(uint64(e.infoFreq), 10), + "--proof-at", "=" + strconv.FormatUint(end, 10), + "--proof-fmt", filepath.Join(proofDir, "%d.json.gz"), + "--snapshot-at", "%" + strconv.FormatUint(uint64(e.snapshotFreq), 10), + "--snapshot-fmt", filepath.Join(snapshotDir, "%d.json.gz"), + } + if end < math.MaxUint64 { + args = append(args, "--stop-at", "="+strconv.FormatUint(end+1, 10)) + } + args = append(args, extraCannonArgs...) 
+ args = append(args, + "--", + e.server, "--server", + "--l1", e.l1, + "--l1.beacon", e.l1Beacon, + "--l2", e.l2, + "--datadir", dataDir, + "--l1.head", e.inputs.L1Head.Hex(), + "--l2.head", e.inputs.L2Head.Hex(), + "--l2.outputroot", e.inputs.L2OutputRoot.Hex(), + "--l2.claim", e.inputs.L2Claim.Hex(), + "--l2.blocknumber", e.inputs.L2BlockNumber.Text(10), + ) + if e.network != "" { + args = append(args, "--network", e.network) + } + if e.rollupConfig != "" { + args = append(args, "--rollup.config", e.rollupConfig) + } + if e.l2Genesis != "" { + args = append(args, "--l2.genesis", e.l2Genesis) + } + + if err := os.MkdirAll(snapshotDir, 0755); err != nil { + return fmt.Errorf("could not create snapshot directory %v: %w", snapshotDir, err) + } + if err := os.MkdirAll(dataDir, 0755); err != nil { + return fmt.Errorf("could not create preimage cache directory %v: %w", dataDir, err) + } + if err := os.MkdirAll(proofDir, 0755); err != nil { + return fmt.Errorf("could not create proofs directory %v: %w", proofDir, err) + } + e.logger.Info("Generating trace", "proof", end, "cmd", e.cannon, "args", strings.Join(args, ", ")) + execStart := time.Now() + err = e.cmdExecutor(ctx, e.logger.New("proof", end), e.cannon, args...) 
+ e.metrics.RecordCannonExecutionTime(time.Since(execStart).Seconds()) + return err +} diff --git a/op-challenger2/game/fault/trace/cannon/executor_test.go b/op-challenger2/game/fault/trace/cannon/executor_test.go new file mode 100644 index 000000000000..94988f74b594 --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/executor_test.go @@ -0,0 +1,227 @@ +package cannon + +import ( + "context" + "fmt" + "math" + "math/big" + "os" + "path/filepath" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +const execTestCannonPrestate = "/foo/pre.json" + +func TestGenerateProof(t *testing.T) { + input := "starting.json" + tempDir := t.TempDir() + dir := filepath.Join(tempDir, "gameDir") + cfg := config.NewConfig(common.Address{0xbb}, "http://localhost:8888", "http://localhost:9000", "http://localhost:9096", "http://localhost:9095", tempDir, config.TraceTypeCannon) + cfg.L2Rpc = "http://localhost:9999" + prestate := "pre.json" + cfg.CannonBin = "./bin/cannon" + cfg.CannonServer = "./bin/op-program" + cfg.CannonSnapshotFreq = 500 + cfg.CannonInfoFreq = 900 + + inputs := utils.LocalGameInputs{ + L1Head: common.Hash{0x11}, + L2Head: common.Hash{0x22}, + L2OutputRoot: common.Hash{0x33}, + L2Claim: common.Hash{0x44}, + L2BlockNumber: big.NewInt(3333), + } + captureExec := func(t *testing.T, cfg config.Config, proofAt uint64) (string, string, map[string]string) { + m := &cannonDurationMetrics{} + executor := NewExecutor(testlog.Logger(t, log.LevelInfo), m, &cfg, prestate, inputs) + executor.selectSnapshot = func(logger log.Logger, dir string, absolutePreState string, i uint64) (string, error) { + return input, 
nil
			}
			var binary string
			var subcommand string
			args := make(map[string]string)
			// Capture the binary, subcommand and the flag/value pairs cannon would be invoked with.
			executor.cmdExecutor = func(ctx context.Context, l log.Logger, b string, a ...string) error {
				binary = b
				subcommand = a[0]
				for i := 1; i < len(a); {
					if a[i] == "--" {
						// Skip over the divider between cannon and server program
						i += 1
						continue
					}
					args[a[i]] = a[i+1]
					i += 2
				}
				return nil
			}
			err := executor.GenerateProof(context.Background(), dir, proofAt)
			require.NoError(t, err)
			require.Equal(t, 1, m.executionTimeRecordCount, "Should record cannon execution time")
			return binary, subcommand, args
		}

	// Verifies the full argument list when a named network is configured.
	t.Run("Network", func(t *testing.T) {
		cfg.CannonNetwork = "mainnet"
		cfg.CannonRollupConfigPath = ""
		cfg.CannonL2GenesisPath = ""
		binary, subcommand, args := captureExec(t, cfg, 150_000_000)
		require.DirExists(t, filepath.Join(dir, utils.PreimagesDir))
		require.DirExists(t, filepath.Join(dir, utils.ProofsDir))
		require.DirExists(t, filepath.Join(dir, utils.SnapsDir))
		require.Equal(t, cfg.CannonBin, binary)
		require.Equal(t, "run", subcommand)
		require.Equal(t, input, args["--input"])
		require.Contains(t, args, "--meta")
		require.Equal(t, "", args["--meta"])
		require.Equal(t, filepath.Join(dir, utils.FinalState), args["--output"])
		require.Equal(t, "=150000000", args["--proof-at"])
		require.Equal(t, "=150000001", args["--stop-at"])
		require.Equal(t, "%500", args["--snapshot-at"])
		require.Equal(t, "%900", args["--info-at"])
		// Slight quirk of how we pair off args
		// The server binary winds up as the key and the first arg --server as the value which has no value
		// Then everything else pairs off correctly again
		require.Equal(t, "--server", args[cfg.CannonServer])
		require.Equal(t, cfg.L1EthRpc, args["--l1"])
		require.Equal(t, cfg.L1Beacon, args["--l1.beacon"])
		require.Equal(t, cfg.L2Rpc, args["--l2"])
		require.Equal(t, filepath.Join(dir, utils.PreimagesDir), args["--datadir"])
		require.Equal(t, filepath.Join(dir,
			utils.ProofsDir, "%d.json.gz"), args["--proof-fmt"])
		require.Equal(t, filepath.Join(dir, utils.SnapsDir, "%d.json.gz"), args["--snapshot-fmt"])
		require.Equal(t, cfg.CannonNetwork, args["--network"])
		require.NotContains(t, args, "--rollup.config")
		require.NotContains(t, args, "--l2.genesis")

		// Local game inputs
		require.Equal(t, inputs.L1Head.Hex(), args["--l1.head"])
		require.Equal(t, inputs.L2Head.Hex(), args["--l2.head"])
		require.Equal(t, inputs.L2OutputRoot.Hex(), args["--l2.outputroot"])
		require.Equal(t, inputs.L2Claim.Hex(), args["--l2.claim"])
		require.Equal(t, "3333", args["--l2.blocknumber"])
	})

	// With no network set, explicit rollup config and genesis paths are forwarded instead.
	t.Run("RollupAndGenesis", func(t *testing.T) {
		cfg.CannonNetwork = ""
		cfg.CannonRollupConfigPath = "rollup.json"
		cfg.CannonL2GenesisPath = "genesis.json"
		_, _, args := captureExec(t, cfg, 150_000_000)
		require.NotContains(t, args, "--network")
		require.Equal(t, cfg.CannonRollupConfigPath, args["--rollup.config"])
		require.Equal(t, cfg.CannonL2GenesisPath, args["--l2.genesis"])
	})

	t.Run("NoStopAtWhenProofIsMaxUInt", func(t *testing.T) {
		cfg.CannonNetwork = "mainnet"
		cfg.CannonRollupConfigPath = "rollup.json"
		cfg.CannonL2GenesisPath = "genesis.json"
		_, _, args := captureExec(t, cfg, math.MaxUint64)
		// stop-at would need to be one more than the proof step which would overflow back to 0
		// so expect that it will be omitted. We'll ultimately want cannon to execute until the program exits.
		require.NotContains(t, args, "--stop-at")
	})
}

// TestRunCmdLogsOutput checks that stdout of the executed command is surfaced
// through the logger at info level. Skipped when /bin/echo is unavailable.
func TestRunCmdLogsOutput(t *testing.T) {
	bin := "/bin/echo"
	if _, err := os.Stat(bin); err != nil {
		t.Skip(bin, " not available", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	logger, logs := testlog.CaptureLogger(t, log.LevelInfo)
	err := utils.RunCmd(ctx, logger, bin, "Hello World")
	require.NoError(t, err)
	levelFilter := testlog.NewLevelFilter(log.LevelInfo)
	msgFilter := testlog.NewMessageFilter("Hello World")
	require.NotNil(t, logs.FindLog(levelFilter, msgFilter))
}

// TestFindStartingSnapshot covers the snapshot-selection rules: use the
// prestate when no usable snapshot exists, otherwise pick the closest snapshot
// strictly before the requested trace index, ignoring non-snapshot entries.
func TestFindStartingSnapshot(t *testing.T) {
	logger := testlog.Logger(t, log.LevelInfo)

	// withSnapshots creates a temp dir pre-populated with empty files of the given names.
	withSnapshots := func(t *testing.T, files ...string) string {
		dir := t.TempDir()
		for _, file := range files {
			require.NoError(t, os.WriteFile(fmt.Sprintf("%v/%v", dir, file), nil, 0o644))
		}
		return dir
	}

	t.Run("UsePrestateWhenSnapshotsDirDoesNotExist", func(t *testing.T) {
		dir := t.TempDir()
		snapshot, err := utils.FindStartingSnapshot(logger, filepath.Join(dir, "doesNotExist"), execTestCannonPrestate, 1200)
		require.NoError(t, err)
		require.Equal(t, execTestCannonPrestate, snapshot)
	})

	t.Run("UsePrestateWhenSnapshotsDirEmpty", func(t *testing.T) {
		dir := withSnapshots(t)
		snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 1200)
		require.NoError(t, err)
		require.Equal(t, execTestCannonPrestate, snapshot)
	})

	// Note: non-.gz names are not treated as snapshots, so the prestate is used.
	t.Run("UsePrestateWhenNoSnapshotBeforeTraceIndex", func(t *testing.T) {
		dir := withSnapshots(t, "100.json", "200.json")
		snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 99)
		require.NoError(t, err)
		require.Equal(t, execTestCannonPrestate, snapshot)

		snapshot, err = utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 100)
		require.NoError(t, err)
		require.Equal(t, execTestCannonPrestate, snapshot)
	})

	t.Run("UseClosestAvailableSnapshot", func(t
		*testing.T) {
		dir := withSnapshots(t, "100.json.gz", "123.json.gz", "250.json.gz")

		snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 101)
		require.NoError(t, err)
		require.Equal(t, filepath.Join(dir, "100.json.gz"), snapshot)

		snapshot, err = utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 123)
		require.NoError(t, err)
		require.Equal(t, filepath.Join(dir, "100.json.gz"), snapshot)

		snapshot, err = utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 124)
		require.NoError(t, err)
		require.Equal(t, filepath.Join(dir, "123.json.gz"), snapshot)

		snapshot, err = utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 256)
		require.NoError(t, err)
		require.Equal(t, filepath.Join(dir, "250.json.gz"), snapshot)
	})

	t.Run("IgnoreDirectories", func(t *testing.T) {
		dir := withSnapshots(t, "100.json.gz")
		require.NoError(t, os.Mkdir(filepath.Join(dir, "120.json.gz"), 0o777))
		snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 150)
		require.NoError(t, err)
		require.Equal(t, filepath.Join(dir, "100.json.gz"), snapshot)
	})

	t.Run("IgnoreUnexpectedFiles", func(t *testing.T) {
		dir := withSnapshots(t, ".file", "100.json.gz", "foo", "bar.json.gz")
		snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 150)
		require.NoError(t, err)
		require.Equal(t, filepath.Join(dir, "100.json.gz"), snapshot)
	})
}

// cannonDurationMetrics counts RecordCannonExecutionTime calls so tests can
// assert the executor reports its timing metric.
type cannonDurationMetrics struct {
	metrics.NoopMetricsImpl
	executionTimeRecordCount int
}

func (c *cannonDurationMetrics) RecordCannonExecutionTime(_ float64) {
	c.executionTimeRecordCount++
}
diff --git a/op-challenger2/game/fault/trace/cannon/prestate.go b/op-challenger2/game/fault/trace/cannon/prestate.go
new file mode 100644
index 000000000000..5c4044e113d0
--- /dev/null
+++ b/op-challenger2/game/fault/trace/cannon/prestate.go
@@ -0,0 +1,47 @@
package cannon

import (
	"context"
	"fmt"
"github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" +) + +var _ types.PrestateProvider = (*CannonPrestateProvider)(nil) + +type CannonPrestateProvider struct { + prestate string + + prestateCommitment common.Hash +} + +func NewPrestateProvider(prestate string) *CannonPrestateProvider { + return &CannonPrestateProvider{prestate: prestate} +} + +func (p *CannonPrestateProvider) absolutePreState() ([]byte, error) { + state, err := parseState(p.prestate) + if err != nil { + return nil, fmt.Errorf("cannot load absolute pre-state: %w", err) + } + return state.EncodeWitness(), nil +} + +func (p *CannonPrestateProvider) AbsolutePreStateCommitment(_ context.Context) (common.Hash, error) { + if p.prestateCommitment != (common.Hash{}) { + return p.prestateCommitment, nil + } + state, err := p.absolutePreState() + if err != nil { + return common.Hash{}, fmt.Errorf("cannot load absolute pre-state: %w", err) + } + hash, err := mipsevm.StateWitness(state).StateHash() + if err != nil { + return common.Hash{}, fmt.Errorf("cannot hash absolute pre-state: %w", err) + } + p.prestateCommitment = hash + return hash, nil +} diff --git a/op-challenger2/game/fault/trace/cannon/prestate_test.go b/op-challenger2/game/fault/trace/cannon/prestate_test.go new file mode 100644 index 000000000000..1297da54bd88 --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/prestate_test.go @@ -0,0 +1,85 @@ +package cannon + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func newCannonPrestateProvider(dataDir string, prestate string) *CannonPrestateProvider { + return &CannonPrestateProvider{ + prestate: filepath.Join(dataDir, prestate), + } +} + +func TestAbsolutePreStateCommitment(t *testing.T) { + dataDir := t.TempDir() + 
	prestate := "state.json"

	t.Run("StateUnavailable", func(t *testing.T) {
		provider := newCannonPrestateProvider("/dir/does/not/exist", prestate)
		_, err := provider.AbsolutePreStateCommitment(context.Background())
		require.ErrorIs(t, err, os.ErrNotExist)
	})

	t.Run("InvalidStateFile", func(t *testing.T) {
		setupPreState(t, dataDir, "invalid.json")
		provider := newCannonPrestateProvider(dataDir, prestate)
		_, err := provider.AbsolutePreStateCommitment(context.Background())
		require.ErrorContains(t, err, "invalid mipsevm state")
	})

	// The commitment must equal the hash of the equivalent in-memory state's witness.
	t.Run("ExpectedAbsolutePreState", func(t *testing.T) {
		setupPreState(t, dataDir, "state.json")
		provider := newCannonPrestateProvider(dataDir, prestate)
		actual, err := provider.AbsolutePreStateCommitment(context.Background())
		require.NoError(t, err)
		// Mirrors the contents of test_data/state.json.
		state := mipsevm.State{
			Memory:         mipsevm.NewMemory(),
			PreimageKey:    common.HexToHash("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
			PreimageOffset: 0,
			PC:             0,
			NextPC:         1,
			LO:             0,
			HI:             0,
			Heap:           0,
			ExitCode:       0,
			Exited:         false,
			Step:           0,
			Registers:      [32]uint32{},
		}
		expected, err := state.EncodeWitness().StateHash()
		require.NoError(t, err)
		require.Equal(t, expected, actual)
	})

	t.Run("CacheAbsolutePreState", func(t *testing.T) {
		setupPreState(t, dataDir, prestate)
		provider := newCannonPrestateProvider(dataDir, prestate)
		first, err := provider.AbsolutePreStateCommitment(context.Background())
		require.NoError(t, err)

		// Remove the prestate from disk
		require.NoError(t, os.Remove(provider.prestate))

		// Value should still be available from cache
		cached, err := provider.AbsolutePreStateCommitment(context.Background())
		require.NoError(t, err)
		require.Equal(t, first, cached)
	})
}

// setupPreState copies the named fixture from test_data into dataDir as
// state.json, regardless of the fixture's original filename.
func setupPreState(t *testing.T, dataDir string, filename string) {
	srcDir := filepath.Join("test_data")
	path := filepath.Join(srcDir, filename)
	file, err := testData.ReadFile(path)
	require.NoErrorf(t, err,
		"reading %v", path)
	err = os.WriteFile(filepath.Join(dataDir, "state.json"), file, 0o644)
	require.NoErrorf(t, err, "writing %v", path)
}
diff --git a/op-challenger2/game/fault/trace/cannon/provider.go b/op-challenger2/game/fault/trace/cannon/provider.go
new file mode 100644
index 000000000000..8de4510196ee
--- /dev/null
+++ b/op-challenger2/game/fault/trace/cannon/provider.go
@@ -0,0 +1,214 @@
package cannon

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"path/filepath"

	"github.com/ethereum-optimism/optimism/op-challenger2/config"
	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils"
	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types"
	"github.com/ethereum-optimism/optimism/op-program/host/kvstore"
	"github.com/ethereum-optimism/optimism/op-service/ioutil"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/cannon/mipsevm"
)

// CannonMetricer records how long cannon executions take.
type CannonMetricer interface {
	RecordCannonExecutionTime(t float64)
}

// CannonTraceProvider serves trace claims and step data by running cannon on
// demand and caching its proof output under dir.
type CannonTraceProvider struct {
	logger         log.Logger
	dir            string
	prestate       string
	generator      utils.ProofGenerator
	gameDepth      types.Depth
	preimageLoader *utils.PreimageLoader

	types.PrestateProvider

	// lastStep stores the last step in the actual trace if known. 0 indicates unknown.
	// Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace.
+ lastStep uint64 +} + +func NewTraceProvider(logger log.Logger, m CannonMetricer, cfg *config.Config, prestateProvider types.PrestateProvider, prestate string, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *CannonTraceProvider { + return &CannonTraceProvider{ + logger: logger, + dir: dir, + prestate: prestate, + generator: NewExecutor(logger, m, cfg, prestate, localInputs), + gameDepth: gameDepth, + preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(utils.PreimageDir(dir)).Get), + PrestateProvider: prestateProvider, + } +} + +func (p *CannonTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) { + traceIndex := pos.TraceIndex(p.gameDepth) + if !traceIndex.IsUint64() { + return common.Hash{}, errors.New("trace index out of bounds") + } + proof, err := p.loadProof(ctx, traceIndex.Uint64()) + if err != nil { + return common.Hash{}, err + } + value := proof.ClaimValue + + if value == (common.Hash{}) { + return common.Hash{}, errors.New("proof missing post hash") + } + return value, nil +} + +func (p *CannonTraceProvider) GetStepData(ctx context.Context, pos types.Position) ([]byte, []byte, *types.PreimageOracleData, error) { + traceIndex := pos.TraceIndex(p.gameDepth) + if !traceIndex.IsUint64() { + return nil, nil, nil, errors.New("trace index out of bounds") + } + proof, err := p.loadProof(ctx, traceIndex.Uint64()) + if err != nil { + return nil, nil, nil, err + } + value := ([]byte)(proof.StateData) + if len(value) == 0 { + return nil, nil, nil, errors.New("proof missing state data") + } + data := ([]byte)(proof.ProofData) + if data == nil { + return nil, nil, nil, errors.New("proof missing proof data") + } + oracleData, err := p.preimageLoader.LoadPreimage(proof) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to load preimage: %w", err) + } + return value, data, oracleData, nil +} + +func (p *CannonTraceProvider) GetL2BlockNumberChallenge(_ context.Context) 
(*types.InvalidL2BlockNumberChallenge, error) {
	// The cannon trace provider never disputes the claimed L2 block number.
	return nil, types.ErrL2BlockNumberValid
}

// loadProof will attempt to load or generate the proof data at the specified index.
// If the requested index is beyond the end of the actual trace it is extended with no-op instructions.
func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*utils.ProofData, error) {
	// Attempt to read the last step from disk cache
	if p.lastStep == 0 {
		step, err := utils.ReadLastStep(p.dir)
		if err != nil {
			// Best effort: fall through and let execution rediscover the end of the trace.
			p.logger.Warn("Failed to read last step from disk cache", "err", err)
		} else {
			p.lastStep = step
		}
	}
	// If the last step is tracked, set i to the last step to generate or load the final proof
	if p.lastStep != 0 && i > p.lastStep {
		i = p.lastStep
	}
	path := filepath.Join(p.dir, utils.ProofsDir, fmt.Sprintf("%d.json.gz", i))
	file, err := ioutil.OpenDecompressed(path)
	if errors.Is(err, os.ErrNotExist) {
		// Proof not cached on disk yet - run cannon to generate it.
		if err := p.generator.GenerateProof(ctx, p.dir, i); err != nil {
			return nil, fmt.Errorf("generate cannon trace with proof at %v: %w", i, err)
		}
		// Try opening the file again now and it should exist.
		file, err = ioutil.OpenDecompressed(path)
		if errors.Is(err, os.ErrNotExist) {
			// Expected proof wasn't generated, check if we reached the end of execution
			state, err := p.finalState() // shadows the outer err from OpenDecompressed
			if err != nil {
				return nil, err
			}
			if state.Exited && state.Step <= i {
				p.logger.Warn("Requested proof was after the program exited", "proof", i, "last", state.Step)
				// The final instruction has already been applied to this state, so the last step we can execute
				// is one before its Step value.
				p.lastStep = state.Step - 1
				// Extend the trace out to the full length using a no-op instruction that doesn't change any state
				// No execution is done, so no proof-data or oracle values are required.
				witness := state.EncodeWitness()
				witnessHash, err := mipsevm.StateWitness(witness).StateHash()
				if err != nil {
					return nil, fmt.Errorf("cannot hash witness: %w", err)
				}
				proof := &utils.ProofData{
					ClaimValue:   witnessHash,
					StateData:    hexutil.Bytes(witness),
					ProofData:    []byte{},
					OracleKey:    nil,
					OracleValue:  nil,
					OracleOffset: 0,
				}
				// Persist the discovered last step so later lookups can skip re-execution (best effort).
				if err := utils.WriteLastStep(p.dir, proof, p.lastStep); err != nil {
					p.logger.Warn("Failed to write last step to disk cache", "step", p.lastStep)
				}
				return proof, nil
			} else {
				return nil, fmt.Errorf("expected proof not generated but final state was not exited, requested step %v, final state at step %v", i, state.Step)
			}
		}
	}
	if err != nil {
		return nil, fmt.Errorf("cannot open proof file (%v): %w", path, err)
	}
	defer file.Close()
	var proof utils.ProofData
	err = json.NewDecoder(file).Decode(&proof)
	if err != nil {
		return nil, fmt.Errorf("failed to read proof (%v): %w", path, err)
	}
	return &proof, nil
}

// finalState parses the state file cannon wrote when it finished executing.
func (c *CannonTraceProvider) finalState() (*mipsevm.State, error) {
	state, err := parseState(filepath.Join(c.dir, utils.FinalState))
	if err != nil {
		return nil, fmt.Errorf("cannot read final state: %w", err)
	}
	return state, nil
}

// CannonTraceProviderForTest is a CannonTraceProvider that can find the step referencing the preimage read
// Only to be used for testing
type CannonTraceProviderForTest struct {
	*CannonTraceProvider
}

// NewTraceProviderForTest builds a test-only provider from the config's absolute
// prestate; note it leaves the embedded PrestateProvider unset.
func NewTraceProviderForTest(logger log.Logger, m CannonMetricer, cfg *config.Config, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *CannonTraceProviderForTest {
	p := &CannonTraceProvider{
		logger:         logger,
		dir:            dir,
		prestate:       cfg.CannonAbsolutePreState,
		generator:      NewExecutor(logger, m, cfg, cfg.CannonAbsolutePreState, localInputs),
		gameDepth:      gameDepth,
		preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(utils.PreimageDir(dir)).Get),
	}
	return &CannonTraceProviderForTest{p}
}

func (p
*CannonTraceProviderForTest) FindStep(ctx context.Context, start uint64, preimage utils.PreimageOpt) (uint64, error) { + // Run cannon to find the step that meets the preimage conditions + if err := p.generator.(*Executor).generateProof(ctx, p.dir, start, math.MaxUint64, preimage()...); err != nil { + return 0, fmt.Errorf("generate cannon trace (until preimage read): %w", err) + } + // Load the step from the state cannon finished with + state, err := p.finalState() + if err != nil { + return 0, fmt.Errorf("failed to load final state: %w", err) + } + // Check we didn't get to the end of the trace without finding the preimage read we were looking for + if state.Exited { + return 0, fmt.Errorf("preimage read not found: %w", io.EOF) + } + // The state is the post-state so the step we want to execute to read the preimage is step - 1. + return state.Step - 1, nil +} diff --git a/op-challenger2/game/fault/trace/cannon/provider_test.go b/op-challenger2/game/fault/trace/cannon/provider_test.go new file mode 100644 index 000000000000..da9fa5efcab7 --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/provider_test.go @@ -0,0 +1,276 @@ +package cannon + +import ( + "context" + "embed" + "encoding/json" + "fmt" + "math" + "math/big" + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +//go:embed test_data +var testData embed.FS + +func PositionFromTraceIndex(provider *CannonTraceProvider, idx *big.Int) types.Position { + return types.NewPosition(provider.gameDepth, idx) +} + +func TestGet(t *testing.T) { + dataDir, prestate := 
setupTestData(t) + t.Run("ExistingProof", func(t *testing.T) { + provider, generator := setupWithTestData(t, dataDir, prestate) + value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, common.Big0)) + require.NoError(t, err) + require.Equal(t, common.HexToHash("0x45fd9aa59768331c726e719e76aa343e73123af888804604785ae19506e65e87"), value) + require.Empty(t, generator.generated) + }) + + t.Run("ErrorsTraceIndexOutOfBounds", func(t *testing.T) { + provider, generator := setupWithTestData(t, dataDir, prestate) + largePosition := PositionFromTraceIndex(provider, new(big.Int).Mul(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(2))) + _, err := provider.Get(context.Background(), largePosition) + require.ErrorContains(t, err, "trace index out of bounds") + require.Empty(t, generator.generated) + }) + + t.Run("ProofAfterEndOfTrace", func(t *testing.T) { + provider, generator := setupWithTestData(t, dataDir, prestate) + generator.finalState = &mipsevm.State{ + Memory: &mipsevm.Memory{}, + Step: 10, + Exited: true, + } + value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) + require.NoError(t, err) + require.Contains(t, generator.generated, 7000, "should have tried to generate the proof") + stateHash, err := generator.finalState.EncodeWitness().StateHash() + require.NoError(t, err) + require.Equal(t, stateHash, value) + }) + + t.Run("MissingPostHash", func(t *testing.T) { + provider, generator := setupWithTestData(t, dataDir, prestate) + _, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(1))) + require.ErrorContains(t, err, "missing post hash") + require.Empty(t, generator.generated) + }) + + t.Run("IgnoreUnknownFields", func(t *testing.T) { + provider, generator := setupWithTestData(t, dataDir, prestate) + value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(2))) + require.NoError(t, err) + expected := 
common.HexToHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + require.Equal(t, expected, value) + require.Empty(t, generator.generated) + }) +} + +func TestGetStepData(t *testing.T) { + t.Run("ExistingProof", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, new(big.Int))) + require.NoError(t, err) + expected := common.FromHex("b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f400000000000000000000000000000000000000000000000000000000000000000000000000") + require.Equal(t, expected, value) + expectedProof := common.FromHex("08028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004") + require.Equal(t, expectedProof, proof) + // TODO: Need to add some oracle data + require.Nil(t, data) + require.Empty(t, generator.generated) + }) + + t.Run("ErrorsTraceIndexOutOfBounds", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + largePosition := PositionFromTraceIndex(provider, new(big.Int).Mul(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(2))) + _, _, _, err := provider.GetStepData(context.Background(), largePosition) + require.ErrorContains(t, err, "trace index out of bounds") + require.Empty(t, generator.generated) + }) + + t.Run("GenerateProof", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + generator.finalState = &mipsevm.State{ + Memory: &mipsevm.Memory{}, + Step: 10, + Exited: true, + } + generator.proof = &utils.ProofData{ + ClaimValue: common.Hash{0xaa}, + StateData: []byte{0xbb}, + ProofData: []byte{0xcc}, + OracleKey: common.Hash{0xdd}.Bytes(), + OracleValue: []byte{0xdd}, + OracleOffset: 10, + } + preimage, proof, data, err := provider.GetStepData(context.Background(), 
PositionFromTraceIndex(provider, big.NewInt(4))) + require.NoError(t, err) + require.Contains(t, generator.generated, 4, "should have tried to generate the proof") + + require.EqualValues(t, generator.proof.StateData, preimage) + require.EqualValues(t, generator.proof.ProofData, proof) + expectedData := types.NewPreimageOracleData(generator.proof.OracleKey, generator.proof.OracleValue, generator.proof.OracleOffset) + require.EqualValues(t, expectedData, data) + }) + + t.Run("ProofAfterEndOfTrace", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + generator.finalState = &mipsevm.State{ + Memory: &mipsevm.Memory{}, + Step: 10, + Exited: true, + } + generator.proof = &utils.ProofData{ + ClaimValue: common.Hash{0xaa}, + StateData: []byte{0xbb}, + ProofData: []byte{0xcc}, + OracleKey: common.Hash{0xdd}.Bytes(), + OracleValue: []byte{0xdd}, + OracleOffset: 10, + } + preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) + require.NoError(t, err) + require.Contains(t, generator.generated, 7000, "should have tried to generate the proof") + + witness := generator.finalState.EncodeWitness() + require.EqualValues(t, witness, preimage) + require.Equal(t, []byte{}, proof) + require.Nil(t, data) + }) + + t.Run("ReadLastStepFromDisk", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, initGenerator := setupWithTestData(t, dataDir, prestate) + initGenerator.finalState = &mipsevm.State{ + Memory: &mipsevm.Memory{}, + Step: 10, + Exited: true, + } + initGenerator.proof = &utils.ProofData{ + ClaimValue: common.Hash{0xaa}, + StateData: []byte{0xbb}, + ProofData: []byte{0xcc}, + OracleKey: common.Hash{0xdd}.Bytes(), + OracleValue: []byte{0xdd}, + OracleOffset: 10, + } + _, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) + require.NoError(t, err) + 
require.Contains(t, initGenerator.generated, 7000, "should have tried to generate the proof") + + provider, generator := setupWithTestData(t, dataDir, prestate) + generator.finalState = &mipsevm.State{ + Memory: &mipsevm.Memory{}, + Step: 10, + Exited: true, + } + generator.proof = &utils.ProofData{ + ClaimValue: common.Hash{0xaa}, + StateData: []byte{0xbb}, + ProofData: []byte{0xcc}, + } + preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) + require.NoError(t, err) + require.Empty(t, generator.generated, "should not have to generate the proof again") + + require.EqualValues(t, initGenerator.finalState.EncodeWitness(), preimage) + require.Empty(t, proof) + require.Nil(t, data) + }) + + t.Run("MissingStateData", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + _, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(1))) + require.ErrorContains(t, err, "missing state data") + require.Empty(t, generator.generated) + }) + + t.Run("IgnoreUnknownFields", func(t *testing.T) { + dataDir, prestate := setupTestData(t) + provider, generator := setupWithTestData(t, dataDir, prestate) + value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(2))) + require.NoError(t, err) + expected := common.FromHex("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") + require.Equal(t, expected, value) + expectedProof := common.FromHex("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd") + require.Equal(t, expectedProof, proof) + require.Empty(t, generator.generated) + require.Nil(t, data) + }) +} + +func setupTestData(t *testing.T) (string, string) { + srcDir := filepath.Join("test_data", "proofs") + entries, err := testData.ReadDir(srcDir) + require.NoError(t, err) + dataDir := t.TempDir() + 
require.NoError(t, os.Mkdir(filepath.Join(dataDir, utils.ProofsDir), 0o777)) + for _, entry := range entries { + path := filepath.Join(srcDir, entry.Name()) + file, err := testData.ReadFile(path) + require.NoErrorf(t, err, "reading %v", path) + proofFile := filepath.Join(dataDir, utils.ProofsDir, entry.Name()+".gz") + err = ioutil.WriteCompressedBytes(proofFile, file, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) + require.NoErrorf(t, err, "writing %v", path) + } + return dataDir, "state.json" +} + +func setupWithTestData(t *testing.T, dataDir string, prestate string) (*CannonTraceProvider, *stubGenerator) { + generator := &stubGenerator{} + return &CannonTraceProvider{ + logger: testlog.Logger(t, log.LevelInfo), + dir: dataDir, + generator: generator, + prestate: filepath.Join(dataDir, prestate), + gameDepth: 63, + }, generator +} + +type stubGenerator struct { + generated []int // Using int makes assertions easier + finalState *mipsevm.State + proof *utils.ProofData +} + +func (e *stubGenerator) GenerateProof(ctx context.Context, dir string, i uint64) error { + e.generated = append(e.generated, int(i)) + var proofFile string + var data []byte + var err error + if e.finalState != nil && e.finalState.Step <= i { + // Requesting a trace index past the end of the trace + proofFile = filepath.Join(dir, utils.FinalState) + data, err = json.Marshal(e.finalState) + if err != nil { + return err + } + return ioutil.WriteCompressedBytes(proofFile, data, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) + } + if e.proof != nil { + proofFile = filepath.Join(dir, utils.ProofsDir, fmt.Sprintf("%d.json.gz", i)) + data, err = json.Marshal(e.proof) + if err != nil { + return err + } + return ioutil.WriteCompressedBytes(proofFile, data, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) + } + return nil +} diff --git a/op-challenger2/game/fault/trace/cannon/state.go b/op-challenger2/game/fault/trace/cannon/state.go new file mode 100644 index 000000000000..db0c9c50a94c --- /dev/null +++ 
b/op-challenger2/game/fault/trace/cannon/state.go @@ -0,0 +1,27 @@ +package cannon + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/op-service/ioutil" +) + +func parseState(path string) (*mipsevm.State, error) { + file, err := ioutil.OpenDecompressed(path) + if err != nil { + return nil, fmt.Errorf("cannot open state file (%v): %w", path, err) + } + return parseStateFromReader(file) +} + +func parseStateFromReader(in io.ReadCloser) (*mipsevm.State, error) { + defer in.Close() + var state mipsevm.State + if err := json.NewDecoder(in).Decode(&state); err != nil { + return nil, fmt.Errorf("invalid mipsevm state: %w", err) + } + return &state, nil +} diff --git a/op-challenger2/game/fault/trace/cannon/state_test.go b/op-challenger2/game/fault/trace/cannon/state_test.go new file mode 100644 index 000000000000..b437d307c545 --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/state_test.go @@ -0,0 +1,50 @@ +package cannon + +import ( + "compress/gzip" + _ "embed" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/stretchr/testify/require" +) + +//go:embed test_data/state.json +var testState []byte + +func TestLoadState(t *testing.T) { + t.Run("Uncompressed", func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "state.json") + require.NoError(t, os.WriteFile(path, testState, 0644)) + + state, err := parseState(path) + require.NoError(t, err) + + var expected mipsevm.State + require.NoError(t, json.Unmarshal(testState, &expected)) + require.Equal(t, &expected, state) + }) + + t.Run("Gzipped", func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "state.json.gz") + f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644) + require.NoError(t, err) + defer f.Close() + writer := gzip.NewWriter(f) + _, err = writer.Write(testState) + require.NoError(t, 
err) + require.NoError(t, writer.Close()) + + state, err := parseState(path) + require.NoError(t, err) + + var expected mipsevm.State + require.NoError(t, json.Unmarshal(testState, &expected)) + require.Equal(t, &expected, state) + }) +} diff --git a/op-challenger2/game/fault/trace/cannon/test_data/invalid.json b/op-challenger2/game/fault/trace/cannon/test_data/invalid.json new file mode 100644 index 000000000000..06a76bf5b23d --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/test_data/invalid.json @@ -0,0 +1,3 @@ +{ + "preimageKey": 1 +} diff --git a/op-challenger2/game/fault/trace/cannon/test_data/proofs/0.json b/op-challenger2/game/fault/trace/cannon/test_data/proofs/0.json new file mode 100644 index 000000000000..5cadf03b8843 --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/test_data/proofs/0.json @@ -0,0 +1 @@ +{"step":0,"pre":"0x71f9eb93ff904e5c03c3425228ef75766db0c906ad239df9a7a7f0d9c6a89705","post":"0x45fd9aa59768331c726e719e76aa343e73123af888804604785ae19506e65e87","state-data":"0xb8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f400000000000000000000000000000000000000000000000000000000000000000000000000","proof-data":"0x08028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004","step-input":"0xf8e0cb960000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000014200000000000000000000000000000000000000000000000000000000000000e2b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f4000000000000000000000000000000000000000000000000000000000000000000000000000a3900000a39040000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007fffd0000000000000000000000000000000000000000000000000000000000000000000000000000000070008028e3c000000
0000000000000000003c01000a24210b7c00200008000000008fa40004240210960000000c0000003403e00008000000008fa100040000102571c0e460346a89963488f904199fc7b4dc3dce2ddadfe484510463ae5014a79df9d922ef2cb84325e4e13ad98828ed29937c1440d8ea9eb19cab7474243c2d0b1a83646e420529153298f3a914a2550658c930f5e519b1d8dd151cf828116697d27264e6fad331820ecf3855adcc68dc529acfc33ecfa45a3a33c9ac766edc1f437988f2abab9dce36d3bac27b0f7b58a06d125acd50a1bf14bb8c7f6c1618465a532f945043b5a9ebc800d7336673019654eb76f8c10cff4f794ee586dc9992c318cef3dfa57032e2dd2fc5cb2dcfebd05551301704dd37a7c169448ec02574f706e38c20963616dae4e03cc91f39a4c3f9608119212965b72948f0ee15feb48b758f050691197816dc3ca919bbb3b50624d195c82d644025647ac8ba07206e5eb830799dfa896506743e81856edf8a31fef737fb4f44501dc71f019bdb12ed9cf0b9fba40ef98e5091b70484ba4f6af7711ec8b0ba4f4f2c4b11455a9e071f465817724159ddeea1170f4dd912c3a5a10ec6b046aa3c4a9febddfeeaa47e3ef06e1758694515562c958dc1b018149c7e4fcd91b9033ee216fea2ea498acd065e61fd436f26c31654bfd27c13ab67707384ad7a84a4b085e890e998e8a9655da954db3d279d598343a4706a2272fca526caeddb017627ecaf0138f1446c82e16d0926c0c510773e2b439c2c71414deb9b739fa370c010380d9ed5927fd7f4bb84ac22747f1bd405830b65d9e04c5efddc2c4dc89ba294c7568b9952193172d75ed8ea3e0fe57c8ad6636da54921ab52a8a0f54920d124f43b9fd3577690140cb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef6834d8ef8faaf96b7b45235297538a266eb882b8b5680f621aab3417d43cdc2eb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","oracle-input":"0x"} diff --git a/op-challenger2/game/fault/trace/cannon/test_data/proofs/1.json b/op-challenger2/game/fault/trace/cannon/test_data/proofs/1.json new file mode 100644 index 000000000000..0967ef424bce --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/test_data/proofs/1.json @@ -0,0 +1 @@ +{} diff --git a/op-challenger2/game/fault/trace/cannon/test_data/proofs/2.json 
b/op-challenger2/game/fault/trace/cannon/test_data/proofs/2.json new file mode 100644 index 000000000000..c73f8051384a --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/test_data/proofs/2.json @@ -0,0 +1 @@ +{"foo":0,"bar":"0x71f9eb93ff904e5c03c3425228ef75766db0c906ad239df9a7a7f0d9c6a89705","post":"0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb","state-data":"0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc","proof-data":"0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd","step-input":"0xf8e0cb960000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000014200000000000000000000000000000000000000000000000000000000000000e2b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f4000000000000000000000000000000000000000000000000000000000000000000000000000a3900000a39040000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007fffd0000000000000000000000000000000000000000000000000000000000000000000000000000000070008028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004240210960000000c0000003403e00008000000008fa100040000102571c0e460346a89963488f904199fc7b4dc3dce2ddadfe484510463ae5014a79df9d922ef2cb84325e4e13ad98828ed29937c1440d8ea9eb19cab7474243c2d0b1a83646e420529153298f3a914a2550658c930f5e519b1d8dd151cf828116697d27264e6fad331820ecf3855adcc68dc529acfc33ecfa45a3a33c9ac766edc1f437988f2abab9dce36d3bac27b0f7b58a06d125acd50a1bf14bb8c7f6c1618465a532f945043b5a9ebc800d7336673019654eb76f8c10cff4f794ee586dc9992c318cef3dfa57032e2dd2fc5cb2dcfebd05551301704dd37a7c169448ec02574f706e38c20963616dae4e03cc91f39a4c3f9608119212965b72948f0ee15feb48b758f050691197816dc3ca919bbb3b50624d195c82d644025647ac8ba07206e5eb830799dfa896506743e8
1856edf8a31fef737fb4f44501dc71f019bdb12ed9cf0b9fba40ef98e5091b70484ba4f6af7711ec8b0ba4f4f2c4b11455a9e071f465817724159ddeea1170f4dd912c3a5a10ec6b046aa3c4a9febddfeeaa47e3ef06e1758694515562c958dc1b018149c7e4fcd91b9033ee216fea2ea498acd065e61fd436f26c31654bfd27c13ab67707384ad7a84a4b085e890e998e8a9655da954db3d279d598343a4706a2272fca526caeddb017627ecaf0138f1446c82e16d0926c0c510773e2b439c2c71414deb9b739fa370c010380d9ed5927fd7f4bb84ac22747f1bd405830b65d9e04c5efddc2c4dc89ba294c7568b9952193172d75ed8ea3e0fe57c8ad6636da54921ab52a8a0f54920d124f43b9fd3577690140cb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef6834d8ef8faaf96b7b45235297538a266eb882b8b5680f621aab3417d43cdc2eb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","oracle-input":"0x"} diff --git a/op-challenger2/game/fault/trace/cannon/test_data/proofs/420.json b/op-challenger2/game/fault/trace/cannon/test_data/proofs/420.json new file mode 100644 index 000000000000..a3974f1f1992 --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/test_data/proofs/420.json @@ -0,0 +1,11 @@ +{ + "step": 0, + "pre": "0x71f9eb93ff904e5c03c3425228ef75766db0c906ad239df9a7a7f0d9c6a89705", + "post": "0x45fd9aa59768331c726e719e76aa343e73123af888804604785ae19506e65e87", + "state-data": "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "proof-data": "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "step-input": 
"0xf8e0cb960000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000014200000000000000000000000000000000000000000000000000000000000000e2b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f4000000000000000000000000000000000000000000000000000000000000000000000000000a3900000a39040000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007fffd0000000000000000000000000000000000000000000000000000000000000000000000000000000070008028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004240210960000000c0000003403e00008000000008fa100040000102571c0e460346a89963488f904199fc7b4dc3dce2ddadfe484510463ae5014a79df9d922ef2cb84325e4e13ad98828ed29937c1440d8ea9eb19cab7474243c2d0b1a83646e420529153298f3a914a2550658c930f5e519b1d8dd151cf828116697d27264e6fad331820ecf3855adcc68dc529acfc33ecfa45a3a33c9ac766edc1f437988f2abab9dce36d3bac27b0f7b58a06d125acd50a1bf14bb8c7f6c1618465a532f945043b5a9ebc800d7336673019654eb76f8c10cff4f794ee586dc9992c318cef3dfa57032e2dd2fc5cb2dcfebd05551301704dd37a7c169448ec02574f706e38c20963616dae4e03cc91f39a4c3f9608119212965b72948f0ee15feb48b758f050691197816dc3ca919bbb3b50624d195c82d644025647ac8ba07206e5eb830799dfa896506743e81856edf8a31fef737fb4f44501dc71f019bdb12ed9cf0b9fba40ef98e5091b70484ba4f6af7711ec8b0ba4f4f2c4b11455a9e071f465817724159ddeea1170f4dd912c3a5a10ec6b046aa3c4a9febddfeeaa47e3ef06e1758694515562c958dc1b018149c7e4fcd91b9033ee216fea2ea498acd065e61fd436f26c31654bfd27c13ab67707384ad7a84a4b085e890e998e8a9655da954db3d279d598343a4706a2272fca526caeddb017627ecaf0138f1446c82e16d0926c0c510773e2b439c2c71414deb9b739fa370c010380d9ed5927fd7f4bb84ac22747f1bd405830b65d9e04c5efddc2c4dc89ba294c7568b9952193172d75ed8ea3e0fe57c8ad6636da54921ab52a8a0f54920d124f43b9fd3577690140cb
46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef6834d8ef8faaf96b7b45235297538a266eb882b8b5680f621aab3417d43cdc2eb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "oracle-input": "0x", + "oracle-key": "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", + "oracle-value": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" +} diff --git a/op-challenger2/game/fault/trace/cannon/test_data/proofs/421.json b/op-challenger2/game/fault/trace/cannon/test_data/proofs/421.json new file mode 100644 index 000000000000..974bcb6c1100 --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/test_data/proofs/421.json @@ -0,0 +1,11 @@ +{ + "foo": 0, + "bar": "0x71f9eb93ff904e5c03c3425228ef75766db0c906ad239df9a7a7f0d9c6a89705", + "post": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "state-data": "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "proof-data": "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "step-input": 
"0xf8e0cb960000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000014200000000000000000000000000000000000000000000000000000000000000e2b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f4000000000000000000000000000000000000000000000000000000000000000000000000000a3900000a39040000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007fffd0000000000000000000000000000000000000000000000000000000000000000000000000000000070008028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004240210960000000c0000003403e00008000000008fa100040000102571c0e460346a89963488f904199fc7b4dc3dce2ddadfe484510463ae5014a79df9d922ef2cb84325e4e13ad98828ed29937c1440d8ea9eb19cab7474243c2d0b1a83646e420529153298f3a914a2550658c930f5e519b1d8dd151cf828116697d27264e6fad331820ecf3855adcc68dc529acfc33ecfa45a3a33c9ac766edc1f437988f2abab9dce36d3bac27b0f7b58a06d125acd50a1bf14bb8c7f6c1618465a532f945043b5a9ebc800d7336673019654eb76f8c10cff4f794ee586dc9992c318cef3dfa57032e2dd2fc5cb2dcfebd05551301704dd37a7c169448ec02574f706e38c20963616dae4e03cc91f39a4c3f9608119212965b72948f0ee15feb48b758f050691197816dc3ca919bbb3b50624d195c82d644025647ac8ba07206e5eb830799dfa896506743e81856edf8a31fef737fb4f44501dc71f019bdb12ed9cf0b9fba40ef98e5091b70484ba4f6af7711ec8b0ba4f4f2c4b11455a9e071f465817724159ddeea1170f4dd912c3a5a10ec6b046aa3c4a9febddfeeaa47e3ef06e1758694515562c958dc1b018149c7e4fcd91b9033ee216fea2ea498acd065e61fd436f26c31654bfd27c13ab67707384ad7a84a4b085e890e998e8a9655da954db3d279d598343a4706a2272fca526caeddb017627ecaf0138f1446c82e16d0926c0c510773e2b439c2c71414deb9b739fa370c010380d9ed5927fd7f4bb84ac22747f1bd405830b65d9e04c5efddc2c4dc89ba294c7568b9952193172d75ed8ea3e0fe57c8ad6636da54921ab52a8a0f54920d124f43b9fd3577690140cb
46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef6834d8ef8faaf96b7b45235297538a266eb882b8b5680f621aab3417d43cdc2eb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "oracle-input": "0x", + "oracle-key": "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", + "oracle-value": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" +} diff --git a/op-challenger2/game/fault/trace/cannon/test_data/state.json b/op-challenger2/game/fault/trace/cannon/test_data/state.json new file mode 100644 index 000000000000..30cd1ccdcf0c --- /dev/null +++ b/op-challenger2/game/fault/trace/cannon/test_data/state.json @@ -0,0 +1,14 @@ +{ + "memory": [], + "preimageKey": "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "preimageOffset": 0, + "pc": 0, + "nextPC": 1, + "lo": 0, + "hi": 0, + "heap": 0, + "exit": 0, + "exited": false, + "step": 0, + "registers": [] +} diff --git a/op-challenger2/game/fault/trace/outputs/output_alphabet.go b/op-challenger2/game/fault/trace/outputs/output_alphabet.go new file mode 100644 index 000000000000..bb74dad028d7 --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/output_alphabet.go @@ -0,0 +1,37 @@ +package outputs + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/alphabet" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/split" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum-optimism/optimism/op-service/eth" + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +func NewOutputAlphabetTraceAccessor( + logger log.Logger, + m metrics.Metricer, + prestateProvider types.PrestateProvider, + rollupClient OutputRollupClient, + l2Client utils.L2HeaderSource, + l1Head eth.BlockID, + splitDepth types.Depth, + prestateBlock uint64, + poststateBlock uint64, +) (*trace.Accessor, error) { + outputProvider := NewTraceProvider(logger, prestateProvider, rollupClient, l2Client, l1Head, splitDepth, prestateBlock, poststateBlock) + alphabetCreator := func(ctx context.Context, localContext common.Hash, depth types.Depth, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) { + provider := alphabet.NewTraceProvider(agreed.L2BlockNumber, depth) + return provider, nil + } + cache := NewProviderCache(m, "output_alphabet_provider", alphabetCreator) + selector := split.NewSplitProviderSelector(outputProvider, splitDepth, OutputRootSplitAdapter(outputProvider, cache.GetOrCreate)) + return trace.NewAccessor(selector), nil +} diff --git a/op-challenger2/game/fault/trace/outputs/output_asterisc.go b/op-challenger2/game/fault/trace/outputs/output_asterisc.go new file mode 100644 index 000000000000..375d788cde5d --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/output_asterisc.go @@ -0,0 +1,50 @@ +package outputs + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/asterisc" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/split" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + 
"github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +func NewOutputAsteriscTraceAccessor( + logger log.Logger, + m metrics.Metricer, + cfg *config.Config, + l2Client utils.L2HeaderSource, + prestateProvider types.PrestateProvider, + asteriscPrestate string, + rollupClient OutputRollupClient, + dir string, + l1Head eth.BlockID, + splitDepth types.Depth, + prestateBlock uint64, + poststateBlock uint64, +) (*trace.Accessor, error) { + outputProvider := NewTraceProvider(logger, prestateProvider, rollupClient, l2Client, l1Head, splitDepth, prestateBlock, poststateBlock) + asteriscCreator := func(ctx context.Context, localContext common.Hash, depth types.Depth, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) { + logger := logger.New("pre", agreed.OutputRoot, "post", claimed.OutputRoot, "localContext", localContext) + subdir := filepath.Join(dir, localContext.Hex()) + localInputs, err := utils.FetchLocalInputsFromProposals(ctx, l1Head.Hash, l2Client, agreed, claimed) + if err != nil { + return nil, fmt.Errorf("failed to fetch asterisc local inputs: %w", err) + } + provider := asterisc.NewTraceProvider(logger, m, cfg, prestateProvider, asteriscPrestate, localInputs, subdir, depth) + return provider, nil + } + + cache := NewProviderCache(m, "output_asterisc_provider", asteriscCreator) + selector := split.NewSplitProviderSelector(outputProvider, splitDepth, OutputRootSplitAdapter(outputProvider, cache.GetOrCreate)) + return trace.NewAccessor(selector), nil +} diff --git a/op-challenger2/game/fault/trace/outputs/output_cannon.go b/op-challenger2/game/fault/trace/outputs/output_cannon.go new file mode 100644 index 000000000000..2eb6597899cf --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/output_cannon.go @@ -0,0 +1,50 @@ +package outputs + +import ( + "context" + "fmt" + 
"path/filepath" + + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/cannon" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/split" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +func NewOutputCannonTraceAccessor( + logger log.Logger, + m metrics.Metricer, + cfg *config.Config, + l2Client utils.L2HeaderSource, + prestateProvider types.PrestateProvider, + cannonPrestate string, + rollupClient OutputRollupClient, + dir string, + l1Head eth.BlockID, + splitDepth types.Depth, + prestateBlock uint64, + poststateBlock uint64, +) (*trace.Accessor, error) { + outputProvider := NewTraceProvider(logger, prestateProvider, rollupClient, l2Client, l1Head, splitDepth, prestateBlock, poststateBlock) + cannonCreator := func(ctx context.Context, localContext common.Hash, depth types.Depth, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) { + logger := logger.New("pre", agreed.OutputRoot, "post", claimed.OutputRoot, "localContext", localContext) + subdir := filepath.Join(dir, localContext.Hex()) + localInputs, err := utils.FetchLocalInputsFromProposals(ctx, l1Head.Hash, l2Client, agreed, claimed) + if err != nil { + return nil, fmt.Errorf("failed to fetch cannon local inputs: %w", err) + } + provider := cannon.NewTraceProvider(logger, m, cfg, prestateProvider, cannonPrestate, localInputs, subdir, depth) + return provider, nil + } + + cache := NewProviderCache(m, 
"output_cannon_provider", cannonCreator) + selector := split.NewSplitProviderSelector(outputProvider, splitDepth, OutputRootSplitAdapter(outputProvider, cache.GetOrCreate)) + return trace.NewAccessor(selector), nil +} diff --git a/op-challenger2/game/fault/trace/outputs/prestate.go b/op-challenger2/game/fault/trace/outputs/prestate.go new file mode 100644 index 000000000000..57937ab8b014 --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/prestate.go @@ -0,0 +1,35 @@ +package outputs + +import ( + "context" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum/go-ethereum/common" +) + +var _ types.PrestateProvider = (*OutputPrestateProvider)(nil) + +type OutputPrestateProvider struct { + prestateBlock uint64 + rollupClient OutputRollupClient +} + +func NewPrestateProvider(rollupClient OutputRollupClient, prestateBlock uint64) *OutputPrestateProvider { + return &OutputPrestateProvider{ + prestateBlock: prestateBlock, + rollupClient: rollupClient, + } +} + +func (o *OutputPrestateProvider) AbsolutePreStateCommitment(ctx context.Context) (hash common.Hash, err error) { + return o.outputAtBlock(ctx, o.prestateBlock) +} + +func (o *OutputPrestateProvider) outputAtBlock(ctx context.Context, block uint64) (common.Hash, error) { + output, err := o.rollupClient.OutputAtBlock(ctx, block) + if err != nil { + return common.Hash{}, fmt.Errorf("failed to fetch output at block %v: %w", block, err) + } + return common.Hash(output.OutputRoot), nil +} diff --git a/op-challenger2/game/fault/trace/outputs/prestate_test.go b/op-challenger2/game/fault/trace/outputs/prestate_test.go new file mode 100644 index 000000000000..0b497ee5e872 --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/prestate_test.go @@ -0,0 +1,47 @@ +package outputs + +import ( + "context" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/stretchr/testify/require" +) + +func newOutputPrestateProvider(t *testing.T, 
prestateBlock uint64) (*OutputPrestateProvider, *stubRollupClient) { + rollupClient := &stubRollupClient{ + outputs: map[uint64]*eth.OutputResponse{ + prestateBlock: { + OutputRoot: eth.Bytes32(prestateOutputRoot), + }, + 101: { + OutputRoot: eth.Bytes32(firstOutputRoot), + }, + poststateBlock: { + OutputRoot: eth.Bytes32(poststateOutputRoot), + }, + }, + } + return &OutputPrestateProvider{ + rollupClient: rollupClient, + prestateBlock: prestateBlock, + }, rollupClient +} + +func TestAbsolutePreStateCommitment(t *testing.T) { + var prestateBlock = uint64(100) + + t.Run("FailedToFetchOutput", func(t *testing.T) { + provider, rollupClient := newOutputPrestateProvider(t, prestateBlock) + rollupClient.errorsOnPrestateFetch = true + _, err := provider.AbsolutePreStateCommitment(context.Background()) + require.ErrorIs(t, err, errNoOutputAtBlock) + }) + + t.Run("ReturnsCorrectPrestateOutput", func(t *testing.T) { + provider, _ := newOutputPrestateProvider(t, prestateBlock) + value, err := provider.AbsolutePreStateCommitment(context.Background()) + require.NoError(t, err) + require.Equal(t, value, prestateOutputRoot) + }) +} diff --git a/op-challenger2/game/fault/trace/outputs/provider.go b/op-challenger2/game/fault/trace/outputs/provider.go new file mode 100644 index 000000000000..a65d19d23a7d --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/provider.go @@ -0,0 +1,130 @@ +package outputs + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +var ( + ErrGetStepData = errors.New("GetStepData not supported") + ErrIndexTooBig = errors.New("trace index is greater than max uint64") +) + +var _ types.TraceProvider = (*OutputTraceProvider)(nil) + +type OutputRollupClient 
interface { + OutputAtBlock(ctx context.Context, blockNum uint64) (*eth.OutputResponse, error) + SafeHeadAtL1Block(ctx context.Context, l1BlockNum uint64) (*eth.SafeHeadResponse, error) +} + +// OutputTraceProvider is a [types.TraceProvider] implementation that uses +// output roots for given L2 Blocks as a trace. +type OutputTraceProvider struct { + types.PrestateProvider + logger log.Logger + rollupProvider OutputRollupClient + l2Client utils.L2HeaderSource + prestateBlock uint64 + poststateBlock uint64 + l1Head eth.BlockID + gameDepth types.Depth +} + +func NewTraceProvider(logger log.Logger, prestateProvider types.PrestateProvider, rollupProvider OutputRollupClient, l2Client utils.L2HeaderSource, l1Head eth.BlockID, gameDepth types.Depth, prestateBlock, poststateBlock uint64) *OutputTraceProvider { + return &OutputTraceProvider{ + PrestateProvider: prestateProvider, + logger: logger, + rollupProvider: rollupProvider, + l2Client: l2Client, + prestateBlock: prestateBlock, + poststateBlock: poststateBlock, + l1Head: l1Head, + gameDepth: gameDepth, + } +} + +// ClaimedBlockNumber returns the block number for a position restricted only by the claimed L2 block number. +// The returned block number may be after the safe head reached by processing batch data up to the game's L1 head +func (o *OutputTraceProvider) ClaimedBlockNumber(pos types.Position) (uint64, error) { + traceIndex := pos.TraceIndex(o.gameDepth) + if !traceIndex.IsUint64() { + return 0, fmt.Errorf("%w: %v", ErrIndexTooBig, traceIndex) + } + + outputBlock := traceIndex.Uint64() + o.prestateBlock + 1 + if outputBlock > o.poststateBlock { + outputBlock = o.poststateBlock + } + return outputBlock, nil +} + +// HonestBlockNumber returns the block number for a position in the game restricted to the minimum of the claimed L2 +// block number or the safe head reached by processing batch data up to the game's L1 head. 
+// This is used when posting honest output roots to ensure that only roots supported by L1 data are posted +func (o *OutputTraceProvider) HonestBlockNumber(ctx context.Context, pos types.Position) (uint64, error) { + outputBlock, err := o.ClaimedBlockNumber(pos) + if err != nil { + return 0, err + } + resp, err := o.rollupProvider.SafeHeadAtL1Block(ctx, o.l1Head.Number) + if err != nil { + return 0, fmt.Errorf("failed to get safe head at L1 block %v: %w", o.l1Head, err) + } + maxSafeHead := resp.SafeHead.Number + if outputBlock > maxSafeHead { + outputBlock = maxSafeHead + } + return outputBlock, nil +} + +func (o *OutputTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) { + outputBlock, err := o.HonestBlockNumber(ctx, pos) + if err != nil { + return common.Hash{}, err + } + return o.outputAtBlock(ctx, outputBlock) +} + +// GetStepData is not supported in the [OutputTraceProvider]. +func (o *OutputTraceProvider) GetStepData(_ context.Context, _ types.Position) (prestate []byte, proofData []byte, preimageData *types.PreimageOracleData, err error) { + return nil, nil, nil, ErrGetStepData +} + +func (o *OutputTraceProvider) GetL2BlockNumberChallenge(ctx context.Context) (*types.InvalidL2BlockNumberChallenge, error) { + outputBlock, err := o.HonestBlockNumber(ctx, types.RootPosition) + if err != nil { + return nil, err + } + claimedBlock, err := o.ClaimedBlockNumber(types.RootPosition) + if err != nil { + return nil, err + } + if claimedBlock == outputBlock { + return nil, types.ErrL2BlockNumberValid + } + output, err := o.rollupProvider.OutputAtBlock(ctx, outputBlock) + if err != nil { + return nil, err + } + header, err := o.l2Client.HeaderByNumber(ctx, new(big.Int).SetUint64(outputBlock)) + if err != nil { + return nil, fmt.Errorf("failed to retrieve L2 block header %v: %w", outputBlock, err) + } + return types.NewInvalidL2BlockNumberProof(output, header), nil +} + +func (o *OutputTraceProvider) outputAtBlock(ctx context.Context, block 
uint64) (common.Hash, error) { + output, err := o.rollupProvider.OutputAtBlock(ctx, block) + if err != nil { + return common.Hash{}, fmt.Errorf("failed to fetch output at block %v: %w", block, err) + } + return common.Hash(output.OutputRoot), nil +} diff --git a/op-challenger2/game/fault/trace/outputs/provider_cache.go b/op-challenger2/game/fault/trace/outputs/provider_cache.go new file mode 100644 index 000000000000..ddbb54ef01b5 --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/provider_cache.go @@ -0,0 +1,36 @@ +package outputs + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/sources/caching" + "github.com/ethereum/go-ethereum/common" +) + +type ProviderCache struct { + cache *caching.LRUCache[common.Hash, types.TraceProvider] + creator ProposalTraceProviderCreator +} + +func (c *ProviderCache) GetOrCreate(ctx context.Context, localContext common.Hash, depth types.Depth, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) { + provider, ok := c.cache.Get(localContext) + if ok { + return provider, nil + } + provider, err := c.creator(ctx, localContext, depth, agreed, claimed) + if err != nil { + return nil, err + } + c.cache.Add(localContext, provider) + return provider, nil +} + +func NewProviderCache(m caching.Metrics, metricsLabel string, creator ProposalTraceProviderCreator) *ProviderCache { + cache := caching.NewLRUCache[common.Hash, types.TraceProvider](m, metricsLabel, 100) + return &ProviderCache{ + cache: cache, + creator: creator, + } +} diff --git a/op-challenger2/game/fault/trace/outputs/provider_cache_test.go b/op-challenger2/game/fault/trace/outputs/provider_cache_test.go new file mode 100644 index 000000000000..e3f931a287da --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/provider_cache_test.go @@ -0,0 +1,77 @@ +package 
outputs + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/alphabet" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestProviderCache(t *testing.T) { + agreed := contracts.Proposal{ + L2BlockNumber: big.NewInt(34), + OutputRoot: common.Hash{0xaa}, + } + claimed := contracts.Proposal{ + L2BlockNumber: big.NewInt(35), + OutputRoot: common.Hash{0xcc}, + } + depth := types.Depth(6) + var createdProvider types.TraceProvider + creator := func(ctx context.Context, localContext common.Hash, depth types.Depth, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) { + createdProvider = alphabet.NewTraceProvider(big.NewInt(0), depth) + return createdProvider, nil + } + localContext1 := common.Hash{0xdd} + localContext2 := common.Hash{0xee} + + cache := NewProviderCache(metrics.NoopMetrics, "test", creator) + + // Create on first call + provider1, err := cache.GetOrCreate(context.Background(), localContext1, depth, agreed, claimed) + require.NoError(t, err) + require.Same(t, createdProvider, provider1, "should return created trace provider") + + // Return the cached provider on subsequent calls. 
+ createdProvider = nil + cached, err := cache.GetOrCreate(context.Background(), localContext1, depth, agreed, claimed) + require.NoError(t, err) + require.Same(t, provider1, cached, "should return exactly the same instance from cache") + require.Nil(t, createdProvider) + + // Create a new provider when the local context is different + createdProvider = nil + otherProvider, err := cache.GetOrCreate(context.Background(), localContext2, depth, agreed, claimed) + require.NoError(t, err) + require.Same(t, otherProvider, createdProvider, "should return newly created trace provider") + require.NotSame(t, otherProvider, provider1, "should not use cached provider for different local context") +} + +func TestProviderCache_DoNotCacheErrors(t *testing.T) { + callCount := 0 + providerErr := errors.New("boom") + creator := func(ctx context.Context, localContext common.Hash, depth types.Depth, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) { + callCount++ + return nil, providerErr + } + localContext1 := common.Hash{0xdd} + + cache := NewProviderCache(metrics.NoopMetrics, "test", creator) + provider, err := cache.GetOrCreate(context.Background(), localContext1, 6, contracts.Proposal{}, contracts.Proposal{}) + require.Nil(t, provider) + require.ErrorIs(t, err, providerErr) + require.Equal(t, 1, callCount) + + // Should call the creator again on the second attempt + provider, err = cache.GetOrCreate(context.Background(), localContext1, 6, contracts.Proposal{}, contracts.Proposal{}) + require.Nil(t, provider) + require.ErrorIs(t, err, providerErr) + require.Equal(t, 2, callCount) +} diff --git a/op-challenger2/game/fault/trace/outputs/provider_test.go b/op-challenger2/game/fault/trace/outputs/provider_test.go new file mode 100644 index 000000000000..f660ee2dda40 --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/provider_test.go @@ -0,0 +1,263 @@ +package outputs + +import ( + "context" + "errors" + "fmt" + "math" + "math/big" + 
// TestGet exercises OutputTraceProvider.Get across the interesting positions:
// an out-of-range trace index, the first block after the prestate, a block
// with no output available, and positions at or beyond the poststate block.
func TestGet(t *testing.T) {
	t.Run("ErrorsTraceIndexOutOfBounds", func(t *testing.T) {
		// A game this deep yields trace indices larger than uint64 can hold.
		deepGame := types.Depth(164)
		provider, _, _ := setupWithTestData(t, prestateBlock, poststateBlock, deepGame)
		pos := types.NewPosition(0, big.NewInt(0))
		_, err := provider.Get(context.Background(), pos)
		require.ErrorIs(t, err, ErrIndexTooBig)
	})

	t.Run("FirstBlockAfterPrestate", func(t *testing.T) {
		provider, _, _ := setupWithTestData(t, prestateBlock, poststateBlock)
		value, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(0)))
		require.NoError(t, err)
		require.Equal(t, firstOutputRoot, value)
	})

	t.Run("MissingOutputAtBlock", func(t *testing.T) {
		// The stub only has outputs for the prestate block, block 101 and the
		// poststate block, so trace index 1 (block 102) has no output.
		provider, _, _ := setupWithTestData(t, prestateBlock, poststateBlock)
		_, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(1)))
		require.ErrorIs(t, err, errNoOutputAtBlock)
	})

	t.Run("PostStateBlock", func(t *testing.T) {
		provider, _, _ := setupWithTestData(t, prestateBlock, poststateBlock)
		value, err := provider.Get(context.Background(), types.NewPositionFromGIndex(big.NewInt(228)))
		require.NoError(t, err)
		require.Equal(t, value, poststateOutputRoot)
	})

	t.Run("AfterPostStateBlock", func(t *testing.T) {
		// Positions past the poststate block clamp to the poststate output.
		provider, _, _ := setupWithTestData(t, prestateBlock, poststateBlock)
		value, err := provider.Get(context.Background(), types.NewPositionFromGIndex(big.NewInt(229)))
		require.NoError(t, err)
		require.Equal(t, value, poststateOutputRoot)
	})
}
// TestGetL2BlockNumberChallenge verifies a challenge is produced only when
// the safe head limit falls below the claimed poststate block, and that the
// challenge carries the output and header at the restricted (honest) block.
func TestGetL2BlockNumberChallenge(t *testing.T) {
	tests := []struct {
		name            string
		maxSafeHead     uint64
		expectChallenge bool
	}{
		{"NoChallengeWhenMaxHeadNotLimited", math.MaxUint64, false},
		{"NoChallengeWhenBeforeMaxHead", poststateBlock + 1, false},
		{"NoChallengeWhenAtMaxHead", poststateBlock, false},
		{"ChallengeWhenBeforeMaxHead", poststateBlock - 1, true},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			provider, stubRollupClient, stubL2Client := setupWithTestData(t, prestateBlock, poststateBlock)
			stubRollupClient.maxSafeHead = test.maxSafeHead
			if test.expectChallenge {
				// Seed the stubs with the data the provider must include in
				// the challenge proof for the honest (safe-head) block.
				stubRollupClient.outputs[test.maxSafeHead] = &eth.OutputResponse{
					OutputRoot: eth.Bytes32{0xaa},
					BlockRef: eth.L2BlockRef{
						Number: test.maxSafeHead,
					},
				}
				stubL2Client.headers[test.maxSafeHead] = &ethTypes.Header{
					Number: new(big.Int).SetUint64(test.maxSafeHead),
					Root:   common.Hash{0xcc},
				}
			}
			actual, err := provider.GetL2BlockNumberChallenge(context.Background())
			if test.expectChallenge {
				require.NoError(t, err)
				require.Equal(t, &types.InvalidL2BlockNumberChallenge{
					Output: stubRollupClient.outputs[test.maxSafeHead],
					Header: stubL2Client.headers[test.maxSafeHead],
				}, actual)
			} else {
				require.ErrorIs(t, err, types.ErrL2BlockNumberValid)
			}
		})
	}
}
// TestClaimedBlockNumber checks the position-to-block mapping that is
// restricted only by the claimed poststate block. NOTE(review): unlike
// HonestBlockNumber, the expected values here exceed maxSafeHead in the
// RestrictedHead cases — ClaimedBlockNumber deliberately ignores the limit.
func TestClaimedBlockNumber(t *testing.T) {
	tests := []struct {
		name        string
		pos         types.Position
		expected    uint64
		maxSafeHead uint64
	}{
		{"FirstBlockAfterPrestate", types.NewPosition(gameDepth, big.NewInt(0)), prestateBlock + 1, math.MaxUint64},
		{"PostStateBlock", types.NewPositionFromGIndex(big.NewInt(228)), poststateBlock, math.MaxUint64},
		{"AfterPostStateBlock", types.NewPositionFromGIndex(big.NewInt(229)), poststateBlock, math.MaxUint64},
		{"Root", types.NewPositionFromGIndex(big.NewInt(1)), poststateBlock, math.MaxUint64},
		{"MiddleNode1", types.NewPosition(gameDepth-1, big.NewInt(2)), 106, math.MaxUint64},
		{"MiddleNode2", types.NewPosition(gameDepth-1, big.NewInt(3)), 108, math.MaxUint64},
		{"Leaf1", types.NewPosition(gameDepth, big.NewInt(1)), prestateBlock + 2, math.MaxUint64},
		{"Leaf2", types.NewPosition(gameDepth, big.NewInt(2)), prestateBlock + 3, math.MaxUint64},

		// The safe head limit is set but must not affect the result.
		{"RestrictedHead-UnderLimit", types.NewPosition(gameDepth, big.NewInt(48)), prestateBlock + 49, prestateBlock + 50},
		{"RestrictedHead-EqualLimit", types.NewPosition(gameDepth, big.NewInt(49)), prestateBlock + 50, prestateBlock + 50},
		{"RestrictedHead-OverLimit", types.NewPosition(gameDepth, big.NewInt(50)), prestateBlock + 51, prestateBlock + 50},
		{"RestrictedHead-PastPostState", types.NewPosition(gameDepth, big.NewInt(300)), poststateBlock, prestateBlock + 50},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			provider, stubRollupClient, _ := setupWithTestData(t, prestateBlock, poststateBlock)
			stubRollupClient.maxSafeHead = test.maxSafeHead
			actual, err := provider.ClaimedBlockNumber(test.pos)
			require.NoError(t, err)
			require.Equal(t, test.expected, actual)
		})
	}

	t.Run("ErrorsTraceIndexOutOfBounds", func(t *testing.T) {
		// A game this deep yields trace indices larger than uint64 can hold.
		deepGame := types.Depth(164)
		provider, _, _ := setupWithTestData(t, prestateBlock, poststateBlock, deepGame)
		pos := types.NewPosition(0, big.NewInt(0))
		_, err := provider.ClaimedBlockNumber(pos)
		require.ErrorIs(t, err, ErrIndexTooBig)
	})
}
TestGetStepData(t *testing.T) { + provider, _, _ := setupWithTestData(t, prestateBlock, poststateBlock) + _, _, _, err := provider.GetStepData(context.Background(), types.NewPosition(1, common.Big0)) + require.ErrorIs(t, err, ErrGetStepData) +} + +func setupWithTestData(t *testing.T, prestateBlock, poststateBlock uint64, customGameDepth ...types.Depth) (*OutputTraceProvider, *stubRollupClient, *stubL2HeaderSource) { + rollupClient := &stubRollupClient{ + outputs: map[uint64]*eth.OutputResponse{ + prestateBlock: { + OutputRoot: eth.Bytes32(prestateOutputRoot), + }, + 101: { + OutputRoot: eth.Bytes32(firstOutputRoot), + }, + poststateBlock: { + OutputRoot: eth.Bytes32(poststateOutputRoot), + }, + }, + maxSafeHead: math.MaxUint64, + } + l2Client := &stubL2HeaderSource{ + headers: make(map[uint64]*ethTypes.Header), + } + inputGameDepth := gameDepth + if len(customGameDepth) > 0 { + inputGameDepth = customGameDepth[0] + } + return &OutputTraceProvider{ + logger: testlog.Logger(t, log.LevelInfo), + rollupProvider: rollupClient, + l2Client: l2Client, + prestateBlock: prestateBlock, + poststateBlock: poststateBlock, + gameDepth: inputGameDepth, + }, rollupClient, l2Client +} + +type stubRollupClient struct { + errorsOnPrestateFetch bool + outputs map[uint64]*eth.OutputResponse + maxSafeHead uint64 +} + +func (s *stubRollupClient) OutputAtBlock(_ context.Context, blockNum uint64) (*eth.OutputResponse, error) { + output, ok := s.outputs[blockNum] + if !ok || s.errorsOnPrestateFetch { + return nil, fmt.Errorf("%w: %d", errNoOutputAtBlock, blockNum) + } + return output, nil +} + +func (s *stubRollupClient) SafeHeadAtL1Block(_ context.Context, l1BlockNum uint64) (*eth.SafeHeadResponse, error) { + return ð.SafeHeadResponse{ + SafeHead: eth.BlockID{ + Number: s.maxSafeHead, + Hash: common.Hash{0x11}, + }, + }, nil +} + +type stubL2HeaderSource struct { + headers map[uint64]*ethTypes.Header +} + +func (s *stubL2HeaderSource) HeaderByNumber(_ context.Context, num *big.Int) 
(*ethTypes.Header, error) { + header, ok := s.headers[num.Uint64()] + if !ok { + return nil, ethereum.NotFound + } + return header, nil +} diff --git a/op-challenger2/game/fault/trace/outputs/split_adapter.go b/op-challenger2/game/fault/trace/outputs/split_adapter.go new file mode 100644 index 000000000000..dcd400fcfed0 --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/split_adapter.go @@ -0,0 +1,78 @@ +package outputs + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/split" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +type ProposalTraceProviderCreator func(ctx context.Context, localContext common.Hash, depth types.Depth, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) + +func OutputRootSplitAdapter(topProvider *OutputTraceProvider, creator ProposalTraceProviderCreator) split.ProviderCreator { + return func(ctx context.Context, depth types.Depth, pre types.Claim, post types.Claim) (types.TraceProvider, error) { + localContext := CreateLocalContext(pre, post) + agreed, disputed, err := FetchProposals(ctx, topProvider, pre, post) + if err != nil { + return nil, err + } + return creator(ctx, localContext, depth, agreed, disputed) + } +} + +func FetchProposals(ctx context.Context, topProvider *OutputTraceProvider, pre types.Claim, post types.Claim) (contracts.Proposal, contracts.Proposal, error) { + usePrestateBlock := pre == (types.Claim{}) + var agreed contracts.Proposal + if usePrestateBlock { + prestateRoot, err := topProvider.AbsolutePreStateCommitment(ctx) + if err != nil { + return contracts.Proposal{}, contracts.Proposal{}, fmt.Errorf("failed to retrieve absolute prestate output root: %w", err) + } + agreed = contracts.Proposal{ + L2BlockNumber: 
new(big.Int).SetUint64(topProvider.prestateBlock), + OutputRoot: prestateRoot, + } + } else { + preBlockNum, err := topProvider.HonestBlockNumber(ctx, pre.Position) + if err != nil { + return contracts.Proposal{}, contracts.Proposal{}, fmt.Errorf("unable to calculate pre-claim block number: %w", err) + } + agreed = contracts.Proposal{ + L2BlockNumber: new(big.Int).SetUint64(preBlockNum), + OutputRoot: pre.Value, + } + } + postBlockNum, err := topProvider.ClaimedBlockNumber(post.Position) + if err != nil { + return contracts.Proposal{}, contracts.Proposal{}, fmt.Errorf("unable to calculate post-claim block number: %w", err) + } + claimed := contracts.Proposal{ + L2BlockNumber: new(big.Int).SetUint64(postBlockNum), + OutputRoot: post.Value, + } + return agreed, claimed, nil +} + +func CreateLocalContext(pre types.Claim, post types.Claim) common.Hash { + return crypto.Keccak256Hash(localContextPreimage(pre, post)) +} + +func localContextPreimage(pre types.Claim, post types.Claim) []byte { + encodeClaim := func(c types.Claim) []byte { + data := make([]byte, 64) + copy(data[0:32], c.Value.Bytes()) + c.Position.ToGIndex().FillBytes(data[32:]) + return data + } + var data []byte + if pre != (types.Claim{}) { + data = encodeClaim(pre) + } + data = append(data, encodeClaim(post)...) 
+ return data +} diff --git a/op-challenger2/game/fault/trace/outputs/split_adapter_test.go b/op-challenger2/game/fault/trace/outputs/split_adapter_test.go new file mode 100644 index 000000000000..900321687e23 --- /dev/null +++ b/op-challenger2/game/fault/trace/outputs/split_adapter_test.go @@ -0,0 +1,228 @@ +package outputs + +import ( + "context" + "errors" + "math" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/split" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +var creatorError = errors.New("captured args") + +func TestOutputRootSplitAdapter(t *testing.T) { + tests := []struct { + name string + preTraceIndex int64 + postTraceIndex int64 + expectedAgreedBlockNum int64 + expectedClaimedBlockNum int64 + }{ + { + name: "middleOfBlockRange", + preTraceIndex: 5, + postTraceIndex: 9, + expectedAgreedBlockNum: 26, + expectedClaimedBlockNum: 30, + }, + { + name: "beyondPostBlock", + preTraceIndex: 5, + postTraceIndex: 50, + expectedAgreedBlockNum: 26, + expectedClaimedBlockNum: 40, + }, + { + name: "firstBlock", + preTraceIndex: 0, + postTraceIndex: 1, + expectedAgreedBlockNum: 21, + expectedClaimedBlockNum: 22, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + topDepth := types.Depth(10) + adapter, creator := setupAdapterTest(t, topDepth) + preClaim := types.Claim{ + ClaimData: types.ClaimData{ + Value: common.Hash{0xaa}, + Position: types.NewPosition(topDepth, big.NewInt(test.preTraceIndex)), + }, + ContractIndex: 3, + ParentContractIndex: 2, + } + postClaim := types.Claim{ + 
ClaimData: types.ClaimData{ + Value: common.Hash{0xbb}, + Position: types.NewPosition(topDepth, big.NewInt(test.postTraceIndex)), + }, + ContractIndex: 7, + ParentContractIndex: 1, + } + + expectedAgreed := contracts.Proposal{ + L2BlockNumber: big.NewInt(test.expectedAgreedBlockNum), + OutputRoot: preClaim.Value, + } + expectedClaimed := contracts.Proposal{ + L2BlockNumber: big.NewInt(test.expectedClaimedBlockNum), + OutputRoot: postClaim.Value, + } + + _, err := adapter(context.Background(), 5, preClaim, postClaim) + require.ErrorIs(t, err, creatorError) + require.Equal(t, CreateLocalContext(preClaim, postClaim), creator.localContext) + require.Equal(t, expectedAgreed, creator.agreed) + require.Equal(t, expectedClaimed, creator.claimed) + }) + } +} + +func TestOutputRootSplitAdapter_FromAbsolutePrestate(t *testing.T) { + topDepth := types.Depth(10) + adapter, creator := setupAdapterTest(t, topDepth) + + postClaim := types.Claim{ + ClaimData: types.ClaimData{ + Value: common.Hash{0xbb}, + Position: types.NewPosition(topDepth, big.NewInt(0)), + }, + ContractIndex: 7, + ParentContractIndex: 1, + } + + expectedAgreed := contracts.Proposal{ + L2BlockNumber: big.NewInt(20), + OutputRoot: prestateOutputRoot, // Absolute prestate output root + } + expectedClaimed := contracts.Proposal{ + L2BlockNumber: big.NewInt(21), + OutputRoot: postClaim.Value, + } + + _, err := adapter(context.Background(), 5, types.Claim{}, postClaim) + require.ErrorIs(t, err, creatorError) + require.Equal(t, CreateLocalContext(types.Claim{}, postClaim), creator.localContext) + require.Equal(t, expectedAgreed, creator.agreed) + require.Equal(t, expectedClaimed, creator.claimed) +} + +func setupAdapterTest(t *testing.T, topDepth types.Depth) (split.ProviderCreator, *capturingCreator) { + prestateBlock := uint64(20) + poststateBlock := uint64(40) + creator := &capturingCreator{} + l1Head := eth.BlockID{ + Hash: common.Hash{0x11, 0x11}, + Number: 11, + } + rollupClient := &stubRollupClient{ + outputs: 
map[uint64]*eth.OutputResponse{ + prestateBlock: { + OutputRoot: eth.Bytes32(prestateOutputRoot), + }, + }, + maxSafeHead: math.MaxUint64, + } + prestateProvider := &stubPrestateProvider{ + absolutePrestate: prestateOutputRoot, + } + topProvider := NewTraceProvider(testlog.Logger(t, log.LevelInfo), prestateProvider, rollupClient, nil, l1Head, topDepth, prestateBlock, poststateBlock) + adapter := OutputRootSplitAdapter(topProvider, creator.Create) + return adapter, creator +} + +type capturingCreator struct { + localContext common.Hash + agreed contracts.Proposal + claimed contracts.Proposal +} + +func (c *capturingCreator) Create(_ context.Context, localContext common.Hash, _ types.Depth, agreed contracts.Proposal, claimed contracts.Proposal) (types.TraceProvider, error) { + c.localContext = localContext + c.agreed = agreed + c.claimed = claimed + return nil, creatorError +} + +func TestCreateLocalContext(t *testing.T) { + tests := []struct { + name string + preValue common.Hash + prePosition types.Position + postValue common.Hash + postPosition types.Position + expected []byte + }{ + { + name: "PreAndPost", + preValue: common.HexToHash("abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"), + prePosition: types.NewPositionFromGIndex(big.NewInt(2)), + postValue: common.HexToHash("cc00000000000000000000000000000000000000000000000000000000000000"), + postPosition: types.NewPositionFromGIndex(big.NewInt(3)), + expected: common.FromHex("abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234567890000000000000000000000000000000000000000000000000000000000000002cc000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003"), + }, + { + name: "LargePositions", + preValue: common.HexToHash("abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"), + prePosition: 
types.NewPositionFromGIndex(new(big.Int).SetBytes(common.FromHex("cbcdef0123456789abcdef0123456789abcdef0123456789abcdef012345678c"))), + postValue: common.HexToHash("dd00000000000000000000000000000000000000000000000000000000000000"), + postPosition: types.NewPositionFromGIndex(new(big.Int).SetUint64(math.MaxUint64)), + expected: common.FromHex("abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789cbcdef0123456789abcdef0123456789abcdef0123456789abcdef012345678cdd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff"), + }, + { + name: "AbsolutePreState", + preValue: common.Hash{}, + prePosition: types.Position{}, + postValue: common.HexToHash("cc00000000000000000000000000000000000000000000000000000000000000"), + postPosition: types.NewPositionFromGIndex(big.NewInt(3)), + expected: common.FromHex("cc000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003"), + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + pre := types.Claim{ + ClaimData: types.ClaimData{ + Value: test.preValue, + Position: test.prePosition, + }, + } + post := types.Claim{ + ClaimData: types.ClaimData{ + Value: test.postValue, + Position: test.postPosition, + }, + } + actualPreimage := localContextPreimage(pre, post) + require.Equal(t, test.expected, actualPreimage) + localContext := CreateLocalContext(pre, post) + require.Equal(t, crypto.Keccak256Hash(test.expected), localContext) + }) + } +} + +type stubPrestateProvider struct { + errorsOnAbsolutePrestateFetch bool + absolutePrestate common.Hash +} + +func (s *stubPrestateProvider) AbsolutePreStateCommitment(_ context.Context) (common.Hash, error) { + if s.errorsOnAbsolutePrestateFetch { + return common.Hash{}, errNoOutputAtBlock + } + return s.absolutePrestate, nil +} diff --git a/op-challenger2/game/fault/trace/prestates/cache.go 
b/op-challenger2/game/fault/trace/prestates/cache.go new file mode 100644 index 000000000000..6c14892df02b --- /dev/null +++ b/op-challenger2/game/fault/trace/prestates/cache.go @@ -0,0 +1,39 @@ +package prestates + +import ( + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/sources/caching" + "github.com/ethereum/go-ethereum/common" +) + +type PrestateSource interface { + // PrestatePath returns the path to the prestate file to use for the game. + // The provided prestateHash may be used to differentiate between different states but no guarantee is made that + // the returned prestate matches the supplied hash. + PrestatePath(prestateHash common.Hash) (string, error) +} + +type PrestateProviderCache struct { + createProvider func(prestateHash common.Hash) (types.PrestateProvider, error) + cache *caching.LRUCache[common.Hash, types.PrestateProvider] +} + +func NewPrestateProviderCache(m caching.Metrics, label string, createProvider func(prestateHash common.Hash) (types.PrestateProvider, error)) *PrestateProviderCache { + return &PrestateProviderCache{ + createProvider: createProvider, + cache: caching.NewLRUCache[common.Hash, types.PrestateProvider](m, label, 5), + } +} + +func (p *PrestateProviderCache) GetOrCreate(prestateHash common.Hash) (types.PrestateProvider, error) { + provider, ok := p.cache.Get(prestateHash) + if ok { + return provider, nil + } + provider, err := p.createProvider(prestateHash) + if err != nil { + return nil, err + } + p.cache.Add(prestateHash, provider) + return provider, nil +} diff --git a/op-challenger2/game/fault/trace/prestates/cache_test.go b/op-challenger2/game/fault/trace/prestates/cache_test.go new file mode 100644 index 000000000000..cd66a4c828c6 --- /dev/null +++ b/op-challenger2/game/fault/trace/prestates/cache_test.go @@ -0,0 +1,58 @@ +package prestates + +import ( + "context" + "errors" + "testing" + + 
"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestPrestateProviderCache_CreateAndCache(t *testing.T) { + cache := NewPrestateProviderCache(nil, "", func(prestateHash common.Hash) (types.PrestateProvider, error) { + return &stubPrestateProvider{commitment: prestateHash}, nil + }) + + hash1 := common.Hash{0xaa} + hash2 := common.Hash{0xbb} + provider1a, err := cache.GetOrCreate(hash1) + require.NoError(t, err) + commitment, err := provider1a.AbsolutePreStateCommitment(context.Background()) + require.NoError(t, err) + require.Equal(t, hash1, commitment) + + provider1b, err := cache.GetOrCreate(hash1) + require.NoError(t, err) + require.Same(t, provider1a, provider1b) + commitment, err = provider1b.AbsolutePreStateCommitment(context.Background()) + require.NoError(t, err) + require.Equal(t, hash1, commitment) + + provider2, err := cache.GetOrCreate(hash2) + require.NoError(t, err) + require.NotSame(t, provider1a, provider2) + commitment, err = provider2.AbsolutePreStateCommitment(context.Background()) + require.NoError(t, err) + require.Equal(t, hash2, commitment) +} + +func TestPrestateProviderCache_CreateFails(t *testing.T) { + hash1 := common.Hash{0xaa} + expectedErr := errors.New("boom") + cache := NewPrestateProviderCache(nil, "", func(prestateHash common.Hash) (types.PrestateProvider, error) { + return nil, expectedErr + }) + provider, err := cache.GetOrCreate(hash1) + require.ErrorIs(t, err, expectedErr) + require.Nil(t, provider) +} + +type stubPrestateProvider struct { + commitment common.Hash +} + +func (s *stubPrestateProvider) AbsolutePreStateCommitment(_ context.Context) (common.Hash, error) { + return s.commitment, nil +} diff --git a/op-challenger2/game/fault/trace/prestates/multi.go b/op-challenger2/game/fault/trace/prestates/multi.go new file mode 100644 index 000000000000..ccc22c6d5d4a --- /dev/null +++ 
b/op-challenger2/game/fault/trace/prestates/multi.go @@ -0,0 +1,72 @@ +package prestates + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum/go-ethereum/common" +) + +var ( + ErrPrestateUnavailable = errors.New("prestate unavailable") +) + +type MultiPrestateProvider struct { + baseUrl *url.URL + dataDir string +} + +func NewMultiPrestateProvider(baseUrl *url.URL, dataDir string) *MultiPrestateProvider { + return &MultiPrestateProvider{ + baseUrl: baseUrl, + dataDir: dataDir, + } +} + +func (m *MultiPrestateProvider) PrestatePath(hash common.Hash) (string, error) { + path := filepath.Join(m.dataDir, hash.Hex()+".json.gz") + if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) { + if err := m.fetchPrestate(hash, path); err != nil { + return "", fmt.Errorf("failed to fetch prestate: %w", err) + } + } else if err != nil { + return "", fmt.Errorf("error checking for existing prestate %v: %w", hash, err) + } + return path, nil +} + +func (m *MultiPrestateProvider) fetchPrestate(hash common.Hash, dest string) error { + if err := os.MkdirAll(m.dataDir, 0755); err != nil { + return fmt.Errorf("error creating prestate dir: %w", err) + } + prestateUrl := m.baseUrl.JoinPath(hash.Hex() + ".json") + resp, err := http.Get(prestateUrl.String()) + if err != nil { + return fmt.Errorf("failed to fetch prestate from %v: %w", prestateUrl, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%w from url %v: status %v", ErrPrestateUnavailable, prestateUrl, resp.StatusCode) + } + out, err := ioutil.NewAtomicWriterCompressed(dest, 0o644) + if err != nil { + return fmt.Errorf("failed to open atomic writer for %v: %w", dest, err) + } + defer func() { + // If errors occur, try to clean up without renaming the file into its final destination as Close() would do + _ = out.Abort() + }() + if _, err := io.Copy(out, resp.Body); 
err != nil { + return fmt.Errorf("failed to write file %v: %w", dest, err) + } + if err := out.Close(); err != nil { + return fmt.Errorf("failed to close file %v: %w", dest, err) + } + return nil +} diff --git a/op-challenger2/game/fault/trace/prestates/multi_test.go b/op-challenger2/game/fault/trace/prestates/multi_test.go new file mode 100644 index 000000000000..2f825d98da4e --- /dev/null +++ b/op-challenger2/game/fault/trace/prestates/multi_test.go @@ -0,0 +1,91 @@ +package prestates + +import ( + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestDownloadPrestate(t *testing.T) { + dir := t.TempDir() + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(r.URL.Path)) + })) + defer server.Close() + provider := NewMultiPrestateProvider(parseURL(t, server.URL), dir) + hash := common.Hash{0xaa} + path, err := provider.PrestatePath(hash) + require.NoError(t, err) + in, err := ioutil.OpenDecompressed(path) + require.NoError(t, err) + defer in.Close() + content, err := io.ReadAll(in) + require.NoError(t, err) + require.Equal(t, "/"+hash.Hex()+".json", string(content)) +} + +func TestCreateDirectory(t *testing.T) { + dir := t.TempDir() + dir = filepath.Join(dir, "test") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(r.URL.Path)) + })) + defer server.Close() + provider := NewMultiPrestateProvider(parseURL(t, server.URL), dir) + hash := common.Hash{0xaa} + path, err := provider.PrestatePath(hash) + require.NoError(t, err) + in, err := ioutil.OpenDecompressed(path) + require.NoError(t, err) + defer in.Close() + content, err := io.ReadAll(in) + require.NoError(t, err) + require.Equal(t, "/"+hash.Hex()+".json", string(content)) +} + +func 
TestExistingPrestate(t *testing.T) { + dir := t.TempDir() + provider := NewMultiPrestateProvider(parseURL(t, "http://127.0.0.1:1"), dir) + hash := common.Hash{0xaa} + expectedFile := filepath.Join(dir, hash.Hex()+".json.gz") + err := ioutil.WriteCompressedBytes(expectedFile, []byte("expected content"), os.O_WRONLY|os.O_CREATE, 0o644) + require.NoError(t, err) + + path, err := provider.PrestatePath(hash) + require.NoError(t, err) + require.Equal(t, expectedFile, path) + in, err := ioutil.OpenDecompressed(path) + require.NoError(t, err) + defer in.Close() + content, err := io.ReadAll(in) + require.NoError(t, err) + require.Equal(t, "expected content", string(content)) +} + +func TestMissingPrestate(t *testing.T) { + dir := t.TempDir() + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + })) + defer server.Close() + provider := NewMultiPrestateProvider(parseURL(t, server.URL), dir) + hash := common.Hash{0xaa} + path, err := provider.PrestatePath(hash) + require.ErrorIs(t, err, ErrPrestateUnavailable) + _, err = os.Stat(path) + require.ErrorIs(t, err, os.ErrNotExist) +} + +func parseURL(t *testing.T, str string) *url.URL { + parsed, err := url.Parse(str) + require.NoError(t, err) + return parsed +} diff --git a/op-challenger2/game/fault/trace/prestates/single.go b/op-challenger2/game/fault/trace/prestates/single.go new file mode 100644 index 000000000000..978f17f55d4d --- /dev/null +++ b/op-challenger2/game/fault/trace/prestates/single.go @@ -0,0 +1,15 @@ +package prestates + +import "github.com/ethereum/go-ethereum/common" + +type SinglePrestateSource struct { + path string +} + +func NewSinglePrestateSource(path string) *SinglePrestateSource { + return &SinglePrestateSource{path: path} +} + +func (s *SinglePrestateSource) PrestatePath(_ common.Hash) (string, error) { + return s.path, nil +} diff --git a/op-challenger2/game/fault/trace/split/split.go b/op-challenger2/game/fault/trace/split/split.go new 
file mode 100644 index 000000000000..a0ca97337bab --- /dev/null +++ b/op-challenger2/game/fault/trace/split/split.go @@ -0,0 +1,92 @@ +package split + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" +) + +var ( + errRefClaimNotDeepEnough = errors.New("reference claim is not deep enough") +) + +type ProviderCreator func(ctx context.Context, depth types.Depth, pre types.Claim, post types.Claim) (types.TraceProvider, error) + +func NewSplitProviderSelector(topProvider types.TraceProvider, topDepth types.Depth, bottomProviderCreator ProviderCreator) trace.ProviderSelector { + return func(ctx context.Context, game types.Game, ref types.Claim, pos types.Position) (types.TraceProvider, error) { + if pos.Depth() <= topDepth { + return topProvider, nil + } + if ref.Position.Depth() < topDepth { + return nil, fmt.Errorf("%w, claim depth: %v, depth required: %v", errRefClaimNotDeepEnough, ref.Position.Depth(), topDepth) + } + + // Find the ancestor claim at the leaf level for the top game. + topLeaf, err := findAncestorAtDepth(game, ref, topDepth) + if err != nil { + return nil, err + } + + var pre, post types.Claim + // If pos is to the right of the leaf from the top game, we must be defending that output root + // otherwise, we're attacking it. 
+ if pos.TraceIndex(pos.Depth()).Cmp(topLeaf.TraceIndex(pos.Depth())) > 0 { + // Defending the top leaf claim, so use it as the pre-claim and find the post + pre = topLeaf + postTraceIdx := new(big.Int).Add(pre.TraceIndex(topDepth), big.NewInt(1)) + post, err = findAncestorWithTraceIndex(game, topLeaf, topDepth, postTraceIdx) + if err != nil { + return nil, fmt.Errorf("failed to find post claim: %w", err) + } + } else { + // Attacking the top leaf claim, so use it as the post-claim and find the pre + post = topLeaf + postTraceIdx := post.TraceIndex(topDepth) + if postTraceIdx.Cmp(big.NewInt(0)) == 0 { + pre = types.Claim{} + } else { + preTraceIdx := new(big.Int).Sub(postTraceIdx, big.NewInt(1)) + pre, err = findAncestorWithTraceIndex(game, topLeaf, topDepth, preTraceIdx) + if err != nil { + return nil, fmt.Errorf("failed to find pre claim: %w", err) + } + } + } + // The top game runs from depth 0 to split depth *inclusive*. + // The - 1 here accounts for the fact that the split depth is included in the top game. 
+ bottomDepth := game.MaxDepth() - topDepth - 1 + provider, err := bottomProviderCreator(ctx, bottomDepth, pre, post) + if err != nil { + return nil, err + } + // Translate such that the root of the bottom game is the level below the top game leaf + return trace.Translate(provider, topDepth+1), nil + } +} + +func findAncestorAtDepth(game types.Game, claim types.Claim, depth types.Depth) (types.Claim, error) { + for claim.Depth() > depth { + parent, err := game.GetParent(claim) + if err != nil { + return types.Claim{}, fmt.Errorf("failed to find ancestor at depth %v: %w", depth, err) + } + claim = parent + } + return claim, nil +} + +func findAncestorWithTraceIndex(game types.Game, ref types.Claim, depth types.Depth, traceIdx *big.Int) (types.Claim, error) { + candidate := ref + for candidate.TraceIndex(depth).Cmp(traceIdx) != 0 { + parent, err := game.GetParent(candidate) + if err != nil { + return types.Claim{}, fmt.Errorf("failed to get parent of claim %v: %w", candidate.ContractIndex, err) + } + candidate = parent + } + return candidate, nil +} diff --git a/op-challenger2/game/fault/trace/split/split_test.go b/op-challenger2/game/fault/trace/split/split_test.go new file mode 100644 index 000000000000..4a0c5969fb5f --- /dev/null +++ b/op-challenger2/game/fault/trace/split/split_test.go @@ -0,0 +1,325 @@ +package split + +import ( + "context" + "fmt" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/test" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/alphabet" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/stretchr/testify/require" +) + +const ( + gameDepth = 7 + splitDepth = 3 +) + +func TestUseTopProvider(t *testing.T) { + ctx := context.Background() + topProvider, selector, gameBuilder := setupAlphabetSplitSelector(t) + + ref := gameBuilder.Game.Claims()[0] + + pos := 
ref.Position + for pos.Depth() <= splitDepth { + provider, err := selector(ctx, gameBuilder.Game, ref, ref.Position) + require.NoError(t, err) + require.Same(t, topProvider, provider) + _, err = topProvider.Get(ctx, pos) + require.NoError(t, err, "should be able to use provider for position") + pos = pos.Attack() + } +} + +func TestErrorWhenRefAboveTopGameLeafButPositionInBottom(t *testing.T) { + ctx := context.Background() + _, selector, gameBuilder := setupAlphabetSplitSelector(t) + + // Generate claims at depths up to but not including the leaf of the top providers + createClaimsToDepth(gameBuilder, splitDepth-1) + for _, ref := range gameBuilder.Game.Claims() { + pos := types.NewPosition(splitDepth+1, big.NewInt(0)) + provider, err := selector(ctx, gameBuilder.Game, ref, pos) + require.ErrorIsf(t, err, errRefClaimNotDeepEnough, "should not get provider with ref claim at depth: %v", ref.Depth()) + require.Nil(t, provider) + } +} + +func TestTranslatePositionsForBottomProvider(t *testing.T) { + tests := []struct { + name string + setup func(t *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) + }{ + // There are 4 leaf nodes that can be accessed in the top tree of depth 3: 8, 10, 12, 14 + // Then you can attack and defend any of those to challenge all blocks + {"attackTopLeafGIndex8", attackTopLeafGIndex8}, + {"defendTopLeafGIndex8", defendTopLeafGIndex8}, + {"attackTopLeafGIndex10", attackTopLeafGIndex10}, + {"defendTopLeafGIndex10", defendTopLeafGIndex10}, + {"attackTopLeafGIndex12", attackTopLeafGIndex12}, + {"defendTopLeafGIndex12", defendTopLeafGIndex12}, + {"attackTopLeafGIndex14", attackTopLeafGIndex14}, + {"attackTopLeafGIndex14", defendTopLeafGIndex14}, + } + for _, tCase := range tests { + tCase := tCase + t.Run(tCase.name, func(t *testing.T) { + _, selector, gameBuilder := setupAlphabetSplitSelector(t) + ref, pos, _, _ := tCase.setup(t, gameBuilder) + provider, err := 
selector(context.Background(), gameBuilder.Game, ref, pos) + require.NoError(t, err) + + claimPos := pos + localClaimPos := types.NewPositionFromGIndex(big.NewInt(1)) + requireSameValue(t, provider, claimPos, asBottomTraceProvider(t, provider).AlphabetTraceProvider, localClaimPos) + requireSameValue(t, provider, claimPos.Attack(), asBottomTraceProvider(t, provider).AlphabetTraceProvider, localClaimPos.Attack()) + requireSameValue(t, provider, claimPos.Attack().Defend(), asBottomTraceProvider(t, provider).AlphabetTraceProvider, localClaimPos.Attack().Defend()) + }) + } +} + +func requireSameValue(t *testing.T, a types.TraceProvider, aPos types.Position, b types.TraceProvider, bPos types.Position) { + // Check Get returns the same results + aValue, err := a.Get(context.Background(), aPos) + require.NoError(t, err) + bValue, err := b.Get(context.Background(), bPos) + require.NoError(t, err) + require.Equal(t, aValue, bValue) + + // Check GetStepData returns the same results + aPrestate, aProofData, aPreimageData, err := a.GetStepData(context.Background(), aPos) + require.NoError(t, err) + bPrestate, bProofData, bPreimageData, err := b.GetStepData(context.Background(), bPos) + require.NoError(t, err) + require.Equal(t, aPrestate, bPrestate) + require.Equal(t, aProofData, bProofData) + require.Equal(t, aPreimageData, bPreimageData) +} + +func TestBottomProviderAttackingTopLeaf(t *testing.T) { + tests := []struct { + name string + setup func(t *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) + }{ + // There are 4 leaf nodes that can be accessed in the top tree of depth 3: 8, 10, 12, 14 + // Then you can attack and defend any of those to challenge all blocks + // We can then use these setups to test any other reference claim descending from what these setup since + // that whole subtree should have the same pre and post claim from the top provider. 
+ {"attackTopLeafGIndex8", attackTopLeafGIndex8}, + {"defendTopLeafGIndex8", defendTopLeafGIndex8}, + {"attackTopLeafGIndex10", attackTopLeafGIndex10}, + {"defendTopLeafGIndex10", defendTopLeafGIndex10}, + {"attackTopLeafGIndex12", attackTopLeafGIndex12}, + {"defendTopLeafGIndex12", defendTopLeafGIndex12}, + {"attackTopLeafGIndex14", attackTopLeafGIndex14}, + {"attackTopLeafGIndex14", defendTopLeafGIndex14}, + } + for _, tCase := range tests { + tCase := tCase + t.Run(tCase.name, func(t *testing.T) { + _, selector, gameBuilder := setupAlphabetSplitSelector(t) + + ref, pos, expectedPre, expectedPost := tCase.setup(t, gameBuilder) + + runTest := func(ref types.Claim, pos types.Position) { + t.Run(fmt.Sprintf("Ref-d%vi%v_Pos-d%vi%v", ref.Depth(), ref.IndexAtDepth(), pos.Depth(), pos.IndexAtDepth()), func(t *testing.T) { + provider, err := selector(context.Background(), gameBuilder.Game, ref, pos) + require.NoError(t, err) + requireBottomProviderForClaims(t, provider, expectedPre, expectedPost) + }) + } + + // Check we get the same pre and post for any reference claim lower in the game + var testDescendantClaims func(ref types.Claim, pos types.Position) + testDescendantClaims = func(ref types.Claim, pos types.Position) { + // For each reference claim, check it works with the claim position, or attacking or defending the claim + runTest(ref, pos) + runTest(ref, pos.Attack()) + runTest(ref, pos.Defend()) + if pos.Depth() >= gameDepth { + return + } + + // If the ref is the leaf of the top claim, ensure we respect whether the test is setup + // to attack or defend the top leaf claim. 
+ if ref.Depth() != splitDepth || !pos.RightOf(ref.Position) { + gameBuilder.SeqFrom(ref).Attack() + attackRef := latestClaim(gameBuilder) + testDescendantClaims(attackRef, attackRef.Position) + } + if ref.Depth() != splitDepth || pos.RightOf(ref.Position) { + gameBuilder.SeqFrom(ref).Defend() + defendRef := latestClaim(gameBuilder) + testDescendantClaims(defendRef, defendRef.Position) + } + } + testDescendantClaims(ref, pos) + }) + } +} + +func attackTopLeafGIndex8(_ *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) { + // Generate claims down to the top provider's leaf + seq := gameBuilder.Seq() // gindex 1, trace 7 + seq = seq.Attack() // gindex 2, trace 3 + seq = seq.Attack() // gindex 4, trace 1 + seq.Attack() // gindex 8, trace 0 + expectPost = latestClaim(gameBuilder) + + // No pre-claim as the first output root is being challenged. + expectPre = types.Claim{} + + ref = latestClaim(gameBuilder) + pos = ref.Position.Attack() + return +} + +func defendTopLeafGIndex8(_ *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) { + // Generate claims down to the top provider's leaf + seq := gameBuilder.Seq() // gindex 1, trace 7 + seq = seq.Attack() // gindex 2, trace 3 + seq = seq.Attack() // gindex 4, trace 1 + expectPost = latestClaim(gameBuilder) + seq.Attack() // gindex 8, trace 0 + expectPre = latestClaim(gameBuilder) + + ref = latestClaim(gameBuilder) + pos = ref.Position.Defend() + return +} + +func attackTopLeafGIndex10(_ *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) { + seq := gameBuilder.Seq() // gindex 1, trace 7 + seq = seq.Attack() // gindex 2, trace 3 + seq = seq.Attack() // gindex 4, trace 1 + expectPre = latestClaim(gameBuilder) + seq.Defend() // gindex 10, trace 2 + expectPost = latestClaim(gameBuilder) + + ref = 
latestClaim(gameBuilder) + pos = ref.Position.Attack() + return +} + +func defendTopLeafGIndex10(_ *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) { + seq := gameBuilder.Seq() // gindex 1, trace 7 + seq = seq.Attack() // gindex 2, trace 3 + expectPost = latestClaim(gameBuilder) + seq = seq.Attack() // gindex 4, trace 1 + seq.Defend() // gindex 10, trace 2 + expectPre = latestClaim(gameBuilder) + + ref = latestClaim(gameBuilder) + pos = ref.Position.Defend() + return +} + +func attackTopLeafGIndex12(_ *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) { + seq := gameBuilder.Seq() // gindex 1, trace 7 + seq = seq.Attack() // gindex 2, trace 3 + expectPre = latestClaim(gameBuilder) + seq = seq.Defend() // gindex 6, trace 5 + seq.Attack() // gindex 12, trace 4 + expectPost = latestClaim(gameBuilder) + + ref = latestClaim(gameBuilder) + pos = ref.Position.Attack() + return +} + +func defendTopLeafGIndex12(_ *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) { + seq := gameBuilder.Seq() // gindex 1, trace 7 + seq = seq.Attack() // gindex 2, trace 3 + seq = seq.Defend() // gindex 6, trace 5 + expectPost = latestClaim(gameBuilder) + seq.Attack() // gindex 12, trace 4 + expectPre = latestClaim(gameBuilder) + + ref = latestClaim(gameBuilder) + pos = ref.Position.Defend() + return +} + +func attackTopLeafGIndex14(_ *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) { + seq := gameBuilder.Seq() // gindex 1, trace 7 + seq = seq.Attack() // gindex 2, trace 3 + seq = seq.Defend() // gindex 6, trace 5 + expectPre = latestClaim(gameBuilder) + seq.Defend() // gindex 14, trace 6 + expectPost = latestClaim(gameBuilder) + + ref = latestClaim(gameBuilder) + pos = 
ref.Position.Attack() + return +} + +func defendTopLeafGIndex14(_ *testing.T, gameBuilder *test.GameBuilder) (ref types.Claim, pos types.Position, expectPre types.Claim, expectPost types.Claim) { + seq := gameBuilder.Seq() // gindex 1, trace 7 + expectPost = latestClaim(gameBuilder) + seq = seq.Attack() // gindex 2, trace 3 + seq = seq.Defend() // gindex 6, trace 5 + seq.Defend() // gindex 14, trace 6 + expectPre = latestClaim(gameBuilder) + + ref = latestClaim(gameBuilder) + pos = ref.Position.Defend() + return +} + +func latestClaim(gameBuilder *test.GameBuilder) types.Claim { + return gameBuilder.Game.Claims()[len(gameBuilder.Game.Claims())-1] +} + +func createClaimsToDepth(gameBuilder *test.GameBuilder, depth int) { + seq := gameBuilder.Seq() + for i := 0; i < depth; i++ { + seq = seq.Attack() + } +} + +func requireBottomProviderForClaims(t *testing.T, actual types.TraceProvider, expectedPre types.Claim, expectedPost types.Claim) { + if expectedPre != (types.Claim{}) { + require.Equal(t, + new(big.Int).Add(expectedPre.TraceIndex(splitDepth), big.NewInt(1)), + expectedPost.TraceIndex(splitDepth), + "should expect adjacent top level trace indices") + } + + bottomProvider := asBottomTraceProvider(t, actual) + require.Equal(t, expectedPre, bottomProvider.pre, "Incorrect pre claim") + require.Equal(t, expectedPost, bottomProvider.post, "Incorrect post claim") +} + +func asBottomTraceProvider(t *testing.T, actual types.TraceProvider) *bottomTraceProvider { + translatingProvider, ok := actual.(*trace.TranslatingProvider) + require.True(t, ok) + bottomProvider, ok := translatingProvider.Original().(*bottomTraceProvider) + require.True(t, ok) + return bottomProvider +} + +func setupAlphabetSplitSelector(t *testing.T) (*alphabet.AlphabetTraceProvider, trace.ProviderSelector, *test.GameBuilder) { + top := alphabet.NewTraceProvider(big.NewInt(0), splitDepth) + bottomCreator := func(ctx context.Context, depth types.Depth, pre types.Claim, post types.Claim) 
(types.TraceProvider, error) { + return &bottomTraceProvider{ + pre: pre, + post: post, + AlphabetTraceProvider: alphabet.NewTraceProvider(big.NewInt(0), depth), + }, nil + } + selector := NewSplitProviderSelector(top, splitDepth, bottomCreator) + + claimBuilder := test.NewAlphabetClaimBuilder(t, big.NewInt(0), gameDepth) + gameBuilder := claimBuilder.GameBuilder() + return top, selector, gameBuilder +} + +type bottomTraceProvider struct { + pre types.Claim + post types.Claim + *alphabet.AlphabetTraceProvider +} diff --git a/op-challenger2/game/fault/trace/translate.go b/op-challenger2/game/fault/trace/translate.go new file mode 100644 index 000000000000..32d146a78b37 --- /dev/null +++ b/op-challenger2/game/fault/trace/translate.go @@ -0,0 +1,53 @@ +package trace + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/ethereum/go-ethereum/common" +) + +type TranslatingProvider struct { + rootDepth types.Depth + provider types.TraceProvider +} + +// Translate returns a new TraceProvider that translates any requested positions before passing them on to the +// specified provider. +// The translation is done such that the root node for provider is at rootDepth. 
+func Translate(provider types.TraceProvider, rootDepth types.Depth) types.TraceProvider { + return &TranslatingProvider{ + rootDepth: rootDepth, + provider: provider, + } +} + +func (p *TranslatingProvider) Original() types.TraceProvider { + return p.provider +} + +func (p *TranslatingProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) { + relativePos, err := pos.RelativeToAncestorAtDepth(p.rootDepth) + if err != nil { + return common.Hash{}, err + } + return p.provider.Get(ctx, relativePos) +} + +func (p *TranslatingProvider) GetStepData(ctx context.Context, pos types.Position) (prestate []byte, proofData []byte, preimageData *types.PreimageOracleData, err error) { + relativePos, err := pos.RelativeToAncestorAtDepth(p.rootDepth) + if err != nil { + return nil, nil, nil, err + } + return p.provider.GetStepData(ctx, relativePos) +} + +func (p *TranslatingProvider) AbsolutePreStateCommitment(ctx context.Context) (hash common.Hash, err error) { + return p.provider.AbsolutePreStateCommitment(ctx) +} + +func (p *TranslatingProvider) GetL2BlockNumberChallenge(ctx context.Context) (*types.InvalidL2BlockNumberChallenge, error) { + return p.provider.GetL2BlockNumberChallenge(ctx) +} + +var _ types.TraceProvider = (*TranslatingProvider)(nil) diff --git a/op-challenger2/game/fault/trace/translate_test.go b/op-challenger2/game/fault/trace/translate_test.go new file mode 100644 index 000000000000..c161c6978c7a --- /dev/null +++ b/op-challenger2/game/fault/trace/translate_test.go @@ -0,0 +1,60 @@ +package trace + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/trace/alphabet" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + "github.com/stretchr/testify/require" +) + +func TestTranslate(t *testing.T) { + orig := alphabet.NewTraceProvider(big.NewInt(0), 4) + translated := Translate(orig, 3) + // All nodes on the first translated layer, map to GIndex 1 + for i := 
int64(8); i <= 15; i++ { + requireSameValue(t, orig, 1, translated, i) + } + // Nodes on the second translated layer map to GIndex 2 and 3 alternately + for i := int64(16); i <= 31; i += 2 { + requireSameValue(t, orig, 2, translated, i) + requireSameValue(t, orig, 3, translated, i+1) + } + // Nodes on the third translated layer map to GIndex 4, 5, 6 and 7 + for i := int64(32); i <= 61; i += 4 { + requireSameValue(t, orig, 4, translated, i) + requireSameValue(t, orig, 5, translated, i+1) + requireSameValue(t, orig, 6, translated, i+2) + requireSameValue(t, orig, 7, translated, i+3) + } +} + +func requireSameValue(t *testing.T, a types.TraceProvider, aGIdx int64, b types.TraceProvider, bGIdx int64) { + // Check Get returns the same results + aValue, err := a.Get(context.Background(), types.NewPositionFromGIndex(big.NewInt(aGIdx))) + require.NoError(t, err) + bValue, err := b.Get(context.Background(), types.NewPositionFromGIndex(big.NewInt(bGIdx))) + require.NoError(t, err) + require.Equal(t, aValue, bValue) + + // Check GetStepData returns the same results + aPrestate, aProofData, aPreimageData, err := a.GetStepData(context.Background(), types.NewPositionFromGIndex(big.NewInt(aGIdx))) + require.NoError(t, err) + bPrestate, bProofData, bPreimageData, err := b.GetStepData(context.Background(), types.NewPositionFromGIndex(big.NewInt(bGIdx))) + require.NoError(t, err) + require.Equal(t, aPrestate, bPrestate) + require.Equal(t, aProofData, bProofData) + require.Equal(t, aPreimageData, bPreimageData) +} + +func TestTranslate_AbsolutePreStateCommitment(t *testing.T) { + orig := alphabet.NewTraceProvider(big.NewInt(0), 4) + translated := Translate(orig, 3) + origValue, err := orig.AbsolutePreStateCommitment(context.Background()) + require.NoError(t, err) + translatedValue, err := translated.AbsolutePreStateCommitment(context.Background()) + require.NoError(t, err) + require.Equal(t, origValue, translatedValue) +} diff --git a/op-challenger2/game/fault/trace/utils/executor.go 
b/op-challenger2/game/fault/trace/utils/executor.go
new file mode 100644
index 000000000000..f3c5feac8311
--- /dev/null
+++ b/op-challenger2/game/fault/trace/utils/executor.go
@@ -0,0 +1,81 @@
+package utils
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"strconv"
+
+	oplog "github.com/ethereum-optimism/optimism/op-service/log"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+// SnapshotSelect returns the snapshot file to start execution from when
+// generating a proof for trace index i, falling back to absolutePreState.
+type SnapshotSelect func(logger log.Logger, dir string, absolutePreState string, i uint64) (string, error)
+
+// CmdExecutor runs the specified binary with the supplied arguments.
+type CmdExecutor func(ctx context.Context, l log.Logger, binary string, args ...string) error
+
+const (
+	SnapsDir     = "snapshots"
+	PreimagesDir = "preimages"
+	FinalState   = "final.json.gz"
+)
+
+// snapshotNameRegexp matches snapshot file names such as "1234.json.gz".
+// Both dots are escaped so names like "1.jsonXgz" are rejected.
+var snapshotNameRegexp = regexp.MustCompile(`^[0-9]+\.json\.gz$`)
+
+// PreimageDir returns the directory within dir that preimages are stored in.
+func PreimageDir(dir string) string {
+	return filepath.Join(dir, PreimagesDir)
+}
+
+// RunCmd executes binary with args, streaming its stdout and stderr to the
+// supplied logger. It blocks until the command exits and returns its error.
+func RunCmd(ctx context.Context, l log.Logger, binary string, args ...string) error {
+	cmd := exec.CommandContext(ctx, binary, args...)
+	stdOut := oplog.NewWriter(l, log.LevelInfo)
+	defer stdOut.Close()
+	// Keep stdErr at info level because FPVM uses stderr for progress messages
+	stdErr := oplog.NewWriter(l, log.LevelInfo)
+	defer stdErr.Close()
+	cmd.Stdout = stdOut
+	cmd.Stderr = stdErr
+	return cmd.Run()
+}
+
+// FindStartingSnapshot finds the closest snapshot before the specified traceIndex in snapDir.
+// If no suitable snapshot can be found it returns absolutePreState.
+func FindStartingSnapshot(logger log.Logger, snapDir string, absolutePreState string, traceIndex uint64) (string, error) {
+	// Find the closest snapshot to start from
+	entries, err := os.ReadDir(snapDir)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			// No snapshots directory yet - start from the absolute prestate.
+			return absolutePreState, nil
+		}
+		return "", fmt.Errorf("list snapshots in %v: %w", snapDir, err)
+	}
+	bestSnap := uint64(0)
+	for _, entry := range entries {
+		if entry.IsDir() {
+			logger.Warn("Unexpected directory in snapshots dir", "parent", snapDir, "child", entry.Name())
+			continue
+		}
+		name := entry.Name()
+		if !snapshotNameRegexp.MatchString(name) {
+			logger.Warn("Unexpected file in snapshots dir", "parent", snapDir, "child", entry.Name())
+			continue
+		}
+		index, err := strconv.ParseUint(name[0:len(name)-len(".json.gz")], 10, 64)
+		if err != nil {
+			logger.Error("Unable to parse trace index of snapshot file", "parent", snapDir, "child", entry.Name())
+			continue
+		}
+		// Track the largest snapshot index that is still strictly before traceIndex.
+		if index > bestSnap && index < traceIndex {
+			bestSnap = index
+		}
+	}
+	if bestSnap == 0 {
+		return absolutePreState, nil
+	}
+	// Use filepath.Join rather than a hard-coded "/" so the path is OS-correct,
+	// consistent with PreimageDir above.
+	startFrom := filepath.Join(snapDir, fmt.Sprintf("%v.json.gz", bestSnap))
+
+	return startFrom, nil
+}
diff --git a/op-challenger2/game/fault/trace/utils/local.go b/op-challenger2/game/fault/trace/utils/local.go
new file mode 100644
index 000000000000..40ca79862139
--- /dev/null
+++ b/op-challenger2/game/fault/trace/utils/local.go
@@ -0,0 +1,61 @@
+package utils
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts"
+	"github.com/ethereum/go-ethereum/common"
+	ethtypes "github.com/ethereum/go-ethereum/core/types"
+)
+
+// LocalGameInputs are the local inputs to the fault proof program for a game.
+type LocalGameInputs struct {
+	L1Head        common.Hash
+	L2Head        common.Hash
+	L2OutputRoot  common.Hash
+	L2Claim       common.Hash
+	L2BlockNumber *big.Int
+}
+
+// L2HeaderSource fetches L2 block headers by block number.
+type L2HeaderSource interface {
+	HeaderByNumber(context.Context, *big.Int) (*ethtypes.Header, error)
+}
+
+// L1HeadSource provides the L1 head block hash for a game.
+type L1HeadSource interface {
+	GetL1Head(ctx context.Context) (common.Hash,
error) +} + +type GameInputsSource interface { + L1HeadSource + GetProposals(ctx context.Context) (agreed contracts.Proposal, disputed contracts.Proposal, err error) +} + +func FetchLocalInputs(ctx context.Context, caller GameInputsSource, l2Client L2HeaderSource) (LocalGameInputs, error) { + agreedOutput, claimedOutput, err := caller.GetProposals(ctx) + if err != nil { + return LocalGameInputs{}, fmt.Errorf("fetch proposals: %w", err) + } + l1Head, err := caller.GetL1Head(ctx) + if err != nil { + return LocalGameInputs{}, fmt.Errorf("fetch L1 head: %w", err) + } + + return FetchLocalInputsFromProposals(ctx, l1Head, l2Client, agreedOutput, claimedOutput) +} + +func FetchLocalInputsFromProposals(ctx context.Context, l1Head common.Hash, l2Client L2HeaderSource, agreedOutput contracts.Proposal, claimedOutput contracts.Proposal) (LocalGameInputs, error) { + agreedHeader, err := l2Client.HeaderByNumber(ctx, agreedOutput.L2BlockNumber) + if err != nil { + return LocalGameInputs{}, fmt.Errorf("fetch L2 block header %v: %w", agreedOutput.L2BlockNumber, err) + } + l2Head := agreedHeader.Hash() + + return LocalGameInputs{ + L1Head: l1Head, + L2Head: l2Head, + L2OutputRoot: agreedOutput.OutputRoot, + L2Claim: claimedOutput.OutputRoot, + L2BlockNumber: claimedOutput.L2BlockNumber, + }, nil +} diff --git a/op-challenger2/game/fault/trace/utils/local_test.go b/op-challenger2/game/fault/trace/utils/local_test.go new file mode 100644 index 000000000000..9609fe51d54a --- /dev/null +++ b/op-challenger2/game/fault/trace/utils/local_test.go @@ -0,0 +1,101 @@ +package utils + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +func TestFetchLocalInputs(t *testing.T) { + ctx := context.Background() + contract := &mockGameInputsSource{ 
+ l1Head: common.Hash{0xcc}, + starting: contracts.Proposal{ + L2BlockNumber: big.NewInt(2222), + OutputRoot: common.Hash{0xdd}, + }, + disputed: contracts.Proposal{ + L2BlockNumber: big.NewInt(3333), + OutputRoot: common.Hash{0xee}, + }, + } + l2Client := &mockL2DataSource{ + chainID: big.NewInt(88422), + header: ethtypes.Header{ + Number: contract.starting.L2BlockNumber, + }, + } + + inputs, err := FetchLocalInputs(ctx, contract, l2Client) + require.NoError(t, err) + + require.Equal(t, contract.l1Head, inputs.L1Head) + require.Equal(t, l2Client.header.Hash(), inputs.L2Head) + require.EqualValues(t, contract.starting.OutputRoot, inputs.L2OutputRoot) + require.EqualValues(t, contract.disputed.OutputRoot, inputs.L2Claim) + require.Equal(t, contract.disputed.L2BlockNumber, inputs.L2BlockNumber) +} + +func TestFetchLocalInputsFromProposals(t *testing.T) { + ctx := context.Background() + agreed := contracts.Proposal{ + L2BlockNumber: big.NewInt(2222), + OutputRoot: common.Hash{0xdd}, + } + claimed := contracts.Proposal{ + L2BlockNumber: big.NewInt(3333), + OutputRoot: common.Hash{0xee}, + } + l1Head := common.Hash{0xcc} + l2Client := &mockL2DataSource{ + chainID: big.NewInt(88422), + header: ethtypes.Header{ + Number: agreed.L2BlockNumber, + }, + } + + inputs, err := FetchLocalInputsFromProposals(ctx, l1Head, l2Client, agreed, claimed) + require.NoError(t, err) + + require.Equal(t, l1Head, inputs.L1Head) + require.Equal(t, l2Client.header.Hash(), inputs.L2Head) + require.EqualValues(t, agreed.OutputRoot, inputs.L2OutputRoot) + require.EqualValues(t, claimed.OutputRoot, inputs.L2Claim) + require.Equal(t, claimed.L2BlockNumber, inputs.L2BlockNumber) +} + +type mockGameInputsSource struct { + l1Head common.Hash + starting contracts.Proposal + disputed contracts.Proposal +} + +func (s *mockGameInputsSource) GetL1Head(_ context.Context) (common.Hash, error) { + return s.l1Head, nil +} + +func (s *mockGameInputsSource) GetProposals(_ context.Context) (contracts.Proposal, 
contracts.Proposal, error) { + return s.starting, s.disputed, nil +} + +type mockL2DataSource struct { + chainID *big.Int + header ethtypes.Header +} + +func (s *mockL2DataSource) ChainID(_ context.Context) (*big.Int, error) { + return s.chainID, nil +} + +func (s *mockL2DataSource) HeaderByNumber(_ context.Context, num *big.Int) (*ethtypes.Header, error) { + if s.header.Number.Cmp(num) == 0 { + return &s.header, nil + } + return nil, ethereum.NotFound +} diff --git a/op-challenger2/game/fault/trace/utils/preimage.go b/op-challenger2/game/fault/trace/utils/preimage.go new file mode 100644 index 000000000000..c754018f9505 --- /dev/null +++ b/op-challenger2/game/fault/trace/utils/preimage.go @@ -0,0 +1,120 @@ +package utils + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/params" +) + +const ( + fieldElemKeyLength = 80 + commitmentLength = 48 + lengthPrefixSize = 8 +) + +var ( + ErrInvalidScalarValue = errors.New("invalid scalar value") + ErrInvalidBlobKeyPreimage = errors.New("invalid blob key preimage") +) + +type preimageSource func(key common.Hash) ([]byte, error) + +type PreimageLoader struct { + getPreimage preimageSource +} + +func NewPreimageLoader(getPreimage preimageSource) *PreimageLoader { + return &PreimageLoader{ + getPreimage: getPreimage, + } +} + +func (l *PreimageLoader) LoadPreimage(proof *ProofData) (*types.PreimageOracleData, error) { + if len(proof.OracleKey) == 0 { + return nil, nil + } + switch preimage.KeyType(proof.OracleKey[0]) { + case preimage.BlobKeyType: + return l.loadBlobPreimage(proof) + case preimage.PrecompileKeyType: + return 
l.loadPrecompilePreimage(proof) + default: + return types.NewPreimageOracleData(proof.OracleKey, proof.OracleValue, proof.OracleOffset), nil + } +} + +func (l *PreimageLoader) loadBlobPreimage(proof *ProofData) (*types.PreimageOracleData, error) { + // The key for a blob field element is a keccak hash of commitment++fieldElementIndex. + // First retrieve the preimage of the key as a keccak hash so we have the commitment and required field element + inputsKey := preimage.Keccak256Key(proof.OracleKey).PreimageKey() + inputs, err := l.getPreimage(inputsKey) + if err != nil { + return nil, fmt.Errorf("failed to get key preimage: %w", err) + } + if len(inputs) != fieldElemKeyLength { + return nil, fmt.Errorf("%w, expected length %v but was %v", ErrInvalidBlobKeyPreimage, fieldElemKeyLength, len(inputs)) + } + commitment := inputs[:commitmentLength] + requiredFieldElement := binary.BigEndian.Uint64(inputs[72:]) + + // Now, reconstruct the full blob by loading the 4096 field elements. + blob := eth.Blob{} + fieldElemKey := make([]byte, fieldElemKeyLength) + copy(fieldElemKey[:commitmentLength], commitment) + for i := 0; i < params.BlobTxFieldElementsPerBlob; i++ { + binary.BigEndian.PutUint64(fieldElemKey[72:], uint64(i)) + key := preimage.BlobKey(crypto.Keccak256(fieldElemKey)).PreimageKey() + fieldElement, err := l.getPreimage(key) + if err != nil { + return nil, fmt.Errorf("failed to load field element %v with key %v: %w", i, common.Hash(key), err) + } + copy(blob[i<<5:(i+1)<<5], fieldElement[:]) + } + + // Sanity check the blob data matches the commitment + blobCommitment, err := blob.ComputeKZGCommitment() + if err != nil || !bytes.Equal(blobCommitment[:], commitment[:]) { + return nil, fmt.Errorf("invalid blob commitment: %w", err) + } + // Compute the KZG proof for the required field element + var point kzg4844.Point + new(big.Int).SetUint64(requiredFieldElement).FillBytes(point[:]) + kzgProof, claim, err := kzg4844.ComputeProof(kzg4844.Blob(blob), point) + if err 
!= nil { + return nil, fmt.Errorf("failed to compute kzg proof: %w", err) + } + err = kzg4844.VerifyProof(kzg4844.Commitment(commitment), point, claim, kzgProof) + if err != nil { + return nil, fmt.Errorf("failed to verify proof: %w", err) + } + + claimWithLength := lengthPrefixed(claim[:]) + return types.NewPreimageOracleBlobData(proof.OracleKey, claimWithLength, proof.OracleOffset, requiredFieldElement, commitment, kzgProof[:]), nil +} + +func (l *PreimageLoader) loadPrecompilePreimage(proof *ProofData) (*types.PreimageOracleData, error) { + inputKey := preimage.Keccak256Key(proof.OracleKey).PreimageKey() + input, err := l.getPreimage(inputKey) + if err != nil { + return nil, fmt.Errorf("failed to get key preimage: %w", err) + } + inputWithLength := lengthPrefixed(input) + return types.NewPreimageOracleData(proof.OracleKey, inputWithLength, proof.OracleOffset), nil +} + +func lengthPrefixed(data []byte) []byte { + dataWithLength := make([]byte, len(data)+lengthPrefixSize) + binary.BigEndian.PutUint64(dataWithLength[:lengthPrefixSize], uint64(len(data))) + copy(dataWithLength[lengthPrefixSize:], data) + return dataWithLength +} diff --git a/op-challenger2/game/fault/trace/utils/preimage_test.go b/op-challenger2/game/fault/trace/utils/preimage_test.go new file mode 100644 index 000000000000..0d7f46429eed --- /dev/null +++ b/op-challenger2/game/fault/trace/utils/preimage_test.go @@ -0,0 +1,214 @@ +package utils + +import ( + "crypto/sha256" + "encoding/binary" + "fmt" + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" + gokzg4844 "github.com/crate-crypto/go-kzg-4844" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-program/host/kvstore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + 
"github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +func TestPreimageLoader_NoPreimage(t *testing.T) { + loader := NewPreimageLoader(kvstore.NewMemKV().Get) + actual, err := loader.LoadPreimage(&ProofData{}) + require.NoError(t, err) + require.Nil(t, actual) +} + +func TestPreimageLoader_LocalPreimage(t *testing.T) { + loader := NewPreimageLoader(kvstore.NewMemKV().Get) + proof := &ProofData{ + OracleKey: common.Hash{byte(preimage.LocalKeyType), 0xaa, 0xbb}.Bytes(), + OracleValue: nil, + OracleOffset: 4, + } + actual, err := loader.LoadPreimage(proof) + require.NoError(t, err) + expected := types.NewPreimageOracleData(proof.OracleKey, nil, proof.OracleOffset) + require.Equal(t, expected, actual) + require.True(t, actual.IsLocal) +} + +func TestPreimageLoader_SimpleTypes(t *testing.T) { + tests := []preimage.KeyType{ + preimage.Keccak256KeyType, + preimage.Sha256KeyType, + } + for _, keyType := range tests { + keyType := keyType + t.Run(fmt.Sprintf("type-%v", keyType), func(t *testing.T) { + loader := NewPreimageLoader(kvstore.NewMemKV().Get) + proof := &ProofData{ + OracleKey: common.Hash{byte(keyType), 0xaa, 0xbb}.Bytes(), + OracleValue: []byte{1, 2, 3, 4, 5, 6}, + OracleOffset: 3, + } + actual, err := loader.LoadPreimage(proof) + require.NoError(t, err) + expected := types.NewPreimageOracleData(proof.OracleKey, proof.OracleValue, proof.OracleOffset) + require.Equal(t, expected, actual) + }) + } +} + +func TestPreimageLoader_BlobPreimage(t *testing.T) { + blob := testBlob() + commitment, err := kzg4844.BlobToCommitment(kzg4844.Blob(blob)) + require.NoError(t, err) + + fieldIndex := uint64(24) + elementData := blob[fieldIndex<<5 : (fieldIndex+1)<<5] + var point kzg4844.Point + new(big.Int).SetUint64(fieldIndex).FillBytes(point[:]) + kzgProof, claim, err := kzg4844.ComputeProof(kzg4844.Blob(blob), point) + require.NoError(t, err) + elementDataWithLengthPrefix := make([]byte, len(elementData)+lengthPrefixSize) + 
binary.BigEndian.PutUint64(elementDataWithLengthPrefix[:lengthPrefixSize], uint64(len(elementData))) + copy(elementDataWithLengthPrefix[lengthPrefixSize:], elementData) + + keyBuf := make([]byte, 80) + copy(keyBuf[:48], commitment[:]) + binary.BigEndian.PutUint64(keyBuf[72:], fieldIndex) + key := preimage.BlobKey(crypto.Keccak256Hash(keyBuf)).PreimageKey() + + proof := &ProofData{ + OracleKey: key[:], + OracleValue: elementDataWithLengthPrefix, + OracleOffset: 4, + } + + t.Run("NoKeyPreimage", func(t *testing.T) { + kv := kvstore.NewMemKV() + loader := NewPreimageLoader(kv.Get) + proof := &ProofData{ + OracleKey: common.Hash{byte(preimage.BlobKeyType), 0xaf}.Bytes(), + OracleValue: proof.OracleValue, + OracleOffset: proof.OracleOffset, + } + _, err := loader.LoadPreimage(proof) + require.ErrorIs(t, err, kvstore.ErrNotFound) + }) + + t.Run("InvalidKeyPreimage", func(t *testing.T) { + kv := kvstore.NewMemKV() + loader := NewPreimageLoader(kv.Get) + proof := &ProofData{ + OracleKey: common.Hash{byte(preimage.BlobKeyType), 0xad}.Bytes(), + OracleValue: proof.OracleValue, + OracleOffset: proof.OracleOffset, + } + require.NoError(t, kv.Put(preimage.Keccak256Key(proof.OracleKey).PreimageKey(), []byte{1, 2})) + _, err := loader.LoadPreimage(proof) + require.ErrorIs(t, err, ErrInvalidBlobKeyPreimage) + }) + + t.Run("MissingBlobs", func(t *testing.T) { + kv := kvstore.NewMemKV() + loader := NewPreimageLoader(kv.Get) + proof := &ProofData{ + OracleKey: common.Hash{byte(preimage.BlobKeyType), 0xae}.Bytes(), + OracleValue: proof.OracleValue, + OracleOffset: proof.OracleOffset, + } + require.NoError(t, kv.Put(preimage.Keccak256Key(proof.OracleKey).PreimageKey(), keyBuf)) + _, err := loader.LoadPreimage(proof) + require.ErrorIs(t, err, kvstore.ErrNotFound) + }) + + t.Run("Valid", func(t *testing.T) { + kv := kvstore.NewMemKV() + loader := NewPreimageLoader(kv.Get) + storeBlob(t, kv, gokzg4844.KZGCommitment(commitment), blob) + actual, err := loader.LoadPreimage(proof) + 
require.NoError(t, err) + + claimWithLength := make([]byte, len(claim)+lengthPrefixSize) + binary.BigEndian.PutUint64(claimWithLength[:lengthPrefixSize], uint64(len(claim))) + copy(claimWithLength[lengthPrefixSize:], claim[:]) + + expected := types.NewPreimageOracleBlobData(proof.OracleKey, claimWithLength, proof.OracleOffset, fieldIndex, commitment[:], kzgProof[:]) + require.Equal(t, expected, actual) + require.False(t, actual.IsLocal) + + // Check the KZG proof is valid + var actualPoint kzg4844.Point + new(big.Int).SetUint64(actual.BlobFieldIndex).FillBytes(actualPoint[:]) + actualClaim := kzg4844.Claim(actual.GetPreimageWithoutSize()) + actualCommitment := kzg4844.Commitment(actual.BlobCommitment) + actualProof := kzg4844.Proof(actual.BlobProof) + err = kzg4844.VerifyProof(actualCommitment, actualPoint, actualClaim, actualProof) + require.NoError(t, err) + }) +} + +func TestPreimageLoader_PrecompilePreimage(t *testing.T) { + input := []byte("test input") + key := preimage.PrecompileKey(crypto.Keccak256Hash(input)).PreimageKey() + proof := &ProofData{ + OracleKey: key[:], + } + + t.Run("NoInputPreimage", func(t *testing.T) { + kv := kvstore.NewMemKV() + loader := NewPreimageLoader(kv.Get) + _, err := loader.LoadPreimage(proof) + require.ErrorIs(t, err, kvstore.ErrNotFound) + }) + t.Run("Valid", func(t *testing.T) { + kv := kvstore.NewMemKV() + loader := NewPreimageLoader(kv.Get) + require.NoError(t, kv.Put(preimage.Keccak256Key(proof.OracleKey).PreimageKey(), input)) + actual, err := loader.LoadPreimage(proof) + require.NoError(t, err) + inputWithLength := lengthPrefixed(input) + expected := types.NewPreimageOracleData(proof.OracleKey, inputWithLength, proof.OracleOffset) + require.Equal(t, expected, actual) + }) +} + +// Returns a serialized random field element in big-endian +func fieldElement(val uint64) [32]byte { + r := fr.NewElement(val) + return gokzg4844.SerializeScalar(r) +} + +func testBlob() gokzg4844.Blob { + var blob gokzg4844.Blob + bytesPerBlob := 
gokzg4844.ScalarsPerBlob * gokzg4844.SerializedScalarSize + for i := 0; i < bytesPerBlob; i += gokzg4844.SerializedScalarSize { + fieldElementBytes := fieldElement(uint64(i)) + copy(blob[i:i+gokzg4844.SerializedScalarSize], fieldElementBytes[:]) + } + return blob +} + +func storeBlob(t *testing.T, kv kvstore.KV, commitment gokzg4844.KZGCommitment, blob gokzg4844.Blob) { + // Pre-store versioned hash preimage (commitment) + key := preimage.Sha256Key(sha256.Sum256(commitment[:])) + err := kv.Put(key.PreimageKey(), commitment[:]) + require.NoError(t, err, "Failed to store versioned hash preimage in kvstore") + + // Pre-store blob field elements + blobKeyBuf := make([]byte, 80) + copy(blobKeyBuf[:48], commitment[:]) + for i := 0; i < params.BlobTxFieldElementsPerBlob; i++ { + binary.BigEndian.PutUint64(blobKeyBuf[72:], uint64(i)) + feKey := crypto.Keccak256Hash(blobKeyBuf) + err := kv.Put(preimage.Keccak256Key(feKey).PreimageKey(), blobKeyBuf) + require.NoError(t, err) + + err = kv.Put(preimage.BlobKey(feKey).PreimageKey(), blob[i<<5:(i+1)<<5]) + require.NoError(t, err, "Failed to store field element preimage in kvstore") + } +} diff --git a/op-challenger2/game/fault/trace/utils/provider.go b/op-challenger2/game/fault/trace/utils/provider.go new file mode 100644 index 000000000000..3417b8386318 --- /dev/null +++ b/op-challenger2/game/fault/trace/utils/provider.go @@ -0,0 +1,96 @@ +package utils + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + + preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +const ( + ProofsDir = "proofs" + diskStateCache = "state.json.gz" +) + +type ProofData struct { + ClaimValue common.Hash `json:"post"` + StateData hexutil.Bytes `json:"state-data"` + ProofData hexutil.Bytes `json:"proof-data"` + OracleKey hexutil.Bytes 
`json:"oracle-key,omitempty"` + OracleValue hexutil.Bytes `json:"oracle-value,omitempty"` + OracleOffset uint32 `json:"oracle-offset,omitempty"` +} + +type ProofGenerator interface { + // GenerateProof executes FPVM binary to generate a proof at the specified trace index in dataDir. + GenerateProof(ctx context.Context, dataDir string, proofAt uint64) error +} + +type diskStateCacheObj struct { + Step uint64 `json:"step"` +} + +// ReadLastStep reads the tracked last step from disk. +func ReadLastStep(dir string) (uint64, error) { + state := diskStateCacheObj{} + file, err := ioutil.OpenDecompressed(filepath.Join(dir, diskStateCache)) + if err != nil { + return 0, err + } + defer file.Close() + err = json.NewDecoder(file).Decode(&state) + if err != nil { + return 0, err + } + return state.Step, nil +} + +// WriteLastStep writes the last step and proof to disk as a persistent cache. +func WriteLastStep(dir string, proof *ProofData, step uint64) error { + state := diskStateCacheObj{Step: step} + lastStepFile := filepath.Join(dir, diskStateCache) + if err := ioutil.WriteCompressedJson(lastStepFile, state); err != nil { + return fmt.Errorf("failed to write last step to %v: %w", lastStepFile, err) + } + if err := ioutil.WriteCompressedJson(filepath.Join(dir, ProofsDir, fmt.Sprintf("%d.json.gz", step)), proof); err != nil { + return fmt.Errorf("failed to write proof: %w", err) + } + return nil +} + +// below methods and definitions are only to be used for testing +type preimageOpts []string + +type PreimageOpt func() preimageOpts + +func PreimageLoad(key preimage.Key, offset uint32) PreimageOpt { + return func() preimageOpts { + return []string{"--stop-at-preimage", fmt.Sprintf("%v@%v", common.Hash(key.PreimageKey()).Hex(), offset)} + } +} + +func FirstPreimageLoadOfType(preimageType string) PreimageOpt { + return func() preimageOpts { + return []string{"--stop-at-preimage-type", preimageType} + } +} + +func FirstKeccakPreimageLoad() PreimageOpt { + return 
FirstPreimageLoadOfType("keccak") +} + +func FirstPrecompilePreimageLoad() PreimageOpt { + return FirstPreimageLoadOfType("precompile") +} + +func PreimageLargerThan(size int) PreimageOpt { + return func() preimageOpts { + return []string{"--stop-at-preimage-larger-than", strconv.Itoa(size)} + } +} diff --git a/op-challenger2/game/fault/types/actions.go b/op-challenger2/game/fault/types/actions.go new file mode 100644 index 000000000000..e5093d6ac553 --- /dev/null +++ b/op-challenger2/game/fault/types/actions.go @@ -0,0 +1,34 @@ +package types + +import "github.com/ethereum/go-ethereum/common" + +type ActionType string + +func (a ActionType) String() string { + return string(a) +} + +const ( + ActionTypeMove ActionType = "move" + ActionTypeStep ActionType = "step" + ActionTypeChallengeL2BlockNumber ActionType = "challenge-l2-block-number" +) + +type Action struct { + Type ActionType + + // Moves and Steps + ParentClaim Claim + IsAttack bool + + // Moves + Value common.Hash + + // Steps + PreState []byte + ProofData []byte + OracleData *PreimageOracleData + + // Challenge L2 Block Number + InvalidL2BlockNumberChallenge *InvalidL2BlockNumberChallenge +} diff --git a/op-challenger2/game/fault/types/game.go b/op-challenger2/game/fault/types/game.go new file mode 100644 index 000000000000..dd0394e69a82 --- /dev/null +++ b/op-challenger2/game/fault/types/game.go @@ -0,0 +1,154 @@ +package types + +import ( + "errors" + "math/big" + "time" +) + +var ( + // ErrClaimNotFound is returned when a claim does not exist in the game state. + ErrClaimNotFound = errors.New("claim not found in game state") +) + +// Game is an interface that represents the state of a dispute game. +type Game interface { + // Claims returns all of the claims in the game. + Claims() []Claim + + // GetParent returns the parent of the provided claim. + GetParent(claim Claim) (Claim, error) + + // DefendsParent returns true if and only if the claim is a defense (i.e. goes right) of + // its parent. 
+ DefendsParent(claim Claim) bool + + // ChessClock returns the amount of time elapsed on the chess clock of the potential challenger to the supplied claim. + // Specifically, this returns the chess clock of the team that *disagrees* with the supplied claim. + ChessClock(now time.Time, claim Claim) time.Duration + + // IsDuplicate returns true if the provided [Claim] already exists in the game state + // referencing the same parent claim + IsDuplicate(claim Claim) bool + + // AgreeWithClaimLevel returns if the game state agrees with the provided claim level. + AgreeWithClaimLevel(claim Claim, agreeWithRootClaim bool) bool + + MaxDepth() Depth + + // AncestorWithTraceIndex finds the ancestor of claim with trace index idx if present. + // Returns the claim and true if the ancestor is found, or Claim{}, false if not. + AncestorWithTraceIndex(claim Claim, idx *big.Int) (Claim, bool) +} + +// gameState is a struct that represents the state of a dispute game. +// The game state implements the [Game] interface. +type gameState struct { + // claims is the list of claims in the same order as the contract + claims []Claim + claimIDs map[ClaimID]bool + depth Depth +} + +// NewGameState returns a new game state. +// The provided [Claim] is used as the root node. +func NewGameState(claims []Claim, depth Depth) *gameState { + claimIDs := make(map[ClaimID]bool) + for _, claim := range claims { + claimIDs[claim.ID()] = true + } + return &gameState{ + claims: claims, + claimIDs: claimIDs, + depth: depth, + } +} + +// AgreeWithClaimLevel returns if the game state agrees with the provided claim level. 
func (g *gameState) AgreeWithClaimLevel(claim Claim, agreeWithRootClaim bool) bool {
	// Claims alternate allegiance by depth: even depths (including the root at
	// depth 0) support the root claim, odd depths oppose it.
	isOddLevel := claim.Depth()%2 == 1
	// If we agree with the proposed output, we agree with odd levels
	// If we disagree with the proposed output, we agree with the root claim level & even levels
	if agreeWithRootClaim {
		return !isOddLevel
	} else {
		return isOddLevel
	}
}

// IsDuplicate returns true if the provided [Claim] already exists in the game
// state referencing the same parent claim (matched via the claim's ID).
func (g *gameState) IsDuplicate(claim Claim) bool {
	return g.claimIDs[claim.ID()]
}

// Claims returns all claims in the game, in the same order as the contract.
func (g *gameState) Claims() []Claim {
	// Defensively copy to avoid modifications to the underlying array.
	return append([]Claim(nil), g.claims...)
}

// MaxDepth returns the maximum depth of the game tree.
func (g *gameState) MaxDepth() Depth {
	return g.depth
}

// GetParent returns the parent of the provided claim, or ErrClaimNotFound if
// the claim is the root or its parent index does not resolve to a known claim.
func (g *gameState) GetParent(claim Claim) (Claim, error) {
	parent := g.getParent(claim)
	if parent == nil {
		return Claim{}, ErrClaimNotFound
	}
	return *parent, nil
}

// DefendsParent returns true if and only if the claim is positioned to the
// right of its parent (a defense move). Root claims and claims whose parent
// cannot be resolved return false.
func (g *gameState) DefendsParent(claim Claim) bool {
	parent := g.getParent(claim)
	if parent == nil {
		return false
	}
	return claim.RightOf(parent.Position)
}

// ChessClock returns the amount of time elapsed on the chess clock of the potential challenger to the supplied claim.
// Specifically, this returns the chess clock of the team that *disagrees* with the supplied claim.
+func (g *gameState) ChessClock(now time.Time, claim Claim) time.Duration { + parentRef := g.getParent(claim) + var parent Claim + if parentRef != nil { + parent = *parentRef + } + return ChessClock(now, claim, parent) +} + +func ChessClock(now time.Time, claim Claim, parent Claim) time.Duration { + // Calculate the time elapsed since the claim was created + duration := now.Sub(claim.Clock.Timestamp) + if parent != (Claim{}) { + // Add total time elapsed from previous turns + duration = parent.Clock.Duration + duration + } + return duration +} + +func (g *gameState) getParent(claim Claim) *Claim { + if claim.IsRoot() { + return nil + } + if claim.ParentContractIndex >= len(g.claims) || claim.ParentContractIndex < 0 { + return nil + } + parent := g.claims[claim.ParentContractIndex] + return &parent +} + +func (g *gameState) AncestorWithTraceIndex(claim Claim, idx *big.Int) (Claim, bool) { + for { + if claim.Position.TraceIndex(g.depth).Cmp(idx) == 0 { + return claim, true + } + if claim.IsRoot() { + return Claim{}, false + } + next := g.getParent(claim) + if next == nil { + return Claim{}, false + } + claim = *next + } +} diff --git a/op-challenger2/game/fault/types/game_test.go b/op-challenger2/game/fault/types/game_test.go new file mode 100644 index 000000000000..a53dd7c7b673 --- /dev/null +++ b/op-challenger2/game/fault/types/game_test.go @@ -0,0 +1,273 @@ +package types + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +const testMaxDepth = 3 + +func createTestClaims() (Claim, Claim, Claim, Claim) { + // root & middle are from the trace "abcdexyz" + // top & bottom are from the trace "abcdefgh" + root := Claim{ + ClaimData: ClaimData{ + Value: common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000077a"), + Position: NewPosition(0, common.Big0), + }, + // Root claim has no parent + } + top := Claim{ + ClaimData: ClaimData{ + Value: 
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000364"), + Position: NewPosition(1, common.Big0), + }, + ContractIndex: 1, + ParentContractIndex: 0, + } + middle := Claim{ + ClaimData: ClaimData{ + Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000578"), + Position: NewPosition(2, big.NewInt(2)), + }, + ContractIndex: 2, + ParentContractIndex: 1, + } + + bottom := Claim{ + ClaimData: ClaimData{ + Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000465"), + Position: NewPosition(3, big.NewInt(4)), + }, + ContractIndex: 3, + ParentContractIndex: 2, + } + + return root, top, middle, bottom +} + +func TestIsDuplicate(t *testing.T) { + root, top, middle, bottom := createTestClaims() + g := NewGameState([]Claim{root, top}, testMaxDepth) + + // Root + Top should be duplicates + require.True(t, g.IsDuplicate(root)) + require.True(t, g.IsDuplicate(top)) + + // Middle + Bottom should not be a duplicate + require.False(t, g.IsDuplicate(middle)) + require.False(t, g.IsDuplicate(bottom)) +} + +func TestGame_Claims(t *testing.T) { + // Setup the game state. + root, top, middle, bottom := createTestClaims() + expected := []Claim{root, top, middle, bottom} + g := NewGameState(expected, testMaxDepth) + + // Validate claim pairs. 
+ actual := g.Claims() + require.ElementsMatch(t, expected, actual) +} + +func TestGame_DefendsParent(t *testing.T) { + tests := []struct { + name string + game *gameState + expected bool + }{ + { + name: "LeftChildAttacks", + game: buildGameWithClaim(big.NewInt(2), big.NewInt(1)), + expected: false, + }, + { + name: "RightChildDoesntDefend", + game: buildGameWithClaim(big.NewInt(3), big.NewInt(1)), + expected: false, + }, + { + name: "SubChildDoesntDefend", + game: buildGameWithClaim(big.NewInt(4), big.NewInt(1)), + expected: false, + }, + { + name: "SubSecondChildDoesntDefend", + game: buildGameWithClaim(big.NewInt(5), big.NewInt(1)), + expected: false, + }, + { + name: "RightLeftChildDefendsParent", + game: buildGameWithClaim(big.NewInt(6), big.NewInt(1)), + expected: true, + }, + { + name: "SubThirdChildDefends", + game: buildGameWithClaim(big.NewInt(7), big.NewInt(1)), + expected: true, + }, + { + name: "RootDoesntDefend", + game: NewGameState([]Claim{ + { + ClaimData: ClaimData{ + Position: NewPositionFromGIndex(big.NewInt(0)), + }, + ContractIndex: 0, + }, + }, testMaxDepth), + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + claims := test.game.Claims() + require.Equal(t, test.expected, test.game.DefendsParent(claims[len(claims)-1])) + }) + } +} + +func TestAncestorWithTraceIndex(t *testing.T) { + depth := Depth(4) + claims := []Claim{ + { + ClaimData: ClaimData{ + Position: NewPositionFromGIndex(big.NewInt(0)), + }, + ContractIndex: 0, + ParentContractIndex: 0, + }, + } + addClaimAtPos := func(parent Claim, pos Position) Claim { + claim := Claim{ + ClaimData: ClaimData{ + Position: pos, + }, + ParentContractIndex: parent.ContractIndex, + ContractIndex: len(claims), + } + claims = append(claims, claim) + return claim + } + attack := func(claim Claim) Claim { + return addClaimAtPos(claim, claim.Position.Attack()) + } + defend := func(claim Claim) Claim { + return addClaimAtPos(claim, 
claim.Position.Defend()) + } + // Create a variety of paths to leaf nodes + attack(attack(attack(attack(claims[0])))) + defend(defend(defend(defend(claims[0])))) + defend(attack(defend(attack(claims[0])))) + attack(defend(attack(defend(claims[0])))) + attack(attack(defend(defend(claims[0])))) + defend(defend(attack(attack(claims[0])))) + + game := NewGameState(claims, depth) + // Every claim should be able to find the root's trace index + for _, claim := range claims { + actual, ok := game.AncestorWithTraceIndex(claim, claims[0].TraceIndex(depth)) + require.True(t, ok) + require.Equal(t, claims[0], actual) + } + + // Leaf claims should be able to find the trace index before and after + for _, claim := range game.Claims() { + if claim.Depth() != depth { + // Only leaf nodes are guaranteed to have the pre and post states available + continue + } + claimIdx := claim.TraceIndex(depth) + + actual, ok := game.AncestorWithTraceIndex(claim, claimIdx) + require.True(t, ok) + require.Equal(t, claim, actual, "Should get leaf claim for its own trace index") + + // The right most claim doesn't have + if claim.IndexAtDepth().Cmp(big.NewInt(30)) < 0 { + idx := new(big.Int).Add(claimIdx, big.NewInt(1)) + actual, ok = game.AncestorWithTraceIndex(claim, idx) + require.Truef(t, ok, "Should find claim with next trace index for claim %v index at depth %v", claim.ContractIndex, claim.IndexAtDepth()) + require.Equalf(t, idx, actual.TraceIndex(depth), "Should find claim with next trace index for claim %v index at depth %v", claim.ContractIndex, claim.IndexAtDepth()) + } + + if claimIdx.Cmp(big.NewInt(0)) == 0 { + continue + } + idx := new(big.Int).Sub(claimIdx, big.NewInt(1)) + actual, ok = game.AncestorWithTraceIndex(claim, idx) + require.True(t, ok) + require.Equal(t, idx, actual.TraceIndex(depth), "Should find claim with previous trace index") + } + + actual, ok := game.AncestorWithTraceIndex(claims[0], big.NewInt(0)) + require.False(t, ok) + require.Equal(t, Claim{}, actual) + + 
actual, ok = game.AncestorWithTraceIndex(claims[1], big.NewInt(1)) + require.False(t, ok) + require.Equal(t, Claim{}, actual) + + actual, ok = game.AncestorWithTraceIndex(claims[3], big.NewInt(1)) + require.True(t, ok) + require.Equal(t, claims[3], actual) +} + +func TestChessClock(t *testing.T) { + rootTime := time.UnixMilli(42978249) + defenderRootClaim, challengerFirstClaim, defenderSecondClaim, challengerSecondClaim := createTestClaims() + defenderRootClaim.Clock = Clock{Timestamp: rootTime, Duration: 0} + challengerFirstClaim.Clock = Clock{Timestamp: rootTime.Add(5 * time.Minute), Duration: 5 * time.Minute} + defenderSecondClaim.Clock = Clock{Timestamp: challengerFirstClaim.Clock.Timestamp.Add(2 * time.Minute), Duration: 2 * time.Minute} + challengerSecondClaim.Clock = Clock{Timestamp: defenderSecondClaim.Clock.Timestamp.Add(3 * time.Minute), Duration: 8 * time.Minute} + claims := []Claim{defenderRootClaim, challengerFirstClaim, defenderSecondClaim, challengerSecondClaim} + game := NewGameState(claims, 10) + + // At the time the root claim is posted, both defender and challenger have no time on their chess clock + // The root claim starts the chess clock for the challenger + require.Equal(t, time.Duration(0), game.ChessClock(rootTime, game.Claims()[0])) + // As time progresses, the challenger's chess clock increases + require.Equal(t, 2*time.Minute, game.ChessClock(rootTime.Add(2*time.Minute), game.Claims()[0])) + + // The challenger's first claim arrives 5 minutes after the root claim and starts the clock for the defender + // This is the defender's first turn so at the time the claim is posted, the defender's chess clock is 0 + require.Equal(t, time.Duration(0), game.ChessClock(challengerFirstClaim.Clock.Timestamp, challengerFirstClaim)) + // As time progresses, the defender's chess clock increases + require.Equal(t, 3*time.Minute, game.ChessClock(challengerFirstClaim.Clock.Timestamp.Add(3*time.Minute), challengerFirstClaim)) + + // The defender's second 
claim arrives 2 minutes after the challenger's first claim. + // This starts the challenger's clock again. At the time of the claim it already has 5 minutes on the clock + // from the challenger's previous turn + require.Equal(t, 5*time.Minute, game.ChessClock(defenderSecondClaim.Clock.Timestamp, defenderSecondClaim)) + // As time progresses the challenger's chess clock increases + require.Equal(t, 5*time.Minute+30*time.Second, game.ChessClock(defenderSecondClaim.Clock.Timestamp.Add(30*time.Second), defenderSecondClaim)) + + // The challenger's second claim arrives 3 minutes after the defender's second claim. + // This starts the defender's clock again. At the time of the claim it already has 2 minutes on the clock + // from the defenders previous turn + require.Equal(t, 2*time.Minute, game.ChessClock(challengerSecondClaim.Clock.Timestamp, challengerSecondClaim)) + // As time progresses, the defender's chess clock increases + require.Equal(t, 2*time.Minute+45*time.Minute, game.ChessClock(challengerSecondClaim.Clock.Timestamp.Add(45*time.Minute), challengerSecondClaim)) +} + +func buildGameWithClaim(claimGIndex *big.Int, parentGIndex *big.Int) *gameState { + parentClaim := Claim{ + ClaimData: ClaimData{ + Position: NewPositionFromGIndex(parentGIndex), + }, + ContractIndex: 0, + } + claim := Claim{ + ClaimData: ClaimData{ + Position: NewPositionFromGIndex(claimGIndex), + }, + ContractIndex: 1, + ParentContractIndex: 0, + } + return NewGameState([]Claim{parentClaim, claim}, testMaxDepth) +} diff --git a/op-challenger2/game/fault/types/position.go b/op-challenger2/game/fault/types/position.go new file mode 100644 index 000000000000..86d10730f721 --- /dev/null +++ b/op-challenger2/game/fault/types/position.go @@ -0,0 +1,156 @@ +package types + +import ( + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +var ( + ErrPositionDepthTooSmall = errors.New("position depth is too small") + + RootPosition = NewPositionFromGIndex(big.NewInt(1)) +) + 
+// Depth is the depth of a position in a game tree where the root level has +// depth 0, the root's children have depth 1, their children have depth 2, and +// so on. +type Depth uint64 + +// Position is a golang wrapper around the dispute game Position type. +type Position struct { + depth Depth + indexAtDepth *big.Int +} + +func NewPosition(depth Depth, indexAtDepth *big.Int) Position { + return Position{ + depth: depth, + indexAtDepth: indexAtDepth, + } +} + +// NewPositionFromGIndex creates a new Position given a generalized index. +func NewPositionFromGIndex(x *big.Int) Position { + depth := bigMSB(x) + withoutMSB := new(big.Int).Not(new(big.Int).Lsh(big.NewInt(1), uint(depth))) + indexAtDepth := new(big.Int).And(x, withoutMSB) + return NewPosition(depth, indexAtDepth) +} + +func (p Position) String() string { + return fmt.Sprintf("Position(depth: %v, indexAtDepth: %v)", p.depth, p.IndexAtDepth()) +} + +func (p Position) MoveRight() Position { + return Position{ + depth: p.depth, + indexAtDepth: new(big.Int).Add(p.IndexAtDepth(), big.NewInt(1)), + } +} + +// RelativeToAncestorAtDepth returns a new position for a subtree. +// [ancestor] is the depth of the subtree root node. 
+func (p Position) RelativeToAncestorAtDepth(ancestor Depth) (Position, error) { + if ancestor > p.depth { + return Position{}, ErrPositionDepthTooSmall + } + newPosDepth := p.depth - ancestor + nodesAtDepth := 1 << newPosDepth + newIndexAtDepth := new(big.Int).Mod(p.IndexAtDepth(), big.NewInt(int64(nodesAtDepth))) + return NewPosition(newPosDepth, newIndexAtDepth), nil +} + +func (p Position) Depth() Depth { + return p.depth +} + +func (p Position) IndexAtDepth() *big.Int { + if p.indexAtDepth == nil { + return common.Big0 + } + return p.indexAtDepth +} + +func (p Position) IsRootPosition() bool { + return p.depth == 0 && common.Big0.Cmp(p.IndexAtDepth()) == 0 +} + +func (p Position) lshIndex(amount Depth) *big.Int { + return new(big.Int).Lsh(p.IndexAtDepth(), uint(amount)) +} + +// TraceIndex calculates the what the index of the claim value would be inside the trace. +// It is equivalent to going right until the final depth has been reached. +// Note: this method will panic if maxDepth < p.depth +func (p Position) TraceIndex(maxDepth Depth) *big.Int { + // When we go right, we do a shift left and set the bottom bit to be 1. + // To do this in a single step, do all the shifts at once & or in all 1s for the bottom bits. + if maxDepth < p.depth { + panic(fmt.Sprintf("maxDepth(%d) < p.depth(%d)", maxDepth, p.depth)) + } + rd := maxDepth - p.depth + rhs := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), uint(rd)), big.NewInt(1)) + ti := new(big.Int).Or(p.lshIndex(rd), rhs) + return ti +} + +// move returns a new position at the left or right child. 
+func (p Position) move(right bool) Position { + return Position{ + depth: p.depth + 1, + indexAtDepth: new(big.Int).Or(p.lshIndex(1), big.NewInt(int64(boolToInt(right)))), + } +} + +func boolToInt(b bool) int { + if b { + return 1 + } else { + return 0 + } +} + +func (p Position) parentIndexAtDepth() *big.Int { + return new(big.Int).Div(p.IndexAtDepth(), big.NewInt(2)) +} + +func (p Position) RightOf(parent Position) bool { + return p.parentIndexAtDepth().Cmp(parent.IndexAtDepth()) != 0 +} + +// parent return a new position that is the parent of this Position. +func (p Position) parent() Position { + return Position{ + depth: p.depth - 1, + indexAtDepth: p.parentIndexAtDepth(), + } +} + +// Attack creates a new position which is the attack position of this one. +func (p Position) Attack() Position { + return p.move(false) +} + +// Defend creates a new position which is the defend position of this one. +func (p Position) Defend() Position { + return p.parent().move(true).move(false) +} + +func (p Position) Print(maxDepth Depth) { + fmt.Printf("GIN: %4b\tTrace Position is %4b\tTrace Depth is: %d\tTrace Index is: %d\n", p.ToGIndex(), p.IndexAtDepth(), p.depth, p.TraceIndex(maxDepth)) +} + +func (p Position) ToGIndex() *big.Int { + return new(big.Int).Or(new(big.Int).Lsh(big.NewInt(1), uint(p.depth)), p.IndexAtDepth()) +} + +// bigMSB returns the index of the most significant bit +func bigMSB(x *big.Int) Depth { + if x.Cmp(big.NewInt(0)) == 0 { + return 0 + } + return Depth(x.BitLen() - 1) +} diff --git a/op-challenger2/game/fault/types/position_test.go b/op-challenger2/game/fault/types/position_test.go new file mode 100644 index 000000000000..535c01a7e6bf --- /dev/null +++ b/op-challenger2/game/fault/types/position_test.go @@ -0,0 +1,316 @@ +package types + +import ( + "fmt" + "math" + "math/big" + "testing" + + "github.com/stretchr/testify/require" +) + +func bi(i int) *big.Int { + return big.NewInt(int64(i)) +} + +func TestRootPosition(t *testing.T) { + 
require.True(t, RootPosition.IsRootPosition()) +} + +func TestBigMSB(t *testing.T) { + large, ok := new(big.Int).SetString("18446744073709551615", 10) + require.True(t, ok) + tests := []struct { + input *big.Int + expected Depth + }{ + {bi(0), 0}, + {bi(1), 0}, + {bi(2), 1}, + {bi(4), 2}, + {bi(8), 3}, + {bi(16), 4}, + {bi(255), 7}, + {bi(1024), 10}, + {large, 63}, + } + + for _, test := range tests { + result := bigMSB(test.input) + if result != test.expected { + t.Errorf("MSBIndex(%d) expected %d, but got %d", test.input, test.expected, result) + } + } +} + +func TestGindexPositionConversions(t *testing.T) { + tests := []struct { + gindex *big.Int + expectedPosition Position + }{ + {bi(1), NewPosition(0, bi(0))}, + + {bi(2), NewPosition(1, bi(0))}, + {bi(3), NewPosition(1, bi(1))}, + + {bi(4), NewPosition(2, bi(0))}, + {bi(5), NewPosition(2, bi(1))}, + {bi(6), NewPosition(2, bi(2))}, + {bi(7), NewPosition(2, bi(3))}, + + {bi(8), NewPosition(3, bi(0))}, + {bi(9), NewPosition(3, bi(1))}, + {bi(10), NewPosition(3, bi(2))}, + {bi(11), NewPosition(3, bi(3))}, + {bi(12), NewPosition(3, bi(4))}, + {bi(13), NewPosition(3, bi(5))}, + {bi(14), NewPosition(3, bi(6))}, + {bi(15), NewPosition(3, bi(7))}, + + {bi(16), NewPosition(4, bi(0))}, + {bi(17), NewPosition(4, bi(1))}, + {bi(18), NewPosition(4, bi(2))}, + {bi(19), NewPosition(4, bi(3))}, + {bi(20), NewPosition(4, bi(4))}, + {bi(21), NewPosition(4, bi(5))}, + {bi(22), NewPosition(4, bi(6))}, + {bi(23), NewPosition(4, bi(7))}, + {bi(24), NewPosition(4, bi(8))}, + {bi(25), NewPosition(4, bi(9))}, + {bi(26), NewPosition(4, bi(10))}, + {bi(27), NewPosition(4, bi(11))}, + {bi(28), NewPosition(4, bi(12))}, + {bi(29), NewPosition(4, bi(13))}, + {bi(30), NewPosition(4, bi(14))}, + {bi(31), NewPosition(4, bi(15))}, + + {bi(1023), NewPosition(9, bi(511))}, + {bi(1024), NewPosition(10, bi(0))}, + } + for _, test := range tests { + t.Run(fmt.Sprintf("convert gindex=%s to Position", test.gindex.String()), func(t *testing.T) { + 
positionActual := NewPositionFromGIndex(test.gindex) + require.EqualValuesf(t, test.expectedPosition.Depth(), positionActual.Depth(), "expected depth=%s, got=%s", test.expectedPosition.Depth(), positionActual.Depth()) + require.Zerof(t, test.expectedPosition.IndexAtDepth().Cmp(positionActual.IndexAtDepth()), "expected indexAtDepth=%s, got=%s", test.expectedPosition.IndexAtDepth(), positionActual.IndexAtDepth()) + gindex := positionActual.ToGIndex() + require.Truef(t, gindex.Cmp(test.gindex) == 0, "expected gindex=%s, got=%s", test.gindex.String(), gindex.String()) + }) + } +} + +func TestTraceIndexOfRootWithLargeDepth(t *testing.T) { + traceIdx := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 100), big.NewInt(1)) + pos := NewPositionFromGIndex(big.NewInt(1)) + actual := pos.TraceIndex(100) + require.Equal(t, traceIdx, actual) +} + +// TestTraceIndex creates the position & then tests the trace index function. +func TestTraceIndex(t *testing.T) { + tests := []struct { + depth Depth + indexAtDepth *big.Int + maxDepth Depth + traceIndexExpected *big.Int + }{ + {depth: 0, indexAtDepth: bi(0), maxDepth: 4, traceIndexExpected: bi(15)}, + + {depth: 1, indexAtDepth: bi(0), maxDepth: 4, traceIndexExpected: bi(7)}, + {depth: 1, indexAtDepth: bi(1), maxDepth: 4, traceIndexExpected: bi(15)}, + + {depth: 2, indexAtDepth: bi(0), maxDepth: 4, traceIndexExpected: bi(3)}, + {depth: 2, indexAtDepth: bi(1), maxDepth: 4, traceIndexExpected: bi(7)}, + {depth: 2, indexAtDepth: bi(2), maxDepth: 4, traceIndexExpected: bi(11)}, + {depth: 2, indexAtDepth: bi(3), maxDepth: 4, traceIndexExpected: bi(15)}, + + {depth: 3, indexAtDepth: bi(0), maxDepth: 4, traceIndexExpected: bi(1)}, + {depth: 3, indexAtDepth: bi(1), maxDepth: 4, traceIndexExpected: bi(3)}, + {depth: 3, indexAtDepth: bi(2), maxDepth: 4, traceIndexExpected: bi(5)}, + {depth: 3, indexAtDepth: bi(3), maxDepth: 4, traceIndexExpected: bi(7)}, + {depth: 3, indexAtDepth: bi(4), maxDepth: 4, traceIndexExpected: bi(9)}, + {depth: 3, 
indexAtDepth: bi(5), maxDepth: 4, traceIndexExpected: bi(11)}, + {depth: 3, indexAtDepth: bi(6), maxDepth: 4, traceIndexExpected: bi(13)}, + {depth: 3, indexAtDepth: bi(7), maxDepth: 4, traceIndexExpected: bi(15)}, + + {depth: 4, indexAtDepth: bi(0), maxDepth: 4, traceIndexExpected: bi(0)}, + {depth: 4, indexAtDepth: bi(1), maxDepth: 4, traceIndexExpected: bi(1)}, + {depth: 4, indexAtDepth: bi(2), maxDepth: 4, traceIndexExpected: bi(2)}, + {depth: 4, indexAtDepth: bi(3), maxDepth: 4, traceIndexExpected: bi(3)}, + {depth: 4, indexAtDepth: bi(4), maxDepth: 4, traceIndexExpected: bi(4)}, + {depth: 4, indexAtDepth: bi(5), maxDepth: 4, traceIndexExpected: bi(5)}, + {depth: 4, indexAtDepth: bi(6), maxDepth: 4, traceIndexExpected: bi(6)}, + {depth: 4, indexAtDepth: bi(7), maxDepth: 4, traceIndexExpected: bi(7)}, + {depth: 4, indexAtDepth: bi(8), maxDepth: 4, traceIndexExpected: bi(8)}, + {depth: 4, indexAtDepth: bi(9), maxDepth: 4, traceIndexExpected: bi(9)}, + {depth: 4, indexAtDepth: bi(10), maxDepth: 4, traceIndexExpected: bi(10)}, + {depth: 4, indexAtDepth: bi(11), maxDepth: 4, traceIndexExpected: bi(11)}, + {depth: 4, indexAtDepth: bi(12), maxDepth: 4, traceIndexExpected: bi(12)}, + {depth: 4, indexAtDepth: bi(13), maxDepth: 4, traceIndexExpected: bi(13)}, + {depth: 4, indexAtDepth: bi(14), maxDepth: 4, traceIndexExpected: bi(14)}, + {depth: 4, indexAtDepth: bi(15), maxDepth: 4, traceIndexExpected: bi(15)}, + + {depth: 63, indexAtDepth: bi(9223372036854775806), maxDepth: 64, traceIndexExpected: bi(0).Sub(bi(0).Mul(bi(math.MaxInt64), bi(2)), bi(1))}, + } + for _, test := range tests { + require.Equal(t, test.traceIndexExpected, NewPosition(test.depth, test.indexAtDepth).TraceIndex(test.maxDepth)) + } +} + +func TestAttack(t *testing.T) { + tests := []struct { + startGIndex *big.Int + attackGIndex *big.Int + }{ + {bi(1), bi(2)}, + {bi(2), bi(4)}, + {bi(3), bi(6)}, + {bi(4), bi(8)}, + {bi(5), bi(10)}, + {bi(6), bi(12)}, + {bi(7), bi(14)}, + {bi(8), bi(16)}, + {bi(9), 
bi(18)}, + {bi(10), bi(20)}, + {bi(11), bi(22)}, + {bi(12), bi(24)}, + {bi(13), bi(26)}, + {bi(14), bi(28)}, + {bi(15), bi(30)}, + } + for _, test := range tests { + pos := NewPositionFromGIndex(test.startGIndex) + result := pos.Attack() + require.Equalf(t, test.attackGIndex, result.ToGIndex(), "attacking GIndex %s, expected=%s, got=%s", test.startGIndex, test.attackGIndex, result.ToGIndex()) + } +} + +func TestDefend(t *testing.T) { + tests := []struct { + startGIndex *big.Int + defendGIndex *big.Int + }{ + {bi(2), bi(6)}, + {bi(4), bi(10)}, + {bi(6), bi(14)}, + {bi(8), bi(18)}, + {bi(10), bi(22)}, + {bi(12), bi(26)}, + {bi(14), bi(30)}, + } + for _, test := range tests { + pos := NewPositionFromGIndex(test.startGIndex) + result := pos.Defend() + require.Equalf(t, test.defendGIndex, result.ToGIndex(), "defending GIndex %s, expected=%s, got=%s", test.startGIndex, test.defendGIndex, result.ToGIndex()) + } +} + +func TestRelativeToAncestorAtDepth(t *testing.T) { + t.Run("ErrorsForDeepAncestor", func(t *testing.T) { + pos := NewPosition(1, big.NewInt(1)) + _, err := pos.RelativeToAncestorAtDepth(2) + require.ErrorIs(t, err, ErrPositionDepthTooSmall) + }) + + tests := []struct { + gindex int64 + newRootDepth Depth + expectedGIndex int64 + }{ + {gindex: 5, newRootDepth: 1, expectedGIndex: 3}, + + // Depth 0 (should return position unchanged) + {gindex: 1, newRootDepth: 0, expectedGIndex: 1}, + {gindex: 2, newRootDepth: 0, expectedGIndex: 2}, + + // Depth 1 + {gindex: 2, newRootDepth: 1, expectedGIndex: 1}, + {gindex: 3, newRootDepth: 1, expectedGIndex: 1}, + {gindex: 4, newRootDepth: 1, expectedGIndex: 2}, + {gindex: 5, newRootDepth: 1, expectedGIndex: 3}, + {gindex: 6, newRootDepth: 1, expectedGIndex: 2}, + {gindex: 7, newRootDepth: 1, expectedGIndex: 3}, + {gindex: 8, newRootDepth: 1, expectedGIndex: 4}, + {gindex: 9, newRootDepth: 1, expectedGIndex: 5}, + {gindex: 10, newRootDepth: 1, expectedGIndex: 6}, + {gindex: 11, newRootDepth: 1, expectedGIndex: 7}, + {gindex: 
12, newRootDepth: 1, expectedGIndex: 4}, + {gindex: 13, newRootDepth: 1, expectedGIndex: 5}, + {gindex: 14, newRootDepth: 1, expectedGIndex: 6}, + {gindex: 15, newRootDepth: 1, expectedGIndex: 7}, + {gindex: 16, newRootDepth: 1, expectedGIndex: 8}, + {gindex: 17, newRootDepth: 1, expectedGIndex: 9}, + {gindex: 18, newRootDepth: 1, expectedGIndex: 10}, + {gindex: 19, newRootDepth: 1, expectedGIndex: 11}, + {gindex: 20, newRootDepth: 1, expectedGIndex: 12}, + {gindex: 21, newRootDepth: 1, expectedGIndex: 13}, + {gindex: 22, newRootDepth: 1, expectedGIndex: 14}, + {gindex: 23, newRootDepth: 1, expectedGIndex: 15}, + {gindex: 24, newRootDepth: 1, expectedGIndex: 8}, + {gindex: 25, newRootDepth: 1, expectedGIndex: 9}, + {gindex: 26, newRootDepth: 1, expectedGIndex: 10}, + {gindex: 27, newRootDepth: 1, expectedGIndex: 11}, + {gindex: 28, newRootDepth: 1, expectedGIndex: 12}, + {gindex: 29, newRootDepth: 1, expectedGIndex: 13}, + {gindex: 30, newRootDepth: 1, expectedGIndex: 14}, + {gindex: 31, newRootDepth: 1, expectedGIndex: 15}, + + // Depth 2 + {gindex: 4, newRootDepth: 2, expectedGIndex: 1}, + {gindex: 5, newRootDepth: 2, expectedGIndex: 1}, + {gindex: 6, newRootDepth: 2, expectedGIndex: 1}, + {gindex: 7, newRootDepth: 2, expectedGIndex: 1}, + {gindex: 8, newRootDepth: 2, expectedGIndex: 2}, + {gindex: 9, newRootDepth: 2, expectedGIndex: 3}, + {gindex: 10, newRootDepth: 2, expectedGIndex: 2}, + {gindex: 11, newRootDepth: 2, expectedGIndex: 3}, + {gindex: 12, newRootDepth: 2, expectedGIndex: 2}, + {gindex: 13, newRootDepth: 2, expectedGIndex: 3}, + {gindex: 14, newRootDepth: 2, expectedGIndex: 2}, + {gindex: 15, newRootDepth: 2, expectedGIndex: 3}, + } + + for _, test := range tests { + test := test + t.Run(fmt.Sprintf("From %v SplitAt %v", test.gindex, test.newRootDepth), func(t *testing.T) { + pos := NewPositionFromGIndex(big.NewInt(test.gindex)) + expectedRelativePosition := NewPositionFromGIndex(big.NewInt(test.expectedGIndex)) + relativePosition, err := 
pos.RelativeToAncestorAtDepth(test.newRootDepth) + require.NoError(t, err) + require.Equal(t, expectedRelativePosition.ToGIndex(), relativePosition.ToGIndex()) + }) + } +} + +func TestRelativeMoves(t *testing.T) { + tests := []func(pos Position) Position{ + func(pos Position) Position { + return pos.Attack() + }, + func(pos Position) Position { + return pos.Defend() + }, + func(pos Position) Position { + return pos.Attack().Attack() + }, + func(pos Position) Position { + return pos.Defend().Defend() + }, + func(pos Position) Position { + return pos.Attack().Defend() + }, + func(pos Position) Position { + return pos.Defend().Attack() + }, + } + for _, test := range tests { + test := test + t.Run("", func(t *testing.T) { + expectedRelativePosition := test(NewPositionFromGIndex(big.NewInt(1))) + relative := NewPositionFromGIndex(big.NewInt(3)) + start := test(relative) + relativePosition, err := start.RelativeToAncestorAtDepth(relative.Depth()) + require.NoError(t, err) + require.Equal(t, expectedRelativePosition.ToGIndex(), relativePosition.ToGIndex()) + }) + } +} diff --git a/op-challenger2/game/fault/types/types.go b/op-challenger2/game/fault/types/types.go new file mode 100644 index 000000000000..fc81911effa9 --- /dev/null +++ b/op-challenger2/game/fault/types/types.go @@ -0,0 +1,215 @@ +package types + +import ( + "context" + "errors" + "math/big" + "time" + + preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" +) + +var ( + ErrGameDepthReached = errors.New("game depth reached") + ErrL2BlockNumberValid = errors.New("l2 block number is valid") +) + +const ( + CannonGameType uint32 = 0 + PermissionedGameType uint32 = 1 + AsteriscGameType uint32 = 2 + AlphabetGameType uint32 = 255 +) + +type ClockReader interface { + Now() time.Time +} + +// PreimageOracleData 
encapsulates the preimage oracle data +// to load into the onchain oracle. +type PreimageOracleData struct { + IsLocal bool + OracleKey []byte + oracleData []byte + OracleOffset uint32 + + // 4844 blob data + BlobFieldIndex uint64 + BlobCommitment []byte + BlobProof []byte +} + +// GetIdent returns the ident for the preimage oracle data. +func (p *PreimageOracleData) GetIdent() *big.Int { + return new(big.Int).SetBytes(p.OracleKey[1:]) +} + +// GetPreimageWithoutSize returns the preimage for the preimage oracle data. +func (p *PreimageOracleData) GetPreimageWithoutSize() []byte { + return p.oracleData[8:] +} + +// GetPreimageWithSize returns the preimage with its length prefix. +func (p *PreimageOracleData) GetPreimageWithSize() []byte { + return p.oracleData +} + +func (p *PreimageOracleData) GetPrecompileAddress() common.Address { + return common.BytesToAddress(p.oracleData[8:28]) +} + +func (p *PreimageOracleData) GetPrecompileInput() []byte { + return p.oracleData[28:] +} + +// NewPreimageOracleData creates a new [PreimageOracleData] instance. +func NewPreimageOracleData(key []byte, data []byte, offset uint32) *PreimageOracleData { + return &PreimageOracleData{ + IsLocal: len(key) > 0 && key[0] == byte(preimage.LocalKeyType), + OracleKey: key, + oracleData: data, + OracleOffset: offset, + } +} + +func NewPreimageOracleBlobData(key []byte, data []byte, offset uint32, fieldIndex uint64, commitment []byte, proof []byte) *PreimageOracleData { + return &PreimageOracleData{ + IsLocal: false, + OracleKey: key, + oracleData: data, + OracleOffset: offset, + BlobFieldIndex: fieldIndex, + BlobCommitment: commitment, + BlobProof: proof, + } +} + +// StepCallData encapsulates the data needed to perform a step. +type StepCallData struct { + ClaimIndex uint64 + IsAttack bool + StateData []byte + Proof []byte +} + +// TraceAccessor defines an interface to request data from a TraceProvider with additional context for the game position. 
+// This can be used to implement split games where lower layers of the game may have different values depending on claims +// at higher levels in the game. +type TraceAccessor interface { + // Get returns the claim value at the requested position, evaluated in the context of the specified claim (ref). + Get(ctx context.Context, game Game, ref Claim, pos Position) (common.Hash, error) + + // GetStepData returns the data required to execute the step at the specified position, + // evaluated in the context of the specified claim (ref). + GetStepData(ctx context.Context, game Game, ref Claim, pos Position) (prestate []byte, proofData []byte, preimageData *PreimageOracleData, err error) + + // GetL2BlockNumberChallenge returns the data required to prove the correct L2 block number of the root claim. + // Returns ErrL2BlockNumberValid if the root claim is known to come from the same block as the claimed L2 block. + GetL2BlockNumberChallenge(ctx context.Context, game Game) (*InvalidL2BlockNumberChallenge, error) +} + +// PrestateProvider defines an interface to request the absolute prestate. +type PrestateProvider interface { + // AbsolutePreStateCommitment is the commitment of the pre-image value of the trace that transitions to the trace value at index 0 + AbsolutePreStateCommitment(ctx context.Context) (hash common.Hash, err error) +} + +// TraceProvider is a generic way to get a claim value at a specific step in the trace. +type TraceProvider interface { + PrestateProvider + + // Get returns the claim value at the requested index. + // Get(i) = Keccak256(GetPreimage(i)) + Get(ctx context.Context, i Position) (common.Hash, error) + + // GetStepData returns the data required to execute the step at the specified trace index. 
+ // This includes the pre-state of the step (not hashed), the proof data required during step execution + // and any pre-image data that needs to be loaded into the oracle prior to execution (may be nil) + // The prestate returned from GetStepData for trace 10 should be the pre-image of the claim from trace 9 + GetStepData(ctx context.Context, i Position) (prestate []byte, proofData []byte, preimageData *PreimageOracleData, err error) + + // GetL2BlockNumberChallenge returns the data required to prove the correct L2 block number of the root claim. + // Returns ErrL2BlockNumberValid if the root claim is known to come from the same block as the claimed L2 block. + GetL2BlockNumberChallenge(ctx context.Context) (*InvalidL2BlockNumberChallenge, error) +} + +// ClaimData is the core of a claim. It must be unique inside a specific game. +type ClaimData struct { + Value common.Hash + Bond *big.Int + Position +} + +func (c *ClaimData) ValueBytes() [32]byte { + responseBytes := c.Value.Bytes() + var responseArr [32]byte + copy(responseArr[:], responseBytes[:32]) + return responseArr +} + +type ClaimID common.Hash + +// Claim extends ClaimData with information about the relationship between two claims. +// It uses ClaimData to break cyclicity without using pointers. +// If the position of the game is Depth 0, IndexAtDepth 0 it is the root claim +// and the Parent field is empty & meaningless. +type Claim struct { + ClaimData + // WARN: CounteredBy is a mutable field in the FaultDisputeGame contract + // and rely on it for determining whether to step on leaf claims. + // When caching is implemented for the Challenger, this will need + // to be changed/removed to avoid invalid/stale contract state. + CounteredBy common.Address + Claimant common.Address + Clock Clock + // Location of the claim & it's parent inside the contract. Does not exist + // for claims that have not made it to the contract. 
+ ContractIndex int + ParentContractIndex int +} + +func (c Claim) ID() ClaimID { + return ClaimID(crypto.Keccak256Hash( + c.Position.ToGIndex().Bytes(), + c.Value.Bytes(), + big.NewInt(int64(c.ParentContractIndex)).Bytes(), + )) +} + +// IsRoot returns true if this claim is the root claim. +func (c Claim) IsRoot() bool { + return c.Position.IsRootPosition() +} + +// Clock tracks the chess clock for a claim. +type Clock struct { + // Duration is the time elapsed on the chess clock at the last update. + Duration time.Duration + + // Timestamp is the time that the clock was last updated. + Timestamp time.Time +} + +// NewClock creates a new Clock instance. +func NewClock(duration time.Duration, timestamp time.Time) Clock { + return Clock{ + Duration: duration, + Timestamp: timestamp, + } +} + +type InvalidL2BlockNumberChallenge struct { + Output *eth.OutputResponse + Header *ethTypes.Header +} + +func NewInvalidL2BlockNumberProof(output *eth.OutputResponse, header *ethTypes.Header) *InvalidL2BlockNumberChallenge { + return &InvalidL2BlockNumberChallenge{ + Output: output, + Header: header, + } +} diff --git a/op-challenger2/game/fault/types/types_test.go b/op-challenger2/game/fault/types/types_test.go new file mode 100644 index 000000000000..ca3e25b7e4b7 --- /dev/null +++ b/op-challenger2/game/fault/types/types_test.go @@ -0,0 +1,62 @@ +package types + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewPreimageOracleData(t *testing.T) { + t.Run("LocalData", func(t *testing.T) { + data := NewPreimageOracleData([]byte{1, 2, 3}, []byte{4, 5, 6}, 7) + require.True(t, data.IsLocal) + require.Equal(t, []byte{1, 2, 3}, data.OracleKey) + require.Equal(t, []byte{4, 5, 6}, data.GetPreimageWithSize()) + require.Equal(t, uint32(7), data.OracleOffset) + }) + + t.Run("GlobalData", func(t *testing.T) { + data := NewPreimageOracleData([]byte{0, 2, 3}, []byte{4, 5, 6}, 7) + require.False(t, data.IsLocal) + require.Equal(t, []byte{0, 2, 3}, 
data.OracleKey) + require.Equal(t, []byte{4, 5, 6}, data.GetPreimageWithSize()) + require.Equal(t, uint32(7), data.OracleOffset) + }) +} + +func TestIsRootPosition(t *testing.T) { + tests := []struct { + name string + position Position + expected bool + }{ + { + name: "ZeroRoot", + position: NewPositionFromGIndex(big.NewInt(0)), + expected: true, + }, + { + name: "ValidRoot", + position: NewPositionFromGIndex(big.NewInt(1)), + expected: true, + }, + { + name: "NotRoot", + position: NewPositionFromGIndex(big.NewInt(2)), + expected: false, + }, + { + // Mostly to avoid nil dereferences in tests which may not set a real Position + name: "DefaultValue", + position: Position{}, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, test.expected, test.position.IsRootPosition()) + }) + } +} diff --git a/op-challenger2/game/fault/validator.go b/op-challenger2/game/fault/validator.go new file mode 100644 index 000000000000..9430d8ad4d1d --- /dev/null +++ b/op-challenger2/game/fault/validator.go @@ -0,0 +1,50 @@ +package fault + +import ( + "bytes" + "context" + "fmt" + + gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" +) + +type PrestateLoader = func(ctx context.Context) (common.Hash, error) + +type Validator interface { + Validate(ctx context.Context) error +} + +var _ Validator = (*PrestateValidator)(nil) + +type PrestateValidator struct { + valueName string + load PrestateLoader + provider types.PrestateProvider +} + +func NewPrestateValidator(valueName string, contractProvider PrestateLoader, localProvider types.PrestateProvider) *PrestateValidator { + return &PrestateValidator{ + valueName: valueName, + load: contractProvider, + provider: localProvider, + } +} + +func (v *PrestateValidator) Validate(ctx context.Context) error { + prestateHash, err := 
v.load(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to get prestate hash from loader: %w", err)
+	}
+	// Ask the local provider for its absolute prestate commitment so it can be
+	// compared against the hash reported by the contract-backed loader.
+	prestateCommitment, err := v.provider.AbsolutePreStateCommitment(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to fetch provider's prestate hash: %w", err)
+	}
+	// A mismatch means the local trace provider disagrees with the on-chain
+	// contract about the absolute prestate; report both values for debugging.
+	if !bytes.Equal(prestateCommitment[:], prestateHash[:]) {
+		return fmt.Errorf("%v %w: Provider: %s | Contract: %s",
+			v.valueName, gameTypes.ErrInvalidPrestate, prestateCommitment.Hex(), prestateHash.Hex())
+	}
+	return nil
+}
diff --git a/op-challenger2/game/fault/validator_test.go b/op-challenger2/game/fault/validator_test.go
new file mode 100644
index 000000000000..e591045fe162
--- /dev/null
+++ b/op-challenger2/game/fault/validator_test.go
@@ -0,0 +1,92 @@
+package fault
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/ethereum-optimism/optimism/cannon/mipsevm"
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types"
+	gameTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/types"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/stretchr/testify/require"
+)
+
+// Sentinel errors returned by the stub loader/provider below so tests can
+// assert which side of the validation failed.
+var (
+	prestate          = []byte{0x00, 0x01, 0x02, 0x03}
+	mockProviderError = fmt.Errorf("mock provider error")
+	mockLoaderError   = fmt.Errorf("mock loader error")
+)
+
+func TestValidate(t *testing.T) {
+	t.Run("ValidPrestates", func(t *testing.T) {
+		// Hash the prestate and stamp the VM status byte so the loader and
+		// the provider produce the same commitment.
+		prestateHash := crypto.Keccak256(prestate)
+		prestateHash[0] = mipsevm.VMStatusUnfinished
+		player := &PrestateValidator{
+			load:     newMockPrestateLoader(false, common.BytesToHash(prestateHash)),
+			provider: newMockPrestateProvider(false, prestate),
+		}
+		err := player.Validate(context.Background())
+		require.NoError(t, err)
+	})
+
+	t.Run("ProviderErrors", func(t *testing.T) {
+		player := &PrestateValidator{
+			load:     newMockPrestateLoader(false, common.BytesToHash(prestate)),
+			provider: newMockPrestateProvider(true, prestate),
+		}
+		err := player.Validate(context.Background())
+		require.ErrorIs(t, err, 
mockProviderError) + }) + + t.Run("LoaderErrors", func(t *testing.T) { + player := &PrestateValidator{ + load: newMockPrestateLoader(true, common.BytesToHash(prestate)), + provider: newMockPrestateProvider(false, prestate), + } + err := player.Validate(context.Background()) + require.ErrorIs(t, err, mockLoaderError) + }) + + t.Run("PrestateMismatch", func(t *testing.T) { + player := &PrestateValidator{ + load: newMockPrestateLoader(false, common.BytesToHash([]byte{0x00})), + provider: newMockPrestateProvider(false, prestate), + } + err := player.Validate(context.Background()) + require.ErrorIs(t, err, gameTypes.ErrInvalidPrestate) + }) +} + +var _ types.PrestateProvider = (*mockPrestateProvider)(nil) + +type mockPrestateProvider struct { + prestateErrors bool + prestate []byte +} + +func newMockPrestateProvider(prestateErrors bool, prestate []byte) *mockPrestateProvider { + return &mockPrestateProvider{ + prestateErrors: prestateErrors, + prestate: prestate, + } +} + +func (m *mockPrestateProvider) AbsolutePreStateCommitment(_ context.Context) (common.Hash, error) { + if m.prestateErrors { + return common.Hash{}, mockProviderError + } + hash := common.BytesToHash(crypto.Keccak256(m.prestate)) + hash[0] = mipsevm.VMStatusUnfinished + return hash, nil +} + +func newMockPrestateLoader(prestateError bool, prestate common.Hash) PrestateLoader { + return func(ctx context.Context) (common.Hash, error) { + if prestateError { + return common.Hash{}, mockLoaderError + } + return prestate, nil + } +} diff --git a/op-challenger2/game/keccak/challenger.go b/op-challenger2/game/keccak/challenger.go new file mode 100644 index 000000000000..191d3ab52b30 --- /dev/null +++ b/op-challenger2/game/keccak/challenger.go @@ -0,0 +1,90 @@ +package keccak + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/matrix" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + 
"github.com/ethereum-optimism/optimism/op-service/txmgr"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+// Oracle extends VerifierPreimageOracle with the ability to build a
+// transaction that challenges a large preimage proposal.
+type Oracle interface {
+	VerifierPreimageOracle
+	ChallengeTx(ident keccakTypes.LargePreimageIdent, challenge keccakTypes.Challenge) (txmgr.TxCandidate, error)
+}
+
+// ChallengeMetrics records the outcome of preimage challenge attempts.
+type ChallengeMetrics interface {
+	RecordPreimageChallenged()
+	RecordPreimageChallengeFailed()
+}
+
+// Verifier checks a large preimage proposal and produces a challenge for it,
+// or matrix.ErrValid when the proposal is correct.
+type Verifier interface {
+	CreateChallenge(ctx context.Context, blockHash common.Hash, oracle VerifierPreimageOracle, preimage keccakTypes.LargePreimageMetaData) (keccakTypes.Challenge, error)
+}
+
+// Sender submits transactions and waits for them to be confirmed.
+type Sender interface {
+	SendAndWaitSimple(txPurpose string, txs ...txmgr.TxCandidate) error
+}
+
+// PreimageChallenger verifies large preimage proposals and submits challenge
+// transactions for any that are invalid.
+type PreimageChallenger struct {
+	log      log.Logger
+	metrics  ChallengeMetrics
+	verifier Verifier
+	sender   Sender
+}
+
+// NewPreimageChallenger creates a new [PreimageChallenger] instance.
+func NewPreimageChallenger(logger log.Logger, metrics ChallengeMetrics, verifier Verifier, sender Sender) *PreimageChallenger {
+	return &PreimageChallenger{
+		log:      logger,
+		metrics:  metrics,
+		verifier: verifier,
+		sender:   sender,
+	}
+}
+
+// Challenge verifies each of the supplied large preimage proposals in parallel
+// and submits a single batch of challenge transactions for any found invalid.
+// Verification or tx-creation failures for individual preimages are logged and
+// skipped; an error is only returned if sending the batch fails.
+func (c *PreimageChallenger) Challenge(ctx context.Context, blockHash common.Hash, oracle Oracle, preimages []keccakTypes.LargePreimageMetaData) error {
+	// txLock guards txs, which is appended to from multiple goroutines.
+	var txLock sync.Mutex
+	var wg sync.WaitGroup
+	var txs []txmgr.TxCandidate
+	for _, preimage := range preimages {
+		preimage := preimage
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			logger := c.log.New("oracle", oracle.Addr(), "claimant", preimage.Claimant, "uuid", preimage.UUID)
+			challenge, err := c.verifier.CreateChallenge(ctx, blockHash, oracle, preimage)
+			if errors.Is(err, matrix.ErrValid) {
+				// Valid preimages need no challenge; this is the common case.
+				logger.Debug("Preimage is valid")
+				return
+			} else if err != nil {
+				logger.Error("Failed to verify large preimage", "err", err)
+				return
+			}
+			logger.Info("Challenging preimage", "block", challenge.Poststate.Index)
+			tx, err := oracle.ChallengeTx(preimage.LargePreimageIdent, challenge)
+			if err != nil {
+				logger.Error("Failed to create challenge transaction", "err", err)
+				return
+			}
+			txLock.Lock()
+			defer txLock.Unlock()
+			txs = append(txs, tx)
+		}()
+	}
+	// Wait for all verifications before sending so the batch is complete.
+	wg.Wait()
+	c.log.Debug("Created preimage challenge transactions", "count", len(txs))
+	if len(txs) > 0 {
+		err := c.sender.SendAndWaitSimple("challenge preimages", txs...)
+		if err != nil {
+			c.metrics.RecordPreimageChallengeFailed()
+			return fmt.Errorf("failed to send challenge txs: %w", err)
+		}
+		c.metrics.RecordPreimageChallenged()
+	}
+	return nil
+}
diff --git a/op-challenger2/game/keccak/challenger_test.go b/op-challenger2/game/keccak/challenger_test.go
new file mode 100644
index 000000000000..6a4a831bde1c
--- /dev/null
+++ b/op-challenger2/game/keccak/challenger_test.go
@@ -0,0 +1,174 @@
+package keccak
+
+import (
+	"context"
+	"errors"
+	"math/big"
+	"testing"
+
+	"github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/matrix"
+	keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types"
+	"github.com/ethereum-optimism/optimism/op-service/testlog"
+	"github.com/ethereum-optimism/optimism/op-service/txmgr"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/stretchr/testify/require"
+)
+
+func TestChallenge(t *testing.T) {
+	preimages := []keccakTypes.LargePreimageMetaData{
+		{
+			LargePreimageIdent: keccakTypes.LargePreimageIdent{
+				Claimant: common.Address{0xff, 0x00},
+				UUID:     big.NewInt(0),
+			},
+		},
+		{
+			LargePreimageIdent: keccakTypes.LargePreimageIdent{
+				Claimant: common.Address{0xff, 0x01},
+				UUID:     big.NewInt(1),
+			},
+		},
+		{
+			LargePreimageIdent: keccakTypes.LargePreimageIdent{
+				Claimant: common.Address{0xff, 0x02},
+				UUID:     big.NewInt(2),
+			},
+		},
+	}
+
+	logger := testlog.Logger(t, log.LevelInfo)
+
+	t.Run("SendChallenges", func(t *testing.T) {
+		verifier, sender, oracle, challenger := setupChallengerTest(logger)
+		verifier.challenges[preimages[1].LargePreimageIdent] = keccakTypes.Challenge{StateMatrix: keccakTypes.StateSnapshot{0x01}}
+		
verifier.challenges[preimages[2].LargePreimageIdent] = keccakTypes.Challenge{StateMatrix: keccakTypes.StateSnapshot{0x02}} + err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages) + require.NoError(t, err) + + // Should send the two challenges before returning + require.Len(t, sender.sent, 1, "Should send a single batch of transactions") + for ident, challenge := range verifier.challenges { + tx, err := oracle.ChallengeTx(ident, challenge) + require.NoError(t, err) + require.Contains(t, sender.sent[0], tx) + } + }) + + t.Run("ReturnErrorWhenSendingFails", func(t *testing.T) { + verifier, sender, oracle, challenger := setupChallengerTest(logger) + verifier.challenges[preimages[1].LargePreimageIdent] = keccakTypes.Challenge{StateMatrix: keccakTypes.StateSnapshot{0x01}} + sender.err = errors.New("boom") + err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages) + require.ErrorIs(t, err, sender.err) + }) + + t.Run("LogErrorWhenCreateTxFails", func(t *testing.T) { + logger, logs := testlog.CaptureLogger(t, log.LevelInfo) + + verifier, _, oracle, challenger := setupChallengerTest(logger) + verifier.challenges[preimages[1].LargePreimageIdent] = keccakTypes.Challenge{StateMatrix: keccakTypes.StateSnapshot{0x01}} + oracle.err = errors.New("boom") + err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages) + require.NoError(t, err) + + levelFilter := testlog.NewLevelFilter(log.LevelError) + msgFilter := testlog.NewMessageFilter("Failed to create challenge transaction") + errLog := logs.FindLog(levelFilter, msgFilter) + require.ErrorIs(t, errLog.AttrValue("err").(error), oracle.err) + }) + + t.Run("LogErrorWhenVerifierFails", func(t *testing.T) { + logger, logs := testlog.CaptureLogger(t, log.LevelInfo) + + verifier, _, oracle, challenger := setupChallengerTest(logger) + verifier.challenges[preimages[1].LargePreimageIdent] = keccakTypes.Challenge{StateMatrix: 
keccakTypes.StateSnapshot{0x01}} + verifier.err = errors.New("boom") + err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages) + require.NoError(t, err) + + levelFilter := testlog.NewLevelFilter(log.LevelError) + msgFilter := testlog.NewMessageFilter("Failed to verify large preimage") + errLog := logs.FindLog(levelFilter, msgFilter) + require.ErrorIs(t, errLog.AttrValue("err").(error), verifier.err) + }) + + t.Run("DoNotLogErrValid", func(t *testing.T) { + logger, logs := testlog.CaptureLogger(t, log.LevelInfo) + + _, _, oracle, challenger := setupChallengerTest(logger) + // All preimages are valid + err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages) + require.NoError(t, err) + + levelFilter := testlog.NewLevelFilter(log.LevelError) + msgFilter := testlog.NewMessageFilter("Failed to verify large preimage") + errLog := logs.FindLog(levelFilter, msgFilter) + require.Nil(t, errLog) + + levelFilter = testlog.NewLevelFilter(log.LevelDebug) + msgFilter = testlog.NewMessageFilter("Preimage is valid") + dbgLog := logs.FindLog(levelFilter, msgFilter) + require.NotNil(t, dbgLog) + }) +} + +func setupChallengerTest(logger log.Logger) (*stubVerifier, *stubSender, *stubChallengerOracle, *PreimageChallenger) { + verifier := &stubVerifier{ + challenges: make(map[keccakTypes.LargePreimageIdent]keccakTypes.Challenge), + } + sender := &stubSender{} + oracle := &stubChallengerOracle{} + metrics := &mockChallengeMetrics{} + challenger := NewPreimageChallenger(logger, metrics, verifier, sender) + return verifier, sender, oracle, challenger +} + +type mockChallengeMetrics struct{} + +func (m *mockChallengeMetrics) RecordPreimageChallenged() {} +func (m *mockChallengeMetrics) RecordPreimageChallengeFailed() {} + +type stubVerifier struct { + challenges map[keccakTypes.LargePreimageIdent]keccakTypes.Challenge + err error +} + +func (s *stubVerifier) CreateChallenge(_ context.Context, _ common.Hash, _ 
VerifierPreimageOracle, preimage keccakTypes.LargePreimageMetaData) (keccakTypes.Challenge, error) { + if s.err != nil { + return keccakTypes.Challenge{}, s.err + } + challenge, ok := s.challenges[preimage.LargePreimageIdent] + if !ok { + return keccakTypes.Challenge{}, matrix.ErrValid + } + return challenge, nil +} + +type stubSender struct { + err error + sent [][]txmgr.TxCandidate +} + +func (s *stubSender) SendAndWaitSimple(_ string, txs ...txmgr.TxCandidate) error { + if s.err != nil { + return s.err + } + s.sent = append(s.sent, txs) + return nil +} + +type stubChallengerOracle struct { + stubOracle + err error +} + +func (s *stubChallengerOracle) ChallengeTx(ident keccakTypes.LargePreimageIdent, challenge keccakTypes.Challenge) (txmgr.TxCandidate, error) { + if s.err != nil { + return txmgr.TxCandidate{}, s.err + } + return txmgr.TxCandidate{ + To: &ident.Claimant, + TxData: append(ident.UUID.Bytes(), challenge.StateMatrix.Pack()...), + }, nil +} diff --git a/op-challenger2/game/keccak/fetcher/fetcher.go b/op-challenger2/game/keccak/fetcher/fetcher.go new file mode 100644 index 000000000000..b8464cb9bbe2 --- /dev/null +++ b/op-challenger2/game/keccak/fetcher/fetcher.go @@ -0,0 +1,120 @@ +package fetcher + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +var ( + ErrNoLeavesFound = errors.New("no leaves found in block") +) + +type L1Source interface { + BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) + TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) + ChainID(ctx context.Context) (*big.Int, error) +} + +type Oracle 
interface {
+	Addr() common.Address
+	GetInputDataBlocks(ctx context.Context, block rpcblock.Block, ident keccakTypes.LargePreimageIdent) ([]uint64, error)
+	DecodeInputData(data []byte) (*big.Int, keccakTypes.InputData, error)
+}
+
+// InputFetcher retrieves large-preimage input data from L1, using the block
+// numbers reported by the oracle contract to locate the relevant transactions.
+type InputFetcher struct {
+	log    log.Logger
+	source L1Source
+}
+
+// FetchInputs returns the input data for the identified large preimage
+// proposal by scanning every transaction in each block the oracle reports as
+// containing leaf data. It errors if a reported block yields no relevant
+// transactions (wrapping ErrNoLeavesFound).
+func (f *InputFetcher) FetchInputs(ctx context.Context, blockHash common.Hash, oracle Oracle, ident keccakTypes.LargePreimageIdent) ([]keccakTypes.InputData, error) {
+	blockNums, err := oracle.GetInputDataBlocks(ctx, rpcblock.ByHash(blockHash), ident)
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve leaf block nums: %w", err)
+	}
+	var inputs []keccakTypes.InputData
+	for _, blockNum := range blockNums {
+		foundRelevantTx := false
+		block, err := f.source.BlockByNumber(ctx, new(big.Int).SetUint64(blockNum))
+		if err != nil {
+			return nil, fmt.Errorf("failed getting tx for block %v: %w", blockNum, err)
+		}
+		for _, tx := range block.Transactions() {
+			inputData, err := f.extractRelevantLeavesFromTx(ctx, oracle, tx, ident)
+			if err != nil {
+				return nil, err
+			}
+			if len(inputData) > 0 {
+				foundRelevantTx = true
+				inputs = append(inputs, inputData...)
+			}
+		}
+		if !foundRelevantTx {
+			// The contract said there was a relevant transaction in this block that we failed to find.
+			// There was either a reorg or the extraction logic is broken.
+			// Either way, abort this attempt to validate the preimage.
+			return nil, fmt.Errorf("%w %v", ErrNoLeavesFound, blockNum)
+		}
+	}
+	return inputs, nil
+}
+
+// extractRelevantLeavesFromTx decodes any leaf data for the identified
+// proposal from the logs emitted by the given transaction. Returns nil inputs
+// with no error when the transaction reverted or emitted no matching logs.
+func (f *InputFetcher) extractRelevantLeavesFromTx(ctx context.Context, oracle Oracle, tx *types.Transaction, ident keccakTypes.LargePreimageIdent) ([]keccakTypes.InputData, error) {
+	rcpt, err := f.source.TransactionReceipt(ctx, tx.Hash())
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve receipt for tx %v: %w", tx.Hash(), err)
+	}
+	if rcpt.Status != types.ReceiptStatusSuccessful {
+		// Reverted transactions cannot have added leaves.
+		f.log.Trace("Skipping transaction with failed receipt status", "tx", tx.Hash(), "status", rcpt.Status)
+		return nil, nil
+	}
+
+	// Iterate over the logs in this receipt, looking for relevant logs emitted from the oracle contract
+	var inputs []keccakTypes.InputData
+	for i, txLog := range rcpt.Logs {
+		if txLog.Address != oracle.Addr() {
+			f.log.Trace("Skip tx log not emitted by the oracle contract", "tx", tx.Hash(), "logIndex", i, "targetContract", oracle.Addr(), "actualContract", txLog.Address)
+			continue
+		}
+		if len(txLog.Data) < 20 {
+			f.log.Trace("Skip tx log with insufficient data (less than 20 bytes)", "tx", tx.Hash(), "logIndex", i, "dataLength", len(txLog.Data))
+			continue
+		}
+		// Log data layout: a 20-byte caller address followed by the call data.
+		caller := common.Address(txLog.Data[0:20])
+		callData := txLog.Data[20:]
+
+		if caller != ident.Claimant {
+			f.log.Trace("Skip tx log from irrelevant claimant", "tx", tx.Hash(), "logIndex", i, "targetClaimant", ident.Claimant, "actualClaimant", caller)
+			continue
+		}
+		uuid, inputData, err := oracle.DecodeInputData(callData)
+		if errors.Is(err, contracts.ErrInvalidAddLeavesCall) {
+			f.log.Trace("Skip tx log with call data not targeting expected method", "tx", tx.Hash(), "logIndex", i, "err", err)
+			continue
+		} else if err != nil {
+			return nil, err
+		}
+		if uuid.Cmp(ident.UUID) != 0 {
+			f.log.Trace("Skip tx log with irrelevant UUID", "tx", tx.Hash(), "logIndex", i, "targetUUID", ident.UUID, "actualUUID", uuid)
+			continue
+		}
+		inputs = append(inputs, inputData)
+	}
+
+	return inputs, 
nil +} + +func NewPreimageFetcher(logger log.Logger, source L1Source) *InputFetcher { + return &InputFetcher{ + log: logger, + source: source, + } +} diff --git a/op-challenger2/game/keccak/fetcher/fetcher_test.go b/op-challenger2/game/keccak/fetcher/fetcher_test.go new file mode 100644 index 000000000000..fd2046a0e13c --- /dev/null +++ b/op-challenger2/game/keccak/fetcher/fetcher_test.go @@ -0,0 +1,505 @@ +package fetcher + +import ( + "context" + "crypto/ecdsa" + "errors" + "fmt" + "math" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +const ( + // Signal to indicate a receipt should be considered missing + MissingReceiptStatus = math.MaxUint64 +) + +var ( + oracleAddr = common.Address{0x99, 0x98} + otherAddr = common.Address{0x12, 0x34} + claimantKey, _ = crypto.GenerateKey() + otherKey, _ = crypto.GenerateKey() + ident = keccakTypes.LargePreimageIdent{ + Claimant: crypto.PubkeyToAddress(claimantKey.PublicKey), + UUID: big.NewInt(888), + } + chainID = big.NewInt(123) + blockHash = common.Hash{0xdd} + input1 = keccakTypes.InputData{ + Input: []byte{0xbb, 0x11}, + Commitments: []common.Hash{{0xcc, 0x11}}, + } + input2 = keccakTypes.InputData{ + Input: []byte{0xbb, 0x22}, + Commitments: []common.Hash{{0xcc, 0x22}}, + } + input3 = keccakTypes.InputData{ + Input: []byte{0xbb, 0x33}, + Commitments: []common.Hash{{0xcc, 0x33}}, + } + input4 = keccakTypes.InputData{ + Input: []byte{0xbb, 0x44}, + Commitments: []common.Hash{{0xcc, 0x44}}, + Finalize: true, + } +) + +func 
TestFetchLeaves_NoBlocks(t *testing.T) { + fetcher, oracle, _ := setupFetcherTest(t) + oracle.leafBlocks = []uint64{} + leaves, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Empty(t, leaves) +} + +func TestFetchLeaves_ErrorOnUnavailableInputBlocks(t *testing.T) { + fetcher, oracle, _ := setupFetcherTest(t) + mockErr := fmt.Errorf("oops") + oracle.inputDataBlocksError = mockErr + + leaves, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.ErrorContains(t, err, "failed to retrieve leaf block nums") + require.Empty(t, leaves) +} + +func TestFetchLeaves_ErrorOnUnavailableL1Block(t *testing.T) { + blockNum := uint64(7) + fetcher, oracle, _ := setupFetcherTest(t) + oracle.leafBlocks = []uint64{blockNum} + + // No txs means stubL1Source will return an error when we try to fetch the block + leaves, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.ErrorContains(t, err, fmt.Sprintf("failed getting tx for block %v", blockNum)) + require.Empty(t, leaves) +} + +func TestFetchLeaves_SingleTxSingleLog(t *testing.T) { + cases := []struct { + name string + txSender *ecdsa.PrivateKey + txModifier TxModifier + }{ + {"from EOA claimant address", claimantKey, ValidTx}, + {"from contract call", otherKey, WithToAddr(otherAddr)}, + {"from contract creation", otherKey, WithoutToAddr()}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + proposal := oracle.createProposal(input1) + tx := l1Source.createTx(blockNum, tc.txSender, tc.txModifier) + l1Source.createLog(tx, proposal) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1}, inputs) + }) + } +} + +func TestFetchLeaves_SingleTxMultipleLogs(t *testing.T) { 
+ fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + proposal1 := oracle.createProposal(input1) + proposal2 := oracle.createProposal(input2) + tx := l1Source.createTx(blockNum, otherKey, WithToAddr(otherAddr)) + l1Source.createLog(tx, proposal1) + l1Source.createLog(tx, proposal2) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1, input2}, inputs) +} + +func TestFetchLeaves_MultipleBlocksAndLeaves(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + block1 := uint64(7) + block2 := uint64(15) + oracle.leafBlocks = []uint64{block1, block2} + + proposal1 := oracle.createProposal(input1) + proposal2 := oracle.createProposal(input2) + proposal3 := oracle.createProposal(input3) + proposal4 := oracle.createProposal(input4) + block1Tx := l1Source.createTx(block1, claimantKey, ValidTx) + block2TxA := l1Source.createTx(block2, claimantKey, ValidTx) + l1Source.createTx(block2, claimantKey, ValidTx) // Add tx with no logs + block2TxB := l1Source.createTx(block2, otherKey, WithoutToAddr()) + l1Source.createLog(block1Tx, proposal1) + l1Source.createLog(block2TxA, proposal2) + l1Source.createLog(block2TxB, proposal3) + l1Source.createLog(block2TxB, proposal4) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1, input2, input3, input4}, inputs) +} + +func TestFetchLeaves_SkipLogFromWrongContract(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + // Emit log from an irrelevant contract address + proposal1 := oracle.createProposal(input2) + tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) + log1 := l1Source.createLog(tx1, proposal1) + log1.Address = otherAddr + // Valid tx + proposal2 := 
oracle.createProposal(input1) + tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx2, proposal2) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1}, inputs) +} + +func TestFetchLeaves_SkipProposalWithWrongUUID(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + // Valid tx but with a different UUID + proposal1 := oracle.createProposal(input2) + proposal1.uuid = big.NewInt(874927294) + tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx1, proposal1) + // Valid tx + proposal2 := oracle.createProposal(input1) + tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx2, proposal2) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1}, inputs) +} + +func TestFetchLeaves_SkipProposalWithWrongClaimant(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + // Valid tx but with a different claimant + proposal1 := oracle.createProposal(input2) + proposal1.claimantAddr = otherAddr + tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx1, proposal1) + // Valid tx + proposal2 := oracle.createProposal(input1) + tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx2, proposal2) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1}, inputs) +} + +func TestFetchLeaves_SkipInvalidProposal(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + // Set up proposal decoding to fail + 
proposal1 := oracle.createProposal(input2) + proposal1.valid = false + tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx1, proposal1) + // Valid tx + proposal2 := oracle.createProposal(input1) + tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx2, proposal2) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1}, inputs) +} + +func TestFetchLeaves_SkipProposalWithInsufficientData(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + // Log contains insufficient data + // It should hold a 20 byte address followed by the proposal payload + proposal1 := oracle.createProposal(input2) + tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) + log1 := l1Source.createLog(tx1, proposal1) + log1.Data = proposal1.claimantAddr[:19] + // Valid tx + proposal2 := oracle.createProposal(input1) + tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx2, proposal2) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1}, inputs) +} + +func TestFetchLeaves_SkipProposalMissingCallData(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + // Truncate call data from log so that it only contains an address + proposal1 := oracle.createProposal(input2) + tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) + log1 := l1Source.createLog(tx1, proposal1) + log1.Data = log1.Data[0:20] + // Valid tx + proposal2 := oracle.createProposal(input1) + tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx2, proposal2) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + 
require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1}, inputs) +} + +func TestFetchLeaves_SkipTxWithReceiptStatusFail(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + // Valid proposal, but tx reverted + proposal1 := oracle.createProposal(input2) + tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx1, proposal1) + l1Source.rcptStatus[tx1.Hash()] = types.ReceiptStatusFailed + // Valid tx + proposal2 := oracle.createProposal(input1) + tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx2, proposal2) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1}, inputs) +} + +func TestFetchLeaves_ErrorsOnMissingReceipt(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + // Valid tx + proposal1 := oracle.createProposal(input1) + tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx1, proposal1) + // Valid proposal, but tx receipt is missing + proposal2 := oracle.createProposal(input2) + tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx2, proposal2) + l1Source.rcptStatus[tx2.Hash()] = MissingReceiptStatus + + input, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.ErrorContains(t, err, fmt.Sprintf("failed to retrieve receipt for tx %v", tx2.Hash())) + require.Nil(t, input) +} + +func TestFetchLeaves_ErrorsWhenNoValidLeavesInBlock(t *testing.T) { + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} + + // Irrelevant tx - reverted + proposal1 := oracle.createProposal(input2) + tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createLog(tx1, proposal1) + 
l1Source.rcptStatus[tx1.Hash()] = types.ReceiptStatusFailed + // Irrelevant tx - no logs are emitted + l1Source.createTx(blockNum, claimantKey, ValidTx) + + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.ErrorIs(t, err, ErrNoLeavesFound) + require.Nil(t, inputs) +} + +func setupFetcherTest(t *testing.T) (*InputFetcher, *stubOracle, *stubL1Source) { + oracle := &stubOracle{ + proposals: make(map[byte]*proposalConfig), + } + l1Source := &stubL1Source{ + txs: make(map[uint64]types.Transactions), + rcptStatus: make(map[common.Hash]uint64), + logs: make(map[common.Hash][]*types.Log), + } + fetcher := NewPreimageFetcher(testlog.Logger(t, log.LevelTrace), l1Source) + return fetcher, oracle, l1Source +} + +type proposalConfig struct { + id byte + claimantAddr common.Address + inputData keccakTypes.InputData + uuid *big.Int + valid bool +} + +type stubOracle struct { + leafBlocks []uint64 + nextProposalId byte + proposals map[byte]*proposalConfig + // Add a field to allow for mocking of errors + inputDataBlocksError error +} + +func (o *stubOracle) Addr() common.Address { + return oracleAddr +} + +func (o *stubOracle) GetInputDataBlocks(_ context.Context, _ rpcblock.Block, _ keccakTypes.LargePreimageIdent) ([]uint64, error) { + if o.inputDataBlocksError != nil { + return nil, o.inputDataBlocksError + } + return o.leafBlocks, nil +} + +func (o *stubOracle) DecodeInputData(data []byte) (*big.Int, keccakTypes.InputData, error) { + if len(data) == 0 { + return nil, keccakTypes.InputData{}, contracts.ErrInvalidAddLeavesCall + } + proposalId := data[0] + proposal, ok := o.proposals[proposalId] + if !ok || !proposal.valid { + return nil, keccakTypes.InputData{}, contracts.ErrInvalidAddLeavesCall + } + + return proposal.uuid, proposal.inputData, nil +} + +type TxModifier func(tx *types.DynamicFeeTx) + +var ValidTx TxModifier = func(_ *types.DynamicFeeTx) { + // no-op +} + +func WithToAddr(addr common.Address) TxModifier { + return 
func(tx *types.DynamicFeeTx) { + tx.To = &addr + } +} + +func WithoutToAddr() TxModifier { + return func(tx *types.DynamicFeeTx) { + tx.To = nil + } +} + +func (o *stubOracle) createProposal(input keccakTypes.InputData) *proposalConfig { + id := o.nextProposalId + o.nextProposalId++ + + proposal := &proposalConfig{ + id: id, + claimantAddr: ident.Claimant, + inputData: input, + uuid: ident.UUID, + valid: true, + } + o.proposals[id] = proposal + + return proposal +} + +type stubL1Source struct { + nextTxId uint64 + // Map block number to tx + txs map[uint64]types.Transactions + // Map txHash to receipt + rcptStatus map[common.Hash]uint64 + // Map txHash to logs + logs map[common.Hash][]*types.Log +} + +func (s *stubL1Source) ChainID(_ context.Context) (*big.Int, error) { + return chainID, nil +} + +func (s *stubL1Source) BlockByNumber(_ context.Context, number *big.Int) (*types.Block, error) { + txs, ok := s.txs[number.Uint64()] + if !ok { + return nil, errors.New("not found") + } + return (&types.Block{}).WithBody(txs, nil), nil +} + +func (s *stubL1Source) TransactionReceipt(_ context.Context, txHash common.Hash) (*types.Receipt, error) { + rcptStatus, ok := s.rcptStatus[txHash] + if !ok { + rcptStatus = types.ReceiptStatusSuccessful + } else if rcptStatus == MissingReceiptStatus { + return nil, errors.New("not found") + } + + logs := s.logs[txHash] + return &types.Receipt{Status: rcptStatus, Logs: logs}, nil +} + +func (s *stubL1Source) createTx(blockNum uint64, key *ecdsa.PrivateKey, txMod TxModifier) *types.Transaction { + txId := s.nextTxId + s.nextTxId++ + + inner := &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: txId, + To: &oracleAddr, + Value: big.NewInt(0), + GasTipCap: big.NewInt(1), + GasFeeCap: big.NewInt(2), + Gas: 3, + Data: []byte{}, + } + txMod(inner) + tx := types.MustSignNewTx(key, types.LatestSignerForChainID(inner.ChainID), inner) + + // Track tx internally + txSet := s.txs[blockNum] + txSet = append(txSet, tx) + s.txs[blockNum] = txSet + + 
return tx +} + +func (s *stubL1Source) createLog(tx *types.Transaction, proposal *proposalConfig) *types.Log { + // Concat the claimant address and the proposal id + // These will be split back into address and id in fetcher.extractRelevantLeavesFromTx + data := append(proposal.claimantAddr[:], proposal.id) + + txLog := &types.Log{ + Address: oracleAddr, + Data: data, + Topics: []common.Hash{}, + + // ignored (zeroed): + BlockNumber: 0, + TxHash: common.Hash{}, + TxIndex: 0, + BlockHash: common.Hash{}, + Index: 0, + Removed: false, + } + + // Track tx log + logSet := s.logs[tx.Hash()] + logSet = append(logSet, txLog) + s.logs[tx.Hash()] = logSet + + return txLog +} diff --git a/op-challenger2/game/keccak/matrix/immediateeof_test.go b/op-challenger2/game/keccak/matrix/immediateeof_test.go new file mode 100644 index 000000000000..27d0410d8cae --- /dev/null +++ b/op-challenger2/game/keccak/matrix/immediateeof_test.go @@ -0,0 +1,66 @@ +package matrix + +import ( + "errors" + "fmt" + "io" + "math/rand" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/stretchr/testify/require" +) + +type sameCallEOFReader struct { + idx int + data []byte +} + +// newSameCallEOFReader returns an io.Reader that returns io.EOF in the same call that returns the final byte of data. +// This is valid as per io.Reader: +// An instance of this general case is that a Reader returning +// a non-zero number of bytes at the end of the input stream may +// return either err == EOF or err == nil. The next Read should +// return 0, EOF. 
+func newSameCallEOFReader(data []byte) *sameCallEOFReader { + return &sameCallEOFReader{data: data} +} + +func (i *sameCallEOFReader) Read(out []byte) (int, error) { + end := min(len(i.data), i.idx+len(out)) + n := copy(out, i.data[i.idx:end]) + i.idx += n + if i.idx >= len(i.data) { + return n, io.EOF + } + return n, nil +} + +func TestImmediateEofReader(t *testing.T) { + rng := rand.New(rand.NewSource(223)) + data := testutils.RandomData(rng, 100) + + batchSizes := []int{1, 2, 3, 5, 10, 33, 99, 100, 101} + for _, size := range batchSizes { + size := size + t.Run(fmt.Sprintf("Size-%v", size), func(t *testing.T) { + + reader := &sameCallEOFReader{data: data} + out := make([]byte, size) + actual := make([]byte, 0, len(data)) + for { + n, err := reader.Read(out) + actual = append(actual, out[:n]...) + if errors.Is(err, io.EOF) { + break + } else { + require.NoError(t, err) + } + } + require.Equal(t, data, actual) + n, err := reader.Read(out) + require.Zero(t, n) + require.ErrorIs(t, err, io.EOF) + }) + } +} diff --git a/op-challenger2/game/keccak/matrix/keccak.go b/op-challenger2/game/keccak/matrix/keccak.go new file mode 100644 index 000000000000..d1243cacf6d6 --- /dev/null +++ b/op-challenger2/game/keccak/matrix/keccak.go @@ -0,0 +1,674 @@ +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This code is taken from the standard golang.org/x/crypto module + +package matrix + +import ( + "encoding/binary" + "math/bits" +) + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func newLegacyKeccak256() *state { + return &state{rate: 136, outputLen: 32, dsbyte: 0x01} +} + +var ( + xorIn = xorInGeneric + copyOut = copyOutGeneric +) + +// xorInGeneric xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. +func xorInGeneric(d *state, buf []byte) { + n := len(buf) / 8 + + for i := 0; i < n; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } +} + +// copyOutGeneric copies uint64s to a byte buffer. 
+func copyOutGeneric(d *state, b []byte) { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } +} + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate]byte + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(b) +} + +type state struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + buf []byte // points into storage + rate int // the number of bytes of state to use + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + storage storageBuf + + // Specific to SHA-3 and SHAKE. 
+ outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.buf = d.storage.asBytes()[:0] +} + +func (d *state) clone() *state { + ret := *d + if ret.state == spongeAbsorbing { + ret.buf = ret.storage.asBytes()[:len(ret.buf)] + } else { + ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] + } + + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. +func (d *state) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.buf) + d.buf = d.storage.asBytes()[:0] + keccakF1600(&d.a) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutation before + // copying more output. + keccakF1600(&d.a) + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute(dsbyte byte) { + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. 
+ d.buf = append(d.buf, dsbyte) + zerosStart := len(d.buf) + d.buf = d.storage.asBytes()[:d.rate] + for i := zerosStart; i < d.rate; i++ { + d.buf[i] = 0 + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + d.buf[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) +} + +// Write absorbs more data into the hash's state. It panics if any +// output has already been read. +func (d *state) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + written = len(p) + + for len(p) > 0 { + if len(d.buf) == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + keccakF1600(&d.a) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - len(d.buf) + if todo > len(p) { + todo = len(p) + } + d.buf = append(d.buf, p[:todo]...) + p = p[todo:] + + // If the sponge is full, apply the permutation. + if len(d.buf) == d.rate { + d.permute() + } + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute(d.dsbyte) + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + n := copy(out, d.buf) + d.buf = d.buf[n:] + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if len(d.buf) == 0 { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. It panics if any output has already been read. 
+func (d *state) Sum(in []byte) []byte { + if d.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation + _, _ = dup.Read(hash) + return append(in, hash...) +} + +// rc stores the round constants for use in the ι step. +var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. 
+ + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[12] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[18] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[24] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[16] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[22] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[3] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[1] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[7] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[19] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[11] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[23] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[4] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = 
a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[2] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[8] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[14] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[7] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[23] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[14] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[11] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[2] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[18] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[6] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[22] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[4] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[1] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ 
d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[8] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[24] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[12] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[3] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[19] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[22] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[8] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[19] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[1] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[12] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[23] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[16] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[2] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 
= bits.RotateLeft64(t, 25) + t = a[24] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[6] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[3] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[14] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[7] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[18] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[4] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[2] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[3] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[4] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[6] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[7] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[8] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = 
bits.RotateLeft64(t, 20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[11] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[12] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[14] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[16] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[18] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[19] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[22] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[23] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[24] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/op-challenger2/game/keccak/matrix/matrix.go b/op-challenger2/game/keccak/matrix/matrix.go new file mode 100644 index 000000000000..1e5429ea6e5a --- /dev/null +++ b/op-challenger2/game/keccak/matrix/matrix.go @@ -0,0 +1,241 @@ +package matrix + +import ( + "errors" + "fmt" + "io" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/merkle" + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// StateMatrix implements a stateful keccak sponge with the ability to create state commitments after 
each permutation +type StateMatrix struct { + s *state + // prestateMatrix is the state matrix snapshot after processing prestateLeaf but before processing poststateLeaf + prestateMatrix types.StateSnapshot + // prestateLeaf is the last prestate leaf. + // Used to retrieve the prestate to squeeze. + prestateLeaf types.Leaf + // poststateLeaf is the last poststate leaf. + // Used to retrieve the poststate to squeeze. + poststateLeaf types.Leaf + // merkleTree is the internal [merkle.BinaryMerkleTree] used to generate proofs + merkleTree *merkle.BinaryMerkleTree +} + +var ( + ErrInvalidMaxLen = errors.New("invalid max length to absorb") + ErrIncorrectCommitmentCount = errors.New("incorrect number of commitments for input length") + ErrValid = errors.New("state commitments are valid") +) + +// Challenge creates a [types.Challenge] to invalidate the provided preimage data if possible. +// [ErrValid] is returned if the provided inputs are valid and no challenge can be created. +func Challenge(data io.Reader, commitments []common.Hash) (types.Challenge, error) { + s := NewStateMatrix() + lastValidState := s.StateSnapshot() + var lastValidLeaf types.Leaf + var firstInvalidLeaf types.Leaf + for i := 0; ; i++ { + if i >= len(commitments) { + // There should have been more commitments.
+ // The contracts should prevent this so it can't be challenged, return an error + return types.Challenge{}, ErrIncorrectCommitmentCount + } + claimedCommitment := commitments[i] + _, err := s.absorbNextLeafInput(data, func() common.Hash { return claimedCommitment }) + isEOF := errors.Is(err, io.EOF) + if err != nil && !isEOF { + return types.Challenge{}, fmt.Errorf("failed to verify inputs: %w", err) + } + validCommitment := s.StateCommitment() + + if firstInvalidLeaf == (types.Leaf{}) { + if validCommitment != claimedCommitment { + lastValidLeaf = s.prestateLeaf + firstInvalidLeaf = s.poststateLeaf + } else { + lastValidState = s.StateSnapshot() + } + } + if isEOF { + if i < len(commitments)-1 { + // We got too many commitments + // The contracts should prevent this so it can't be challenged, return an error + return types.Challenge{}, ErrIncorrectCommitmentCount + } + break + } + } + if firstInvalidLeaf != (types.Leaf{}) { + var prestateProof merkle.Proof + if lastValidLeaf != (types.Leaf{}) { + prestateProof = s.merkleTree.ProofAtIndex(lastValidLeaf.Index) + } + poststateProof := s.merkleTree.ProofAtIndex(firstInvalidLeaf.Index) + return types.Challenge{ + StateMatrix: lastValidState, + Prestate: lastValidLeaf, + PrestateProof: prestateProof, + Poststate: firstInvalidLeaf, + PoststateProof: poststateProof, + }, nil + } + return types.Challenge{}, ErrValid +} + +// NewStateMatrix creates a new state matrix initialized with the initial, zero keccak block. +func NewStateMatrix() *StateMatrix { + return &StateMatrix{ + s: newLegacyKeccak256(), + merkleTree: merkle.NewBinaryMerkleTree(), + } +} + +// StateCommitment returns the state commitment for the current state matrix. +// Additional data may be absorbed after calling this method. 
func (d *StateMatrix) StateCommitment() common.Hash {
	return crypto.Keccak256Hash(d.StateSnapshot().Pack())
}

// StateSnapshot returns a copy of the current keccak state matrix.
func (d *StateMatrix) StateSnapshot() types.StateSnapshot {
	var snap types.StateSnapshot
	copy(snap[:], d.s.a[:])
	return snap
}

// newLeafWithPadding creates a new [Leaf] from inputs, padding the input to the [BlockSize].
func (d *StateMatrix) newLeafWithPadding(input []byte, index uint64, commitment common.Hash, final bool) types.Leaf {
	var paddedInput [types.BlockSize]byte
	copy(paddedInput[:], input)

	if final {
		pad(input, &paddedInput, d.s.dsbyte)
	}
	return types.Leaf{
		Input:           paddedInput,
		Index:           index,
		StateCommitment: commitment,
	}
}

// pad writes the keccak multi-rate padding for a final block of len(input) bytes into paddedInput.
func pad(input []byte, paddedInput *[types.BlockSize]byte, dsbyte byte) {
	// Pad with this instance's domain-separator bits. We know that there's
	// at least one more byte of space in paddedInput because, if it were full,
	// this wouldn't be the last block and the padding would be in the next block.
	// dsbyte also contains the first one bit for the padding. See the comment in the state struct.
	paddedInput[len(input)] = dsbyte
	// The remaining bytes are already zeros since paddedInput is a new array.
	// This adds the final one bit for the padding. Because of the way that
	// bits are numbered from the LSB upwards, the final bit is the MSB of
	// the last byte.
	paddedInput[types.BlockSize-1] ^= 0x80
}

// AbsorbUpTo absorbs up to maxLen bytes from in, one [types.BlockSize] leaf at a time,
// returning the data read and the state commitment after each absorbed leaf.
// maxLen must be a positive multiple of [types.BlockSize] or [ErrInvalidMaxLen] is returned.
// [io.EOF] is returned alongside the data when the reader is exhausted and the sponge finalized.
func (d *StateMatrix) AbsorbUpTo(in io.Reader, maxLen int) (types.InputData, error) {
	if maxLen < types.BlockSize || maxLen%types.BlockSize != 0 {
		return types.InputData{}, ErrInvalidMaxLen
	}
	input := make([]byte, 0, maxLen)
	commitments := make([]common.Hash, 0, maxLen/types.BlockSize)
	for len(input)+types.BlockSize <= maxLen {
		readData, err := d.absorbNextLeafInput(in, d.StateCommitment)
		if errors.Is(err, io.EOF) {
			// Final (possibly partial) leaf absorbed: record it and report EOF with Finalize set.
			input = append(input, readData...)
			commitments = append(commitments, d.StateCommitment())
			return types.InputData{
				Input:       input,
				Commitments: commitments,
				Finalize:    true,
			}, io.EOF
		} else if err != nil {
			return types.InputData{}, err
		}
		input = append(input, readData...)
		commitments = append(commitments, d.StateCommitment())
	}

	return types.InputData{
		Input:       input,
		Commitments: commitments,
		Finalize:    false,
	}, nil
}

// PrestateMatrix returns the snapshot captured immediately before the most recent leaf was absorbed.
func (d *StateMatrix) PrestateMatrix() types.StateSnapshot {
	return d.prestateMatrix
}

// PrestateWithProof returns the prestate leaf with its merkle proof.
func (d *StateMatrix) PrestateWithProof() (types.Leaf, merkle.Proof) {
	proof := d.merkleTree.ProofAtIndex(d.prestateLeaf.Index)
	return d.prestateLeaf, proof
}

// PoststateWithProof returns the poststate leaf with its merkle proof.
func (d *StateMatrix) PoststateWithProof() (types.Leaf, merkle.Proof) {
	proof := d.merkleTree.ProofAtIndex(d.poststateLeaf.Index)
	return d.poststateLeaf, proof
}

// absorbNextLeafInput reads up to [BlockSize] bytes from in and absorbs them into the state matrix.
// If EOF is reached while reading, the state matrix is finalized and [io.EOF] is returned.
func (d *StateMatrix) absorbNextLeafInput(in io.Reader, stateCommitment func() common.Hash) ([]byte, error) {
	data := make([]byte, types.BlockSize)
	read := 0
	final := false
	// Keep reading until a full block is buffered or the reader reports EOF.
	for read < types.BlockSize {
		n, err := in.Read(data[read:])
		if errors.Is(err, io.EOF) {
			read += n
			final = true
			break
		} else if err != nil {
			return nil, err
		}
		read += n
	}
	input := data[:read]
	// Don't add the padding if we read a full block of input data, even if we reached EOF.
	// Just absorb the full block and return so the caller can capture the state commitment after the block
	// The next call will read no data from the Reader (already at EOF) and so add the final padding as an
	// additional block. We can then return EOF to indicate there are no further blocks.
	final = final && len(input) < types.BlockSize
	// Snapshot the state before absorbing so PrestateMatrix reflects the pre-absorb state.
	d.prestateMatrix = d.StateSnapshot()
	d.absorbLeafInput(input, final)
	commitment := stateCommitment()
	if d.poststateLeaf == (types.Leaf{}) {
		// First leaf ever absorbed: the prestate is the zero leaf.
		d.prestateLeaf = types.Leaf{}
		d.poststateLeaf = d.newLeafWithPadding(input, 0, commitment, final)
	} else {
		d.prestateLeaf = d.poststateLeaf
		d.poststateLeaf = d.newLeafWithPadding(input, d.prestateLeaf.Index+1, commitment, final)
	}
	d.merkleTree.AddLeaf(d.poststateLeaf.Hash())
	if final {
		return input, io.EOF
	}
	return input, nil
}

// absorbLeafInput absorbs the specified data into the keccak sponge.
// If final is true, the data is padded to the required length, otherwise it must be exactly [types.BlockSize] bytes.
func (d *StateMatrix) absorbLeafInput(data []byte, final bool) {
	if !final && len(data) != types.BlockSize {
		panic("sha3: Incorrect leaf data length")
	}
	_, _ = d.s.Write(data[:])
	if final {
		d.s.padAndPermute(d.s.dsbyte)
	}
}

// Hash finalizes the keccak permutation and returns the final hash.
+// No further leaves can be absorbed after this is called +func (d *StateMatrix) Hash() (h common.Hash) { + _, _ = d.s.Read(h[:]) + return h +} diff --git a/op-challenger2/game/keccak/matrix/matrix_test.go b/op-challenger2/game/keccak/matrix/matrix_test.go new file mode 100644 index 000000000000..b4c7ad54760f --- /dev/null +++ b/op-challenger2/game/keccak/matrix/matrix_test.go @@ -0,0 +1,436 @@ +package matrix + +import ( + "bytes" + _ "embed" + "encoding/json" + "errors" + "fmt" + "io" + "math/rand" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/merkle" + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +//go:embed testdata/commitments.json +var refTests []byte + +func TestStateCommitment(t *testing.T) { + tests := []struct { + expectedPacked string + matrix []uint64 // Automatically padded with 0s to the required length + }{ + { + expectedPacked: 
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, + { + expectedPacked: 
"000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f0000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000013000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000150000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000001700000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000019", + matrix: []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}, + }, + { + expectedPacked: 
"000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + matrix: []uint64{18446744073709551615}, + }, + } + for _, test := range tests { + test := test + t.Run("", func(t *testing.T) { + state := NewStateMatrix() + copy(state.s.a[:], test.matrix) + expected := crypto.Keccak256Hash(common.FromHex(test.expectedPacked)) + actual := state.StateCommitment() + require.Equal(t, test.expectedPacked, common.Bytes2Hex(state.StateSnapshot().Pack())) + 
require.Equal(t, expected, actual) + }) + } +} + +type testData struct { + Input []byte `json:"input"` + Commitments []common.Hash `json:"commitments"` + PrestateLeaf []byte `json:"prestateLeaf"` + PoststateLeaf []byte `json:"poststateLeaf"` +} + +func TestAbsorbNextLeaf_ReferenceCommitments(t *testing.T) { + var tests []testData + require.NoError(t, json.Unmarshal(refTests, &tests)) + + for i, test := range tests { + test := test + t.Run(fmt.Sprintf("Ref-%v-%v", i, len(test.Input)), func(t *testing.T) { + prevLeaf := types.Leaf{} + s := NewStateMatrix() + commitments := []common.Hash{s.StateCommitment()} + in := bytes.NewReader(test.Input) + for { + readData, err := s.absorbNextLeafInput(in, s.StateCommitment) + isEOF := errors.Is(err, io.EOF) + if !isEOF { + // Shouldn't get any error except EOF + require.NoError(t, err) + } + prestate, _ := s.PrestateWithProof() + poststate, _ := s.PoststateWithProof() + require.Equal(t, prevLeaf, prestate, "Prestate should be the previous post state") + require.Equal(t, poststate.Input[:len(readData)], readData, "Post state should have returned input data") + prevLeaf = poststate + commitments = append(commitments, s.StateCommitment()) + if isEOF { + break + } + } + actual := s.Hash() + expected := crypto.Keccak256Hash(test.Input) + require.Equal(t, expected, actual) + require.Equal(t, test.Commitments, commitments) + + prestate, _ := s.PrestateWithProof() + var expectedPre [types.BlockSize]byte + copy(expectedPre[:], test.PrestateLeaf) + require.Equal(t, expectedPre, prestate.Input, "Final prestate") + poststate, _ := s.PoststateWithProof() + var expectedPost [types.BlockSize]byte + copy(expectedPost[:], test.PoststateLeaf) + require.Equal(t, expectedPost, poststate.Input, "Final poststate") + }) + } +} + +func TestAbsorbUpTo_ReferenceCommitments(t *testing.T) { + var tests []testData + require.NoError(t, json.Unmarshal(refTests, &tests)) + + for i, test := range tests { + test := test + t.Run(fmt.Sprintf("Ref-%v", i), func(t 
*testing.T) {
			s := NewStateMatrix()
			commitments := []common.Hash{s.StateCommitment()}
			in := bytes.NewReader(test.Input)
			for {
				input, err := s.AbsorbUpTo(in, types.BlockSize*3)
				if errors.Is(err, io.EOF) {
					commitments = append(commitments, input.Commitments...)
					break
				}
				// Shouldn't get any error except EOF
				require.NoError(t, err)
				commitments = append(commitments, input.Commitments...)
			}
			actual := s.Hash()
			expected := crypto.Keccak256Hash(test.Input)
			require.Equal(t, expected, actual)
			require.Equal(t, test.Commitments, commitments)
		})
	}
}

// TestAbsorbUpTo_ReferenceCommitments_SameCallEOF repeats the reference checks with a reader
// that returns the final data and io.EOF from the same Read call.
func TestAbsorbUpTo_ReferenceCommitments_SameCallEOF(t *testing.T) {
	var tests []testData
	require.NoError(t, json.Unmarshal(refTests, &tests))

	for i, test := range tests {
		test := test
		t.Run(fmt.Sprintf("Ref-%v", i), func(t *testing.T) {
			s := NewStateMatrix()
			commitments := []common.Hash{s.StateCommitment()}
			in := newSameCallEOFReader(test.Input)
			for {
				input, err := s.AbsorbUpTo(in, types.BlockSize*3)
				if errors.Is(err, io.EOF) {
					commitments = append(commitments, input.Commitments...)
					break
				}
				// Shouldn't get any error except EOF
				require.NoError(t, err)
				commitments = append(commitments, input.Commitments...)
			}
			actual := s.Hash()
			expected := crypto.Keccak256Hash(test.Input)
			require.Equal(t, expected, actual)
			require.Equal(t, test.Commitments, commitments)
		})
	}
}

// TestAbsorbUpTo_LimitsDataRead verifies AbsorbUpTo stops at maxLen and resumes correctly.
func TestAbsorbUpTo_LimitsDataRead(t *testing.T) {
	s := NewStateMatrix()
	data := testutils.RandomData(rand.New(rand.NewSource(2424)), types.BlockSize*6+20)
	in := bytes.NewReader(data)
	// Should fully read the first four leaves worth
	inputData, err := s.AbsorbUpTo(in, types.BlockSize*4)
	require.NoError(t, err)
	require.Equal(t, data[0:types.BlockSize*4], inputData.Input)
	require.Len(t, inputData.Commitments, 4)
	require.False(t, inputData.Finalize)

	// Should read the remaining data and return EOF
	inputData, err = s.AbsorbUpTo(in, types.BlockSize*10)
	require.ErrorIs(t, err, io.EOF)
	require.Equal(t, data[types.BlockSize*4:], inputData.Input)
	require.Len(t, inputData.Commitments, 3, "2 full leaves plus the final partial leaf")
	require.True(t, inputData.Finalize)
}

// TestAbsorbUpTo_InvalidLengths verifies maxLen values that are not positive multiples of BlockSize are rejected.
func TestAbsorbUpTo_InvalidLengths(t *testing.T) {
	s := NewStateMatrix()
	lengths := []int{-types.BlockSize, -1, 0, 1, types.BlockSize - 1, types.BlockSize + 1, 2*types.BlockSize + 1}
	for _, length := range lengths {
		_, err := s.AbsorbUpTo(bytes.NewReader(nil), length)
		require.ErrorIsf(t, err, ErrInvalidMaxLen, "Should get invalid length for length %v", length)
	}
}

// TestMatrix_absorbNextLeaf checks the leaf-by-leaf data returned for empty, exact,
// overflowing and multi-block inputs.
func TestMatrix_absorbNextLeaf(t *testing.T) {
	fullLeaf := make([]byte, types.BlockSize)
	for i := 0; i < types.BlockSize; i++ {
		fullLeaf[i] = byte(i)
	}
	tests := []struct {
		name       string
		input      []byte
		leafInputs [][]byte
		errs       []error
	}{
		{
			name:       "empty",
			input:      []byte{},
			leafInputs: [][]byte{{}},
			errs:       []error{io.EOF},
		},
		{
			name:       "single",
			input:      fullLeaf,
			leafInputs: [][]byte{fullLeaf},
			errs:       []error{io.EOF},
		},
		{
			name:       "single-overflow",
			input:      append(fullLeaf, byte(9)),
			leafInputs: [][]byte{fullLeaf, {byte(9)}},
			errs:       []error{nil, io.EOF},
		},
		{
			name:  "double",
			input:
append(fullLeaf, fullLeaf...),
			leafInputs: [][]byte{fullLeaf, fullLeaf},
			errs:       []error{nil, io.EOF},
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			state := NewStateMatrix()
			in := bytes.NewReader(test.input)
			for i, leaf := range test.leafInputs {
				buf, err := state.absorbNextLeafInput(in, state.StateCommitment)
				if errors.Is(err, io.EOF) {
					require.Equal(t, test.errs[i], err)
					break
				}
				require.NoError(t, err)
				require.Equal(t, leaf, buf)
			}
		})
	}
}

// TestVerifyPreimage_ReferenceCommitments checks Challenge reports ErrValid for the reference fixtures.
func TestVerifyPreimage_ReferenceCommitments(t *testing.T) {
	var tests []testData
	require.NoError(t, json.Unmarshal(refTests, &tests))

	for i, test := range tests {
		test := test
		t.Run(fmt.Sprintf("Ref-%v", i), func(t *testing.T) {
			// Exclude the empty state commitment
			challenge, err := Challenge(bytes.NewReader(test.Input), test.Commitments[1:])
			require.ErrorIs(t, err, ErrValid)
			require.Equal(t, types.Challenge{}, challenge)
		})
	}
}

// TestVerifyPreimage_ReferenceCommitments_SameCallEOF repeats the valid-preimage checks with a
// reader that returns data and io.EOF from the same Read call.
func TestVerifyPreimage_ReferenceCommitments_SameCallEOF(t *testing.T) {
	var tests []testData
	require.NoError(t, json.Unmarshal(refTests, &tests))

	for i, test := range tests {
		test := test
		t.Run(fmt.Sprintf("Ref-%v", i), func(t *testing.T) {
			// Exclude the empty state commitment
			challenge, err := Challenge(newSameCallEOFReader(test.Input), test.Commitments[1:])
			require.ErrorIs(t, err, ErrValid)
			require.Equal(t, types.Challenge{}, challenge)
		})
	}
}

// TestVerifyPreimage corrupts each commitment in turn and checks Challenge produces the
// expected challenge (state matrix, leaves and merkle proofs) for every divergence point.
func TestVerifyPreimage(t *testing.T) {
	preimage := testutils.RandomData(rand.New(rand.NewSource(2323)), 1024)
	// validCommitments computes the correct commitment list for preimage.
	validCommitments := func() []common.Hash {
		valid, err := NewStateMatrix().AbsorbUpTo(bytes.NewReader(preimage), 1000*types.BlockSize)
		require.ErrorIs(t, err, io.EOF, "Should read all preimage data")
		return valid.Commitments
	}
	// leafData returns the (padded, when final) input bytes of leaf idx.
	leafData := func(idx int) (out [types.BlockSize]byte) {
		end := min((idx+1)*types.BlockSize, len(preimage))
		input := preimage[idx*types.BlockSize : end]
		copy(out[:], input)
		if len(input) < types.BlockSize {
			pad(input, &out, newLegacyKeccak256().dsbyte)
		}
		return
	}
	// merkleTree creates the final merkle tree after including all leaves.
	merkleTree := func(commitments []common.Hash) *merkle.BinaryMerkleTree {
		m := merkle.NewBinaryMerkleTree()
		for i, commitment := range commitments {
			leaf := types.Leaf{
				Input:           leafData(i),
				Index:           uint64(i),
				StateCommitment: commitment,
			}
			m.AddLeaf(leaf.Hash())
		}
		return m
	}

	// challengeLeaf builds the challenge expected when commitment invalidIdx is incorrect.
	challengeLeaf := func(commitments []common.Hash, invalidIdx int) types.Challenge {
		invalidLeafStart := invalidIdx * types.BlockSize
		s := NewStateMatrix()
		_, err := s.AbsorbUpTo(bytes.NewReader(preimage), invalidLeafStart)
		require.NoError(t, err)

		fullMerkle := merkleTree(commitments)
		prestateLeaf := leafData(invalidIdx - 1)
		poststateLeaf := leafData(invalidIdx)
		return types.Challenge{
			StateMatrix: s.StateSnapshot(),
			Prestate: types.Leaf{
				Input:           prestateLeaf,
				Index:           uint64(invalidIdx - 1),
				StateCommitment: commitments[invalidIdx-1],
			},
			PrestateProof: fullMerkle.ProofAtIndex(uint64(invalidIdx - 1)),

			Poststate: types.Leaf{
				Input:           poststateLeaf,
				Index:           uint64(invalidIdx),
				StateCommitment: commitments[invalidIdx],
			},
			PoststateProof: fullMerkle.ProofAtIndex(uint64(invalidIdx)),
		}
	}

	type testInputs struct {
		name        string
		commitments func() []common.Hash
		expected    types.Challenge
		expectedErr error
	}

	poststateLeaf := leafData(0)
	tests := []testInputs{
		{
			name:        "Valid",
			commitments: validCommitments,
			expectedErr: ErrValid,
		},
		func() testInputs {
			incorrectFirstCommitment := validCommitments()
			incorrectFirstCommitment[0] = common.Hash{0xaa}
			return testInputs{
				name: "IncorrectFirstLeaf",
				commitments: func() []common.Hash {
					return incorrectFirstCommitment
				},
				expected: types.Challenge{
					StateMatrix: NewStateMatrix().StateSnapshot(),
					Prestate:    types.Leaf{},
					Poststate: types.Leaf{
						Input:           poststateLeaf,
						Index:           0,
						StateCommitment: common.Hash{0xaa},
					},
					PoststateProof: merkleTree(incorrectFirstCommitment).ProofAtIndex(0),
				},
			}
		}(),
	}

	// Add a case corrupting each non-first commitment in turn.
	for i := 1; i < len(preimage)/types.BlockSize; i++ {
		commitments := validCommitments()
		commitments[i] = common.Hash{0xaa}
		tests = append(tests, testInputs{
			name: fmt.Sprintf("Incorrect-%v", i),
			commitments: func() []common.Hash {
				return commitments
			},
			expected: challengeLeaf(commitments, i),
		})
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			challenge, err := Challenge(bytes.NewReader(preimage), test.commitments())
			require.ErrorIs(t, err, test.expectedErr)
			require.Equal(t, test.expected.StateMatrix, challenge.StateMatrix, "Correct state matrix")
			require.Equal(t, test.expected.Prestate, challenge.Prestate, "Correct prestate")
			if test.expected.Prestate != (types.Leaf{}) {
				require.Equal(t, test.expected.Prestate.StateCommitment, crypto.Keccak256Hash(challenge.StateMatrix.Pack()), "Prestate matches leaf commitment")
			}
			require.Equal(t, test.expected.PrestateProof, challenge.PrestateProof, "Correct prestate proof")
			require.Equal(t, test.expected.Poststate, challenge.Poststate, "Correct poststate")
			require.Equal(t, test.expected.PoststateProof, challenge.PoststateProof, "Correct poststate proof")
			require.Equal(t, test.expected, challenge, "Challenge correct overall")
		})
	}
}

// TestVerifyPreimage_DataMultipleOfBlockSize checks the exact-multiple case where the
// padding forms an extra final block.
func TestVerifyPreimage_DataMultipleOfBlockSize(t *testing.T) {
	preimage := testutils.RandomData(rand.New(rand.NewSource(2323)), 5*types.BlockSize)
	valid, err := NewStateMatrix().AbsorbUpTo(bytes.NewReader(preimage), 1000*types.BlockSize)
	require.ErrorIs(t, err, io.EOF, "Should read all preimage data")

	_, err = Challenge(bytes.NewReader(preimage), valid.Commitments)
	require.ErrorIs(t, err, ErrValid)
}

// TestVerifyPreimage_TooManyCommitments checks Challenge rejects extra commitments.
func TestVerifyPreimage_TooManyCommitments(t *testing.T) {
	data := []byte{1}
	valid, err := NewStateMatrix().AbsorbUpTo(bytes.NewReader(data[:]),
10*types.BlockSize) + require.ErrorIs(t, err, io.EOF) + commitments := append(valid.Commitments, common.Hash{0xaa}) + _, err = Challenge(bytes.NewReader(data), commitments) + require.ErrorIs(t, err, ErrIncorrectCommitmentCount) +} + +func TestVerifyPreimage_TooFewCommitments(t *testing.T) { + data := [types.BlockSize * 3]byte{} + valid, err := NewStateMatrix().AbsorbUpTo(bytes.NewReader(data[:]), 10*types.BlockSize) + require.ErrorIs(t, err, io.EOF) + commitments := valid.Commitments[:len(valid.Commitments)-1] + _, err = Challenge(bytes.NewReader(data[:]), commitments) + require.ErrorIs(t, err, ErrIncorrectCommitmentCount) +} + +func FuzzKeccak(f *testing.F) { + f.Fuzz(func(t *testing.T, number, time uint64, data []byte) { + s := NewStateMatrix() + for i := 0; i < len(data); i += types.BlockSize { + end := min(i+types.BlockSize, len(data)) + s.absorbLeafInput(data[i:end], end == len(data)) + } + actual := s.Hash() + expected := crypto.Keccak256Hash(data) + require.Equal(t, expected, actual) + }) +} diff --git a/op-challenger2/game/keccak/matrix/testdata/commitments.json b/op-challenger2/game/keccak/matrix/testdata/commitments.json new file mode 100644 index 000000000000..c94b29261495 --- /dev/null +++ b/op-challenger2/game/keccak/matrix/testdata/commitments.json @@ -0,0 +1,1093 @@ +[ + { + "input": "kYbKyRm02RR58P+t8WPE/UNL4zmr4XIe3qfMfFwRZVWlpzgDSmAXGNhqCGJZivDoNDRM1zrLaPAk5Ol8dAyi3shQgLHaBur7TLczzwxS9b8rU7WKPhTMsK+zblk3ps19yKixxPMjzMA5L70JOy0ng7/ZoJJhMuHC7TD1SYL5LEA=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x87d8548924b20b95fe1ed923236c44d2f98b8dc7a3c8c378e4193cc2745b1e8d" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": 
"kYbKyRm02RR58P+t8WPE/UNL4zmr4XIe3qfMfFwRZVWlpzgDSmAXGNhqCGJZivDoNDRM1zrLaPAk5Ol8dAyi3shQgLHaBur7TLczzwxS9b8rU7WKPhTMsK+zblk3ps19yKixxPMjzMA5L70JOy0ng7/ZoJJhMuHC7TD1SYL5LEABAAAAAAAAgA==" + }, + { + "input": null, + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xb87c3be3d5bf2230e08186014204ab2d4e5eb457ec9e95fd4a703f9f97b8c49a" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "d2w7VVWNxlrhpX8iaT9gU3dqU/LStT7JDOxkPmKDpdExTm23fLcfou2UP+1koHU2XOlRU3gyde/InQA2axIOqmaWn7oU+Zyt8yChgn0fbY6icCcf3ILIKneVRxQQYef6lX7gloDl65pA85pQYZxSfAOIfqrMsAkWoebxmJsoTLSsCHSGrdlE8Cy9hzyNjx2MwnjwTc/GIrX+cpbEfnRXHTQSYrwHjILe57MaJ5EuVA==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x74df6384655843a30a8064467ece134524f68d18ca2e2290e0cd6f1c159d901b", + "0xb0729c23d40d7d05656a9c0af18caf4234b2d51f7b11f8ad091fb6c6c5f41788" + ], + "prestateLeaf": "d2w7VVWNxlrhpX8iaT9gU3dqU/LStT7JDOxkPmKDpdExTm23fLcfou2UP+1koHU2XOlRU3gyde/InQA2axIOqmaWn7oU+Zyt8yChgn0fbY6icCcf3ILIKneVRxQQYef6lX7gloDl65pA85pQYZxSfAOIfqrMsAkWoebxmJsoTLSsCHSGrdlE8A==", + "poststateLeaf": "LL2HPI2PHYzCePBNz8Yitf5ylsR+dFcdNBJivAeMgt7nsxonkS5UAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "QbjuDum/N3hGQLgqXL+MCiPEUrt4bF+xejAngdRK3D8IOKnZfk6TaiGMdsG8vg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xafaa5d998cf20052dea34066e466691f3a59af8a6143b7ff5626f87cb76f820b" + ], + "prestateLeaf": 
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "QbjuDum/N3hGQLgqXL+MCiPEUrt4bF+xejAngdRK3D8IOKnZfk6TaiGMdsG8vgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "PaMoGKkQH3/NFs0YYsAG2u80tRrxq+flLSff0yCFhe5akRB8t03nLlhPgofL80CDBxC0Yc5hGj7bxMVgg4mfjLQ8tKjfxYK99F+IO9cq5cqxHS/QMSQ97KgyNUVb89PB2JwdDuqbCe+RnioFR1MpKyroSJpfmFPt+icQ/OZs6bVKqCw2TnALoheU/9r8MMDWW9NdHzf72ZZJC8UPa86dhESNMnL5VYv27wZibh8FaNw5oAFmEg2C8NBn2RsIkn9yEBD7M1YVA/7QDJghK7iVYhy5x7Hhd+VFikbBmgbWJKG1TQuNc5T1+iYS90GOvOHBwFbJB4/C4b5gUAaJcc7Y4V3/nUPhVMGHj+PA5emMS3J/9Qt43RzJa0/fn0LtCH4mvx73Ll6a91iINQ==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xccd955f751642425fd9b3bf7785da8c9f6e9d55027d1048ade2b9f0a3071860b", + "0xf77a4c6e252daf3f67bbe40f3275aa854418b0e0afb389bfc4425899898b1953", + "0xf3402153d5503ec0a32cff2b3590764fb284d15f77a6688a6f5d0fc579c7f64a" + ], + "prestateLeaf": "F5T/2vwwwNZb010fN/vZlkkLxQ9rzp2ERI0ycvlVi/bvBmJuHwVo3DmgAWYSDYLw0GfZGwiSf3IQEPszVhUD/tAMmCEruJViHLnHseF35UWKRsGaBtYkobVNC41zlPX6JhL3QY684cHAVskHj8LhvmBQBolxztjhXf+dQ+FUwYeP48Dl6YxLcg==", + "poststateLeaf": "f/ULeN0cyWtP359C7Qh+Jr8e9y5emvdYiDUBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "vKG+6yYjKSdPcwd7e0iBfiwk7dek86+aVEoSgpkf9IdxiukoQFjZsx4XiOHcwv4=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xc59f075e03c34088cfe44693f1260a94c0ebb6075b07c00875d0c2c072a87708" + ], + "prestateLeaf": 
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "vKG+6yYjKSdPcwd7e0iBfiwk7dek86+aVEoSgpkf9IdxiukoQFjZsx4XiOHcwv4BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "8qf1VUYedwv+s6Swd3mfEOvga7Hq/DGOXlV/zZucPT1IdXnG119li9Q+ELxWLSMXD4T4TKoeQnyOXr0t7sNLguKzca1Ra5+iZKCWsDQD77wGJgjh+rVw93NupUjHOtRYyixRVBHe/NrsQIy248NZc5B7fc7XcND3wFDk6gsqiXWmoSdu7ak6n23IruOGlZ5K1ZhlXh5ax1Z3YDfdJQSFbeGcQg0RVg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x73aa7c55e21e7d1da31e759983a2d81d88ad9306b45e1448c8cc7fbbd8f48e57", + "0x640dd808bb3264f28233d889b062c26fe80cf4477946dc0162cfb3033aa92cee" + ], + "prestateLeaf": "8qf1VUYedwv+s6Swd3mfEOvga7Hq/DGOXlV/zZucPT1IdXnG119li9Q+ELxWLSMXD4T4TKoeQnyOXr0t7sNLguKzca1Ra5+iZKCWsDQD77wGJgjh+rVw93NupUjHOtRYyixRVBHe/NrsQIy248NZc5B7fc7XcND3wFDk6gsqiXWmoSdu7ak6nw==", + "poststateLeaf": "bciu44aVnkrVmGVeHlrHVndgN90lBIVt4ZxCDRFWAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "3MZjvdJnTVg343pKZjz1OVl1yY8e+Tg8tKM4lPZHys+s0w4R8f6n1Iz7k00DCF3GLQDk96PaNBlGoLZBYjp5EXqXwj/o2q0gYz9wZXxDP0Lkkqo24PRExtAtIPUmVMKOtjpsQ9hnZtAap9fI1CYcokU0ub+HwsB6aZcekWEJHCbfiQKt2eojkVRs/6TGWfyhSo1CB5jZh42btX9NcPY0+NvfcVk+pM34GGgrH//bMDx952t6MxwgN3CevaJMybeZAKzikCvAZU/xj9mPD1WY5tW+YKN1/YoRRba6mSDKkAcYv3PZXk/mfLISvXD1wlIJVjCwffN0d7nhXzha7tXpdkkx4r9/TdwaXMkWFuEBN1Xd0yPRWTELFos=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xf483f22d3ab0f7ed87d97b62150f67e41ea771c0c3cdb6187d041756d3e93aaf", + "0x86a10716ffd3208dc77e6ccc5b7a614c9e0543db2058dec6e3878605237a63d2", + "0x9ae0878d45796e55b8b866a1c91672b921aee33964af6351f8221c996058e03b" 
+ ], + "prestateLeaf": "VGz/pMZZ/KFKjUIHmNmHjZu1f01w9jT4299xWT6kzfgYaCsf/9swPH3na3ozHCA3cJ69okzJt5kArOKQK8BlT/GP2Y8PVZjm1b5go3X9ihFFtrqZIMqQBxi/c9leT+Z8shK9cPXCUglWMLB983R3ueFfOFru1el2STHiv39N3BpcyRYW4QE3VQ==", + "poststateLeaf": "3dMj0VkxCxaLAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "3EMdZkAwjohNpPLE/zR6nAXNkEk4DP0eNuXjKgQ0hDtQ2mdnsbCCwJ3Lih8qsVTyGRNc/xfZHc/ChlntAvTunU92p0JJvP7TMnyAZUPmmqGWtvk1vne2KbDXV0sD/sfhMG5cW8vHt8XGq7S/2GsljPjUjZyHV2Gg0s0G/r4W/FPrMw/GNNKKP9GQXfSDnqoUYRdJEtnz8rWi1w==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x5ef96af187147cc0ab594b8989269a10417a1aaa8dc382cf82ca5febba03d2da", + "0xef1e1ebbd234ac51de0fc268f182b1b5b9d27f98d5052d59aed0fa8e0bcb6b5a" + ], + "prestateLeaf": "3EMdZkAwjohNpPLE/zR6nAXNkEk4DP0eNuXjKgQ0hDtQ2mdnsbCCwJ3Lih8qsVTyGRNc/xfZHc/ChlntAvTunU92p0JJvP7TMnyAZUPmmqGWtvk1vne2KbDXV0sD/sfhMG5cW8vHt8XGq7S/2GsljPjUjZyHV2Gg0s0G/r4W/FPrMw/GNNKKPw==", + "poststateLeaf": "0ZBd9IOeqhRhF0kS2fPytaLXAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "JU9To00l+wzr7e2q+IGxvyanpdY8nI/LPGy9YJye2BlWkUPvDyXBSWwKzNML9rgCtNwvfMp49Yv0dkEjuOsijZaOi1OVB+uTFrw2NHaoK6t94riJLLyGFo3ObYsTFxXVYqdRUv3y8dqOyrJ2bA9rXmnExDFvLSkqE7grxQ6rW3MuiMGaLsjObw7L+yOey2P5K8GSnjgnvSl1PSFdTvcW0XwNiUmxF9mj/CLl0VLj0+tcp7PTOL+rGda+ERID35cNSxAqiy37r1RD0Aku5e3A9KlRW6XY8HtitjgbUgkyncd/+i49XLMOy6Z7+VRHW5ySOMQzBM4aZF0494wo8+hnYkyOQF4uzQdqp1nE5fAmpwG6ZBS1Il2d1IulXjjdcY+wTCBzWQZoRnxxYbU=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xc2244d44050cd1bf8c48ef1fd151118e24d0f4d055c6f8e2d9380393ecf536db", + "0x4217f2d70fe289c61f167410b02f0e21cea079e677abc7211e3419acfbfbb68f", + 
"0xbd91622393a0ce9bff67d7746746e9510fc00f9333d76461c17cdfa3dbfeca1f" + ], + "prestateLeaf": "Dsv7I57LY/krwZKeOCe9KXU9IV1O9xbRfA2JSbEX2aP8IuXRUuPT61yns9M4v6sZ1r4REgPflw1LECqLLfuvVEPQCS7l7cD0qVFbpdjwe2K2OBtSCTKdx3/6Lj1csw7Lpnv5VEdbnJI4xDMEzhpkXTj3jCjz6GdiTI5AXi7NB2qnWcTl8CanAQ==", + "poststateLeaf": "umQUtSJdndSLpV443XGPsEwgc1kGaEZ8cWG1AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "U+4ryEt1iB943t/XSZVdzLlwakB+Y+hPoG9iSrjOiyfL08ir1RT5PgquCmGzHi6wz6h+F5NnmByCJym79jxR9kfWm2I1B03pT0YwqfX6Z3t2RRDbG68VVK9vffAb+8tyReLczU+rF+PjF4KPhBeKIjHJ/D1QapCnCpq5wwVMOGixw7qr+SNPamMjg/KbVCNI9oaxDlyoXJy1cG3WlsU2OXRpUX0Ny/58kR70fXyjLIEADP7VRgz2M5k74W4PWd5XzssOnajYJ32tgAMLdje3NGyupp5c10LZjO3F2tks7POoMmzdNPrlkgXnfmwPsj7e8+Dq14LHVPOVWIMw9dpx+T+empmJNCLXNdkQni0ONijwtKc5iARcIe4NB6l0q0YiEw3w3MjK8E5BKG1j5w5CIx1MIfuUc/4yp842Geiv08u/4yhBIPgEUtdGqdQc1J3hDZfWgs/434158ANQG8kbE9Fcr0/phrlh5+fIkW1tVKrtwrmNG/YCE35gVLKd6pJ+MBG1WIN30f1yiNfde9HU20fjcV8Zw90ir1GYr1PNchXGG/FuvDrLkd6m2IbDNzsQuijBCRxefsjcJsnhuZgs3WkSCaRr6q9ka0o1yUQKigfYLR/ern3rF4G+dOlV3zh6Vm832UPqRP5J/VNYjVqajjx4MALzQ7OkIDGr8T/Hb2cKxMFWc5xoEf7yZgAGC2N5jyQUSw==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xc0f0ab359c32b6aa50a73d3aeaec405c311434e1029ab5b4069cce69bc621047", + "0xab029840421422083410d3ea591cc1d6d7f987e11fd1fb41673e4886cc7f9ec0", + "0xec87dfbf5373bbf99dde1e23353bedfa596df7887141406cc75a7788c75b1725", + "0x26d05dcd547e9c12fd9372c7a72a756d43eab9b1b184f573e87200e516f5199b" + ], + "prestateLeaf": "8LSnOYgEXCHuDQepdKtGIhMN8NzIyvBOQShtY+cOQiMdTCH7lHP+MqfONhnor9PLv+MoQSD4BFLXRqnUHNSd4Q2X1oLP+N+NefADUBvJGxPRXK9P6Ya5YefnyJFtbVSq7cK5jRv2AhN+YFSyneqSfjARtViDd9H9cojX3XvR1NtH43FfGcPdIg==", + "poststateLeaf": 
"r1GYr1PNchXGG/FuvDrLkd6m2IbDNzsQuijBCRxefsjcJsnhuZgs3WkSCaRr6q9ka0o1yUQKigfYLR/ern3rF4G+dOlV3zh6Vm832UPqRP5J/VNYjVqajjx4MALzQ7OkIDGr8T/Hb2cKxMFWc5xoEf7yZgAGC2N5jyQUSwEAAAAAAAAAAAAAgA==" + }, + { + "input": "ivixMAmdy1yLuqm6Z95ZCzFzPgJJwLbWxMLLECdqYwqyjscxsa55NlLoGIA6wdMUNjEYHifxsmhXRhfvu9vPS3nXJ0oLzvnwLtHp8nZJHfGfcVttccT3c5ODPLaxYArShpMIbeBOu7Cp5BrYt3WnjoWkdpvlD0SJex8VP4mz/XJ0bTt2/xyJg8GSQbyxQn+GzCnpUKDPPK9L8s+Fh/1tCYdzbX+YzCeW9+8vXkoVTQ9QrrPBxgU=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x89271427bd0052876b950a60146c74fa471497eaa803e2c24c4a358c768019a0", + "0x41235a392b38e63d2cad93c5a5cd4f9cd4caecfbd9ef0459dc4a17c2893f37a4" + ], + "prestateLeaf": "ivixMAmdy1yLuqm6Z95ZCzFzPgJJwLbWxMLLECdqYwqyjscxsa55NlLoGIA6wdMUNjEYHifxsmhXRhfvu9vPS3nXJ0oLzvnwLtHp8nZJHfGfcVttccT3c5ODPLaxYArShpMIbeBOu7Cp5BrYt3WnjoWkdpvlD0SJex8VP4mz/XJ0bTt2/xyJgw==", + "poststateLeaf": "wZJBvLFCf4bMKelQoM88r0vyz4WH/W0Jh3Ntf5jMJ5b37y9eShVND1Cus8HGBQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "Ck5zoYOAXqv3Uh/+JT/v86YSU9pO47Tg0RttlXad8wZnXyOVT6XqyFT+LpDufiRtYLWctKgbSOYGm/kH3UNbLDRdCItm6aCom/0HJ++ZvhUu8dX1FG1S4P+Jr/kNuokvOgHFTlQhrp5sIzD9jOqPSemxWmXhjbRbU5LuSz53AHxg/OshWA1CokEDDfH+2DrEBVDnqXHXBXKL3McudqosDj/jU1NW50GVlX4ZGjyZLYF5jxVfIDz4vFzW6uThNoTyJ+6jMWl1m93FSylA8bFtq3sxz9yMYnKFESv7YvsQND5EpwnoGfWK9ff1OHLv3XSY7AQv5igZpOkA2rBOdJFT0KH+kj9SKxPH/SnZgIjlNVbCALMgt81ZTtBS1CioTPIx2eAKKiEjn8ESVA1O5oTP7fxF1xvtj/gM1M0dOhY+u4c7jnWmcIDCVyXhBqgTkrhPqJ8eKXdTF/p5cmjs34gjF8TFIL7+", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xbc47fa6d888b9725f9d5c4d7b586161e7fc0fac8049df9b36d39dac2f5a6c738", + "0xfa87ab1b88d828bca2748721f15f86f725edf0d3f578e172a013cb2e8d6fec00", + "0x13921647e516ba9af4a595fb8141525a79c78c1f00fb7837a07682871efe1289" + ], + "prestateLeaf": 
"QQMN8f7YOsQFUOepcdcFcovcxy52qiwOP+NTU1bnQZWVfhkaPJktgXmPFV8gPPi8XNbq5OE2hPIn7qMxaXWb3cVLKUDxsW2rezHP3IxicoURK/ti+xA0PkSnCegZ9Yr19/U4cu/ddJjsBC/mKBmk6QDasE50kVPQof6SP1IrE8f9KdmAiOU1Vg==", + "poststateLeaf": "wgCzILfNWU7QUtQoqEzyMdngCiohI5/BElQNTuaEz+38Rdcb7Y/4DNTNHToWPruHO451pnCAwlcl4QaoE5K4T6ifHil3Uxf6eXJo7N+IIxfExSC+/gEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "6HjAPGpmaPGPLCx9xAj1Vhl8nKdgpXryqReIbBsNyxDJuqaJW5D0Te4+UWi2yYdbAy6fQPmq73XVG+N3sVHNt7vbKXNrVvSpoCS4m+p0PsFNztulo15r/gxcNeCaI9IKf+m8D0TSNFYYfOhF4k1aZS+74bWrcgYuOmeqv6XgdWR7mL9UjK6+AZryeUyEChjqydX8JGK8soYWOWNrTzV2u6GizHeOCFed8cZ4mXhdEjzlAcKjKxEO24f8qih37sI+6V6WokMW6+E/LZqQ/VxaUqMfTjQVoJ/RRsS79jBAoJ1yi5/oCu+3kHE3leNMMN7aJ+8nnDxuG+jW7ckYdNcjCFoSQLibTGxIbUPixXY4ZCDenuELGETWRy2wFuZllZBD+sRyLtcHPGREFO98GIgfNGBN3u+iCgY6jw2LRWUCkeLtzwWvx7WU4tTWDm3NYqhCQFuo1pH4ttX/ah/RnD+nt8tmdgwKcLeV+pOvNO/MAyBKNi9qbtmMexH8nXyC1immp0u3eVfhJ2DjWGAto43k904V7KJuUDnrlYkikK0sGX/w/qVUfPJFhjaFvF2bSDGAvywCQ53sO8ZgCTWUn66kkDgEpudtLQ==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x60fd3e6b5913fc082a47e438c988fefe3da4322870d961d27d330b6d3f7f02e0", + "0xaea0c1b37e1e2705800f5b90a14284a9bc7eca441368b493516961b3a98e1a13", + "0x341f062f82ad17beb00c8a01a18b17783cf2987fbf3174f768ca42a5a19069e4", + "0xc6f3092dbe89d9406fc98aeb99de16f69c9045dcda1f9f72f81537c0e26ab71a" + ], + "prestateLeaf": "3p7hCxhE1kctsBbmZZWQQ/rEci7XBzxkRBTvfBiIHzRgTd7vogoGOo8Ni0VlApHi7c8Fr8e1lOLU1g5tzWKoQkBbqNaR+LbV/2of0Zw/p7fLZnYMCnC3lfqTrzTvzAMgSjYvam7ZjHsR/J18gtYppqdLt3lX4Sdg41hgLaON5PdOFeyiblA56w==", + "poststateLeaf": "lYkikK0sGX/w/qVUfPJFhjaFvF2bSDGAvywCQ53sO8ZgCTWUn66kkDgEpudtLQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"+CYzcP/9LE+EdqOG+9a5RTQG00FP7wvzuwEsw/yuTYI+xxGehPkz9n5YKlJ3BrjkX3jQv2jKM98fMEvSkiJ2+u1sB66qmILE0m/4SDeSRXVphqrP758vExOEsL7NHEgBVd7IvslUFU7gSMZ0+ee1ee0CD8i51lBcpP7rQK3kheu+6RuNTQK+8SJ+bpWgzYqpXisZGIXDefKhANcQWDs+dg4xo/y9FyluuECVtzSGQxwDCLmZDfvKXLozcl+f8wMbRE2MXNMwVYefP6uy0/GgEXIDkwPH4o9dv65JBnlTyUDBEIdB/fbEJRneWTOxOl+SyRz8FLNCkygP953Efo0fcXpKjbIrzuROgxyI+3Amm0Hsj5Zl34RErO1p33m8NBnMK6jBHeXcWp/40tZTmF1j2TE8xyzt76cC9ancmkoipvVCUmyp3S1XWUQZWVrSCdY7QPLXcG81AsdoCwlOe7ZpL5h/b8AZ3wr6ccu3qjYtmfsRLkwA+oUTsNJ6Xr/cRUKhumCdDU0+CLuo6UuNTq4DNPVJNUF4XtFjMh/UsR4GfOts7SBtXjOQgIJf6YAl0S3Cl1bhR9g2hvZyuXxK8kTfwg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x49837749118bd52989e66683b0c390e648d6bd14617a48db4fb0ac3c23fa6002", + "0x38c6d5cbf76e73a3fb3f8c056e936dccf9e8b2fe6379cebd58a5bd3358e0da52", + "0xe0d2db8687a4e0f0e475fc73a6b4d01b4c2798588940c476313111c3e2eab19d", + "0xeec6ab48f1d2149b86dfa43a21ebce258f2dee0f29cc3b133cf94f8eda6f2cb7" + ], + "prestateLeaf": "7I+WZd+ERKztad95vDQZzCuowR3l3Fqf+NLWU5hdY9kxPMcs7e+nAvWp3JpKIqb1QlJsqd0tV1lEGVla0gnWO0Dy13BvNQLHaAsJTnu2aS+Yf2/AGd8K+nHLt6o2LZn7ES5MAPqFE7DSel6/3EVCobpgnQ1NPgi7qOlLjU6uAzT1STVBeF7RYw==", + "poststateLeaf": "Mh/UsR4GfOts7SBtXjOQgIJf6YAl0S3Cl1bhR9g2hvZyuXxK8kTfwgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "V+Je3MC72y9Nj/ndRQ22pHbmfIk3L04GyLRx1y8NdQ8MW/VDtk4DFoJytmVgYMhgk14XRGrdjnRV6qZjElRbxhYkKOCVPMlHfFSLRuJywuMrbZD/ED4enQkcwGBbc0y4xD7SGMxmAN/xe3s115EKb7CmMd1OJKgFXtWCxjqZw3+cTMckBaTamyAhF2wZOFqLCiwACHeaSNuxEfr4tnKt0/WORdepDMBFHQQ2PyyRV6rSpX/wC9n9hTipbADlW+UCoiM0ohC9s+4D89xUWElcl+e0EsomsKduFctgqrExO9O5qkY8buHJZWViNKvpoTaRBfeYCaiGh/rHOBzyC5wfp5gloXw7vOkWV1pB", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x1f98ad3e4862403d0ab27479c5949c3e68f715806316dad1139287ae00e9e936", + 
"0xe25797a6a442516c9fe6adff1104f7238df721e74f26494958ac5d0cfcbdad54" + ], + "prestateLeaf": "V+Je3MC72y9Nj/ndRQ22pHbmfIk3L04GyLRx1y8NdQ8MW/VDtk4DFoJytmVgYMhgk14XRGrdjnRV6qZjElRbxhYkKOCVPMlHfFSLRuJywuMrbZD/ED4enQkcwGBbc0y4xD7SGMxmAN/xe3s115EKb7CmMd1OJKgFXtWCxjqZw3+cTMckBaTamw==", + "poststateLeaf": "ICEXbBk4WosKLAAId5pI27ER+vi2cq3T9Y5F16kMwEUdBDY/LJFXqtKlf/AL2f2FOKlsAOVb5QKiIzSiEL2z7gPz3FRYSVyX57QSyiawp24Vy2CqsTE707mqRjxu4cllZWI0q+mhNpEF95gJqIaH+sc4HPILnB+nmCWhfDu86RZXWkEBAAAAgA==" + }, + { + "input": "RyWgwd+Qr5ZO+DBrEHeoHOYKkGSv27AC8AUv7u2tGD8nb4x63hVOAvXbiJNrHqz/hiKCN0PS04CHDVDuugALETTnNJbBzxsXxFQut7x1sQgTrfMI9ZYusMnUgKZU8qgNk0b1QYWEbTOvaCLpMQ9oeyjJbMqIv8X9eqZx8vwKreJvKEa7WWJIxCaLqhMme2Lyq7So2I9YGpAgo5QRbxHSAARLIdtUA66lE37l1Fx+JNlwcl4GD30X1sU9GUdZOtzSHyowWzjz0actn3rjVSmGaOuS3Ah2/gvjQYWC7i3M81anjhdV/2ZCY94AYpr9Lqs+kMxE+nAoBNmJfBoJTZBJrIPqigbSouhH0ijV/5TbuMwfAUQfxSI5ZXQMwdxGwuBImixeglEtxvaYqyM9OJz8n3RHlO8rbSIR8yxShm2SAMn3ry/GoRH5xlxUbLdWN/Q965PJUv0KDi8Vovdj+5ic1XCyLUvWV84ptZyiel+UZdb/CS9YHoPkbcp/GWQIvAUWpNg493eNfwbuZqI=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x9306fc1d963a177e83f4294d9eb3925accaed510895a1e3f574891cf55129601", + "0xe2c0e7a580553e21a6a6871769ab80e27077320bc12104b8292fe3e42f753b75", + "0x4927ece860e24003c45d18f0baec468dfd68e61eaef117b9568e2bada6743903" + ], + "prestateLeaf": "JouqEyZ7YvKrtKjYj1gakCCjlBFvEdIABEsh21QDrqUTfuXUXH4k2XByXgYPfRfWxT0ZR1k63NIfKjBbOPPRpy2feuNVKYZo65LcCHb+C+NBhYLuLczzVqeOF1X/ZkJj3gBimv0uqz6QzET6cCgE2Yl8GglNkEmsg+qKBtKi6EfSKNX/lNu4zA==", + "poststateLeaf": "HwFEH8UiOWV0DMHcRsLgSJosXoJRLcb2mKsjPTic/J90R5TvK20iEfMsUoZtkgDJ968vxqER+cZcVGy3Vjf0PeuTyVL9Cg4vFaL3Y/uYnNVwsi1L1lfOKbWconpflGXW/wkvWB6D5G3KfxlkCLwFFqTYOPd3jX8G7maiAQAAAAAAAAAAAAAAgA==" + }, + { + "input": "7uyV3gHKCTWno05X0NsqCo+ecSbBndWk2chOcOj9/OEdE2e8xZA9D2PwsHc0Pa/kD7Kz44HRg9tZ2x12S9+8tyrX1Xt3qSVa/rgq9tCp2pK47d/ErS2uMN+7z6Q5YjoCb8opZhOVJYMQ1ydyjfKHklPbkWwe5fxZ8kyX7v1om9RgetMEJC6N++NZA8EiTg==", + "commitments": [ + 
"0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x6aa1207bbebcbdb562d89afcb3cc5eedd746af84a099ee5782d39c02a444c866", + "0xbebbf678447f96212658092d091eb3460ab4f40fd714c02f2526dc79e0853f74" + ], + "prestateLeaf": "7uyV3gHKCTWno05X0NsqCo+ecSbBndWk2chOcOj9/OEdE2e8xZA9D2PwsHc0Pa/kD7Kz44HRg9tZ2x12S9+8tyrX1Xt3qSVa/rgq9tCp2pK47d/ErS2uMN+7z6Q5YjoCb8opZhOVJYMQ1ydyjfKHklPbkWwe5fxZ8kyX7v1om9RgetMEJC6N+w==", + "poststateLeaf": "41kDwSJOAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "TOjxDmmIhy7Mlq4a4V11/CFNbv8NeLrJ7cCi9m2fT537JagWJiMdobTVLNuN71dEO8ER074q5m93RkHwt7oJ73+iTUWGiYgfU5yo6hAIK8kV+Z/lL2p7SGza+xJkbTnrR5lfeNyhynrWGL+TqR54y5ZustFTvDEVWCTk87RjXYUHzseDkYE3JP8AHxKQzZgc3GCTsDjmrtzd1SkKb0s9uF15CfGJqAx+wryGlfkeWrAhf72hp5YGH8Umo5onUBWPvWQyRzHUFKv6LX92kGsJ5t9l9iSuYK6rSmMeTYM3LNM2V2S+vGreS+kzLWa1yJlaiQqN0mEJwdvSzTxbBwt6C4y76+hGmY3Puq7fzN5esJEdsXQh1JBjxG3bGZ2JsJHUF9xaxLN5XVRnMNwykHCW/AV4GYNVlB5tJvwY6bKORgvcf5pZN3iw5QZUVlc8a7KEoyA7e1PxSQiulTp1Jx5tD8WJkSFr8Yh3Ce5Pm6H+48LY4AZV2cWIbsq1FJAPMMxcmejnGxN1sG27SLP/HS/7YOJfclRTPy+ZR7DGcwBwj/TKWpWW/OTlMVv2mPrJVUvTD7qQ53PXdJM1FYQRMolHHFEK4ggJO7n12eyDqKR+aHZYLED8Izt8hUrpzAOTstFX3iov0wEr5N38ldQ2stYA7d/X", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x91bc28ac308d1ac17d009f234d35738e9b1692439304b54459d3a2495eb3b616", + "0x07c48b7c309b8cff89cd1c96f7596ebd1218f588aac8238f5f81704780e4ad93", + "0x43673747aca7a3e7428f4ed0702b099e4cb6b3a07b9d876cfe26c93827c7c48a", + "0xf9cd4a3c8e8110e3d72dd2196cddfb4e7f7c0cb487743ab4b0d7c27e9dbb203a" + ], + "prestateLeaf": "HbF0IdSQY8Rt2xmdibCR1BfcWsSzeV1UZzDcMpBwlvwFeBmDVZQebSb8GOmyjkYL3H+aWTd4sOUGVFZXPGuyhKMgO3tT8UkIrpU6dScebQ/FiZEha/GIdwnuT5uh/uPC2OAGVdnFiG7KtRSQDzDMXJno5xsTdbBtu0iz/x0v+2DiX3JUUz8vmQ==", + "poststateLeaf": 
"R7DGcwBwj/TKWpWW/OTlMVv2mPrJVUvTD7qQ53PXdJM1FYQRMolHHFEK4ggJO7n12eyDqKR+aHZYLED8Izt8hUrpzAOTstFX3iov0wEr5N38ldQ2stYA7d/XAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "AvTCu82tHn4vqAeJvRF0D0ZzROQvXqApfQS8wMMPGYeMKMoxv1ylmcZVy8449joQXyI/MCvRBw9ifmRPvvQrz6hAEFrrc9vnVeOGZ3JO4kqqwtXCsBk0uWdX54fTJwKltF79gR2FUWv6jdub+vIPPc/2GNV24VwrYCLJKM3ngc/V6MjklLR2SUufmcU13Nz1GZrNegoEwXzq3SjSX7Jy+UBMaD3OIqwB6TP83BHVlgDGSmyROUcFtXs/P7xakgS4ehCQImklzuDO9DRdMOHM8QVG+7Ru5liTcdnLHGZmuw9w15xzukp3lcBz9r0YE4JpNp88jHSRWy9je1YLiQZqCzsezJTZayTV+PDg7yhzknV7I9ilh9F2Km715Uq8mfqLoSu3P8jhFV6TpfxdbTsf//GiOj2vn50p1OTpw4szVojg3zKFM8LBIsnBlZlhjehSik5/UgXkXidGxftAIgs8QpK1ZM9+Egq77myavndGABvf4Jdj4evZPxg/7lbfF5M5NMRta3z2EL7F", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x7ecf008045f77c20bef4585b6fd642c26fe09f2f56a5acb504cfd4bce2e3fadc", + "0x253fb668c2363e1f74df922439adbe18e4825be8bf1f900b13c63fdee66855c5", + "0x9280e4e10b784a59a620a6e7f6486dc9191529cb19ecb76a909cd99797f269c4" + ], + "prestateLeaf": "S5+ZxTXc3PUZms16CgTBfOrdKNJfsnL5QExoPc4irAHpM/zcEdWWAMZKbJE5RwW1ez8/vFqSBLh6EJAiaSXO4M70NF0w4czxBUb7tG7mWJNx2cscZma7D3DXnHO6SneVwHP2vRgTgmk2nzyMdJFbL2N7VguJBmoLOx7MlNlrJNX48ODvKHOSdQ==", + "poststateLeaf": "eyPYpYfRdipu9eVKvJn6i6Ertz/I4RVek6X8XW07H//xojo9r5+dKdTk6cOLM1aI4N8yhTPCwSLJwZWZYY3oUopOf1IF5F4nRsX7QCILPEKStWTPfhIKu+5smr53RgAb3+CXY+Hr2T8YP+5W3xeTOTTEbWt89hC+xQEAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "KxErXRtL1JhC7Tl36SnTA6yN1kPJ+FHWHd9Qw3EL1nx7/WjXd3adHalOH+IoiUE8avgcjU0OswihibvyaphqgjYW4aFleB50E4ezd6d6mh5uWbXJM1NXdmUH8BBwCmLoJokM8lv16OJSqavrVRUEcY8Ldl09pmg83KO4wRxjhVe20DUWBnTXztMNlx4HaAcKN7Y67KZQ3m9Oos+CdAL75Wytioo0WVYhef2zg2ZdreOePOACaK+6alFL81Iki/C8Uja2v+YjX8YUs0AsfOpWOZL/Fn8p7BizAwYj", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x8a396ea38a21ce7e46777864c464f655b0774045c084d8aa04d5f62ce1ceea4d", + 
"0xf72e61753fd7f471e01d8ad73e725b8251d981ff03161a9b21c195b59bb9e05d" + ], + "prestateLeaf": "KxErXRtL1JhC7Tl36SnTA6yN1kPJ+FHWHd9Qw3EL1nx7/WjXd3adHalOH+IoiUE8avgcjU0OswihibvyaphqgjYW4aFleB50E4ezd6d6mh5uWbXJM1NXdmUH8BBwCmLoJokM8lv16OJSqavrVRUEcY8Ldl09pmg83KO4wRxjhVe20DUWBnTXzg==", + "poststateLeaf": "0w2XHgdoBwo3tjrsplDeb06iz4J0AvvlbK2KijRZViF5/bODZl2t45484AJor7pqUUvzUiSL8LxSNra/5iNfxhSzQCx86lY5kv8WfynsGLMDBiMBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "XCg8YCiTI2f46kIaq7zPtH6/qXjsCjjTqJ9T8OIRWrxk7S44HcXD/V72OryGTkghwWJEaZGE3h8iSOeimpclxuVoHl2lgucVrja/U9cqgJryH3pMiikQnrIxRiWLdVfrkYK9F98iNxwd3cbZMYd6MfH620ISHBek84dz4+Ae+aeyLlUd/CKwHZ1CZYOymLaRvlkviy7vhqq9Fyrf1FYXg+bp/gLUxup9cNsX1Q1/OVbieyhOFd39rXJ6ACxWfGZCMPJ+Wuse7ngKebZ2he+G96hX2dUWGSOYxsiozqoP9kg=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x3840877367d901f1bf14f3d589aed7e83948595e0222868a08b59873cb9230df", + "0x4ea862d23c02f1a6cc17dbf5ecf1822fa357304128fb6d2edb82a640b0a3ee09" + ], + "prestateLeaf": "XCg8YCiTI2f46kIaq7zPtH6/qXjsCjjTqJ9T8OIRWrxk7S44HcXD/V72OryGTkghwWJEaZGE3h8iSOeimpclxuVoHl2lgucVrja/U9cqgJryH3pMiikQnrIxRiWLdVfrkYK9F98iNxwd3cbZMYd6MfH620ISHBek84dz4+Ae+aeyLlUd/CKwHQ==", + "poststateLeaf": "nUJlg7KYtpG+WS+LLu+Gqr0XKt/UVheD5un+AtTG6n1w2xfVDX85VuJ7KE4V3f2tcnoALFZ8ZkIw8n5a6x7ueAp5tnaF74b3qFfZ1RYZI5jGyKjOqg/2SAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "3tCzFeEQ/cLtfrIrf3xDuG3HIlg3bn9sOR4FFzYSjPqma9CORBkKrq1myqUT5/WFkFRB+6zj/17Kbl/T0M6BWTgQZktrDpup+xCiU2m7ZKqvYImjW1X638odReqVo6XdrC6NyFn9YE/ssHioUKX5FMs3X4+TLi9UmW3brrNfrkw+lum704sAPctDtMwh/LVPapDfhqZlG+f6yTrmp3D6oBLqwcrAMNjIJ5LRJHsI31XG9/Pcd99qCPyg/xzcObVq9XyDg5vX22dSDe204eesHUyfsc1z7Et6MQY99MW83Lp1CGKJc7EgDgj3", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x2b345b6138b0af72126dacf2bb933135ec22d9927e66905753ed4dd7673fbcc7", + 
"0x14b3d8fae959461ff1a0682c7e82d63477b4a1bdd83fd7425ae51bd1df4b3188" + ], + "prestateLeaf": "3tCzFeEQ/cLtfrIrf3xDuG3HIlg3bn9sOR4FFzYSjPqma9CORBkKrq1myqUT5/WFkFRB+6zj/17Kbl/T0M6BWTgQZktrDpup+xCiU2m7ZKqvYImjW1X638odReqVo6XdrC6NyFn9YE/ssHioUKX5FMs3X4+TLi9UmW3brrNfrkw+lum704sAPQ==", + "poststateLeaf": "y0O0zCH8tU9qkN+GpmUb5/rJOuancPqgEurBysAw2MgnktEkewjfVcb389x332oI/KD/HNw5tWr1fIODm9fbZ1IN7bTh56wdTJ+xzXPsS3oxBj30xbzcunUIYolzsSAOCPcBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "bfAo9s0t5LyD6E/FL3YItSp7iHRxZsByDcTBKVGLTTYZUOEWd5PDCX0J5GgN82XcwfW8zko0e/ejkOrDk0fvVefhLAD8pcG17MbtQ4wnZ8yKUSAZjA5LMA1gz8MpSMsDWt77ZNb2HXGeHNL8BWu7WhuGzm8d2GpSNCm0b7IpY6mBi1m8LNByXyxkHauXXkNgK4qM/gMHWwsPsW7V344EtcM5YjhT6rfboO1zuAF2udqcZblSuHEQmq8psLJj0mj7t2EPY+p1yxmax8zkfG5qlJUNY+jOTiACzMGGFBOh373aC1MrrVd+Et0BX6IC+k46gZT/jIs2MBMeKl/0NGG0urvHvayTLJXYJEKzplvyDaR7PrUSdxprJIQHfrZe9LnOlZe4PIw1LHo2vrbJ+9TJPR3izoeeUPeIIQjSSG28NzJ5Iar+86ZdSTEI/LnXEaAYSvQ7X28ShxZuHThkWxi9q8pdEv9BWXc=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x70ca6cbbd65222ea5e90b0c595d1cf55b423e1503dd07edaffc5a4a8babf3004", + "0x9e04f3082de4ecf1669b7cb0b458a8cafe452a424b395808cc2262236b71b729", + "0x115b8dba0fc4524995038e8e06953799cefd0119c75b306020ec53f884480e8b" + ], + "prestateLeaf": "LGQdq5deQ2Arioz+AwdbCw+xbtXfjgS1wzliOFPqt9ug7XO4AXa52pxluVK4cRCarymwsmPSaPu3YQ9j6nXLGZrHzOR8bmqUlQ1j6M5OIALMwYYUE6HfvdoLUyutV34S3QFfogL6TjqBlP+MizYwEx4qX/Q0YbS6u8e9rJMsldgkQrOmW/INpA==", + "poststateLeaf": "ez61EncaaySEB362XvS5zpWXuDyMNSx6Nr62yfvUyT0d4s6HnlD3iCEI0khtvDcyeSGq/vOmXUkxCPy51xGgGEr0O19vEocWbh04ZFsYvavKXRL/QVl3AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"Fn/bjMz27Pmo0ojZ1PbO1pulKlSVEA3NkoaIXezePMDDWhuLfbwngufLxwoyPb3cDAg8JZWXaE0O+liog7xV0p99KYUVOf2bmXcc/Yx1P40ikNkJT2zhhUoqaPBgBAsfSvw0eGBNmTer5Io/sjCdAaY46UnAVG0m303QSc7seGgY52p3Q+p0fblbjieR0iDmtStbnmeRMoc1CybVEWrkUM8mhnoHGzP8fe5i5BIOhGdOEhFEfNr/E128wwTjRYP80nAPZO2vx3sSIrYBl1uAAxglTZd1qqY8ds3LRAT7ioZSImxJgoGwQhaZ2g8zGBFRQlXu", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x5bb3fd28f16cf712e71ef328668bddb7304a23900d57281146e3d11379f787a1", + "0xfa09386208b45515b49b97f859070db73d59b4d5a701ac3925c9ffc65c5648a5" + ], + "prestateLeaf": "Fn/bjMz27Pmo0ojZ1PbO1pulKlSVEA3NkoaIXezePMDDWhuLfbwngufLxwoyPb3cDAg8JZWXaE0O+liog7xV0p99KYUVOf2bmXcc/Yx1P40ikNkJT2zhhUoqaPBgBAsfSvw0eGBNmTer5Io/sjCdAaY46UnAVG0m303QSc7seGgY52p3Q+p0fQ==", + "poststateLeaf": "uVuOJ5HSIOa1K1ueZ5EyhzULJtURauRQzyaGegcbM/x97mLkEg6EZ04SEUR82v8TXbzDBONFg/zScA9k7a/HexIitgGXW4ADGCVNl3Wqpjx2zctEBPuKhlIibEmCgbBCFpnaDzMYEVFCVe4BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "aKNFw0pM0XmPQULBOYhXDEpG7xyGpTPpLKWdfAqcx/TsA8M5fxeCyWV01nQK", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xd587c1123b4ff0fa464810067afb76c630d2f55e699e10c87c0a0ef9b8066fb7" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "aKNFw0pM0XmPQULBOYhXDEpG7xyGpTPpLKWdfAqcx/TsA8M5fxeCyWV01nQKAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "1AjkmC7TXNJh/LirlVsFxMYFLR0QNB4Wiwi16JAepiRK42RpxBrbhNPOIJFX16r+vK31OWgJwxlCQamOACegcfgDYj6dRhQolfOFs8rccy7bYLsvUA8wgg1LBs4iQ6GJgZHkpaOq83zxZSqiZ+wNyhBH7ZRayhkti1XAbO3zZxJaG/RHfP3JKs6m/VOA7A47/unOIwaZjIsJZPLJABJ+wAhAXNQLQlgL+Tt9JWPr4mWW3d+K5iMNG4R7VJC8Y9MPVWX1P1/h64bToqRj4ELOp8nxsbDmEEnPmXpm", + "commitments": [ + 
"0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x7c5a831165523b98e237daa7ec4f137fd85b4dd3e667bc49064cc31965129850", + "0x61fd8ed7abeb89608f8e48309f153b8885b4b4e4bea4c3b7e2c318b977737092" + ], + "prestateLeaf": "1AjkmC7TXNJh/LirlVsFxMYFLR0QNB4Wiwi16JAepiRK42RpxBrbhNPOIJFX16r+vK31OWgJwxlCQamOACegcfgDYj6dRhQolfOFs8rccy7bYLsvUA8wgg1LBs4iQ6GJgZHkpaOq83zxZSqiZ+wNyhBH7ZRayhkti1XAbO3zZxJaG/RHfP3JKg==", + "poststateLeaf": "zqb9U4DsDjv+6c4jBpmMiwlk8skAEn7ACEBc1AtCWAv5O30lY+viZZbd34rmIw0bhHtUkLxj0w9VZfU/X+HrhtOipGPgQs6nyfGxsOYQSc+ZemYBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "7GzH5TVEhQ==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x1a1f489ba334d4068d05ea8347fd83a14a73082101f8d2daba09553485e73c60" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "7GzH5TVEhQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "x9xGIRFaMdQq0l4MEJbBDSzCSTZLsd/0UkOSiHtALhd75Gi/c7ObBdk=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x1811020a0424b78b1697b9cfaf67ea339be0988c5bfa859725bfbb0aec7ed76a" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "x9xGIRFaMdQq0l4MEJbBDSzCSTZLsd/0UkOSiHtALhd75Gi/c7ObBdkBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"O3mF1GL0vHkl1aZEKundwta4m+07cngnlLAtOOlhZA2n9NVQHvffMRc7kgg1zs9nUWNPUcj+O994cRWgMC1k", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xdf980f53d2c8527c8438791243c129bd2aadb23872502fa0e4447e1b0b2202c3" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "O3mF1GL0vHkl1aZEKundwta4m+07cngnlLAtOOlhZA2n9NVQHvffMRc7kgg1zs9nUWNPUcj+O994cRWgMC1kAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "h0RMltv7EyhwfqVLrCEDhVvTJ1tNmbM+C0InbnQY+Kk5eUWfMUMHvUqR/Qgmg9aLlmoFROkcUpfH4AcMm0eFlxnav5yoWlJJmkLKR0CWUAT1jhsxkLOVqpNPWKRvl1IZSpgPW2Czp9SlYP7Z7yfrIovxoNM3qjNWiiXQIqmxl9rKXcXNY/jo0jla8avWXD7lIF7HbE0IWxNTqSwGc111TQUMrzRyonNz2uwW+2YzC4hrwJzkUfAe98pc7qWDv5iJyOU6iPhymXr1Rgead3DRgE6gpdOrVwWqLpUFajhk8LVKH0muEVsQqkSL3/RkuxyYJ1SdbZG0FD70wiGSTBlY", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xfa1bafcf85ada822d18184fbc939be33cd6bea9203d763a6baba6daa13e925c0", + "0x175913e44136b3a8b1eeb4ecd8544a12d5d79b98885c2453c7823a958245953c" + ], + "prestateLeaf": "h0RMltv7EyhwfqVLrCEDhVvTJ1tNmbM+C0InbnQY+Kk5eUWfMUMHvUqR/Qgmg9aLlmoFROkcUpfH4AcMm0eFlxnav5yoWlJJmkLKR0CWUAT1jhsxkLOVqpNPWKRvl1IZSpgPW2Czp9SlYP7Z7yfrIovxoNM3qjNWiiXQIqmxl9rKXcXNY/jo0g==", + "poststateLeaf": "OVrxq9ZcPuUgXsdsTQhbE1OpLAZzXXVNBQyvNHKic3Pa7Bb7ZjMLiGvAnORR8B73ylzupYO/mInI5TqI+HKZevVGB5p3cNGATqCl06tXBaoulQVqOGTwtUofSa4RWxCqRIvf9GS7HJgnVJ1tkbQUPvTCIZJMGVgBAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "Gy9pBTkfOeD0fUr8zbC58w5zP6YqVJZGv52nGLRXSlBlWeJLI35GmP219qhBhPZipJID71G+Hmjea/IB/N4poRzxS+9Ky/7wQNWMOadc/d+VVADXPu71Cy9IV3w=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x85baa18bcb8dac402216a7c99c6bb508bfc96caabdfc83c7bccc0898a0389910" + ], + 
"prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "Gy9pBTkfOeD0fUr8zbC58w5zP6YqVJZGv52nGLRXSlBlWeJLI35GmP219qhBhPZipJID71G+Hmjea/IB/N4poRzxS+9Ky/7wQNWMOadc/d+VVADXPu71Cy9IV3wBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "JstIxj+GOiDJaEtSsZLg0MDSvXRLEFo3DhAkyInR0UA/i+s4R4YYvGUw0TCGsWsGepVPFYbTK+iivctM8ayPPEQGsFCVbKj78kG2QiOswmFy57L/8HhNatetUY9cwMDXwUY+z/7pEsh7GjfL9Sb60HKJ6a+imgyxiUbYdbx06mLfwRy8O4xn0ZoUZKacGvR6XXfnKkBx6EZnQ4X/CmRJGEMyDtm9f+386hlnYVyrZKgGh0C8NiLh++Op1jSYUJzCDkSet1ZFVXo1pAEzSCR3cueBzR1M6GM=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xe3d55fc5170899ae0ca36a90920c6ca338608f4a5dcd8d93e44e234f36917d68", + "0x159eaff490f4841410fe8eded5148778aa5ecbd0fb1bd908f992a85b36e9a613" + ], + "prestateLeaf": "JstIxj+GOiDJaEtSsZLg0MDSvXRLEFo3DhAkyInR0UA/i+s4R4YYvGUw0TCGsWsGepVPFYbTK+iivctM8ayPPEQGsFCVbKj78kG2QiOswmFy57L/8HhNatetUY9cwMDXwUY+z/7pEsh7GjfL9Sb60HKJ6a+imgyxiUbYdbx06mLfwRy8O4xn0Q==", + "poststateLeaf": "mhRkppwa9Hpdd+cqQHHoRmdDhf8KZEkYQzIO2b1/7fzqGWdhXKtkqAaHQLw2IuH746nWNJhQnMIORJ63VkVVejWkATNIJHdy54HNHUzoYwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "/tWP3uV4e9CVYkPJL8Z4kw==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xf38cdf1a78988c925677b1078d433a759ca4500334818977f2a480532921d78d" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "/tWP3uV4e9CVYkPJL8Z4kwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" 
+ }, + { + "input": "bQHlLQQAa9z8oay/nSDVQJURbG/WJkpRBmzYuxfSn7gtd2BWzMxBYegXsaZ4HtpSOIYfRJQX6q2dASJI+naNH3BcUgbcn63G1XeyaZdEptndY0d2JqnWQjMEsurhZE+xJYhdv3PISfZxeSbxCiL1xE3vPfqbJ8zz//E1C+DSnHMfOc91tf09bBBgrqsfUyarZkYoT91m8mTZhEHfXE6Go0lEe4xuKugeArM3XIvcaen/q0i2Om3mh6dddYaOVtpy1tuevw==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xddcb88fe5a9d0df55e4c94569523b05b58eb515ca6114674f61e9721c8357f7e", + "0x4781cefa8e8bdb384f677c4a87ffd201f31980eea5ed4c321b5770bd6d5bb9b0" + ], + "prestateLeaf": "bQHlLQQAa9z8oay/nSDVQJURbG/WJkpRBmzYuxfSn7gtd2BWzMxBYegXsaZ4HtpSOIYfRJQX6q2dASJI+naNH3BcUgbcn63G1XeyaZdEptndY0d2JqnWQjMEsurhZE+xJYhdv3PISfZxeSbxCiL1xE3vPfqbJ8zz//E1C+DSnHMfOc91tf09bA==", + "poststateLeaf": "EGCuqx9TJqtmRihP3WbyZNmEQd9cToajSUR7jG4q6B4Cszdci9xp6f+rSLY6beaHp111ho5W2nLW256/AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "evoB", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x38859b60e7bc24fc4fb617ce269ca5eb01359eaf45a40b0b50f9248cf0904a32" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "evoBAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"tC4HUMrXZ3p1UM4PpGTSYOjLWuLb9BUCGwbTU317sa/az2/NlC7+96kJyL+NVS7+b010XdHKFtS5RcuANAgyqSEwSvV3pTA0wmUymaERp2yEVlwfWwLz7/COl10+rLOft//Q48PVuYxqswRCpbWC570Jfz6RPAX9CZtbIe7+9Jzxfp4hIPlqC4dBuiGoIAuRu7U3jssEYLjtjlWTn5Kip9VYyYyzhMshcYQwOsvm6nIGe2aD9DtqICEaPCAu3mUEIKZ3B1BlyiEkMk3RoTI0n00PxgzKnSZ/MgdyjKoAhV8OyN/8PJJvNB3Jhqiblb+pVhP/DYilY5eFJ3Oy3YpPZcknzX9GwirQP+jWr4N7spt0c8Wzssi0cHXOxLkcy5MuHMrvkcI5lih5HQyzo3Z9rSS5apDdidANjar368I4f9HwPquq9+WRA90qf9dhFp6KlZotH4LzrHyIbPSshpIhgwg1bnVq15PICPT29MSH2I/LrS+RWnNS3A==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xbc688ec6014c19982e8950edc434aab78dda6c284b8bf6685c1ba6199c65005f", + "0x837c62188c9acc191b9860e11d5607ebf25715bb4ed725b62eb0051bf7842420", + "0xd4cb178f83a9174a2f984aab73b0d599f288cb7c8df9ea7643194f63456c0a1c" + ], + "prestateLeaf": "h0G6IaggC5G7tTeOywRguO2OVZOfkqKn1VjJjLOEyyFxhDA6y+bqcgZ7ZoP0O2ogIRo8IC7eZQQgpncHUGXKISQyTdGhMjSfTQ/GDMqdJn8yB3KMqgCFXw7I3/w8km80HcmGqJuVv6lWE/8NiKVjl4Unc7Ldik9lySfNf0bCKtA/6Navg3uymw==", + "poststateLeaf": "dHPFs7LItHB1zsS5HMuTLhzK75HCOZYoeR0Ms6N2fa0kuWqQ3YnQDY2q9+vCOH/R8D6rqvflkQPdKn/XYRaeipWaLR+C86x8iGz0rIaSIYMINW51ateTyAj09vTEh9iPy60vkVpzUtwBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "EC+6W9qOHpBbkZ4bddtI5XLTkI+RqMvwGV30CdmjcFck/tJGV+6slQu/zD7YqL/hSHCZiqV+uzfN7qohYLSv6SRgnRDcnvcOT5Cjpg8asRH8ZRjBxahqXnVIlY9nOIYoyu+HqMUFm6TJNorPTtiIpUMW1waBYDOFhEBEtAAdS9U80jrf3WmhVBOCVURwQSveLHzZNdZxIHzyOFmZEkijELI/m3Ob/isfh5udJMk0yRTXq6jO+bATgzPegfRjaxSIA8l527VNR/MEuopKB6Cfza9TgWdTn04hk3koxI8BvExOxBI5a2SUm6ZKbI63QgbaTkif+NDIS5GZkIrGPaO4Dyk2gxFAzPWnoBVmAr3TO4BnXdelxSb80ROee74zVVOqQbEAE+caMjXOHCXQZY25grC9ufY4iqMNQRKAiUg=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xbe64214360727aaf82768e2c8c6eea5f5fbca0f0a6a8a5d4bbeb2aaa999cd9b5", + "0xdcc702820b8512ab42e85ecbed1c1ad91bda6f27dc3d5c62c01c5e2cf6b55f58", + "0x4bc0af92dea76212482183a6822c450d3dd2babf0451f14fe107b4a4bb962a50" + ], + 
"prestateLeaf": "E4JVRHBBK94sfNk11nEgfPI4WZkSSKMQsj+bc5v+Kx+Hm50kyTTJFNerqM75sBODM96B9GNrFIgDyXnbtU1H8wS6ikoHoJ/Nr1OBZ1OfTiGTeSjEjwG8TE7EEjlrZJSbpkpsjrdCBtpOSJ/40MhLkZmQisY9o7gPKTaDEUDM9aegFWYCvdM7gA==", + "poststateLeaf": "Z13XpcUm/NETnnu+M1VTqkGxABPnGjI1zhwl0GWNuYKwvbn2OIqjDUESgIlIAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "ZeDJP67Sw5nYOdhdEIfRUVZL1fFstyMtPOsqF5UYQzOBhgH8dpwpjuvcN6/O1uhtGhFWwbljx/jcazFF2BWfBWf3eC0LWHRMPUiiD6OLrV6k+zrS2yWxqGkulIDOndPUaVeFKZWNSAEi19zmckdy5ItruOIDgo/fBDsH9Nvz/2Wk8j2HkuE+EE+nR9Y9M4fnH79BZk0iTqaDCjwO6bxcfX7yWOHJiWcK6Dl+9fP/Ol24E73andCsbzjcqRs3", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x1bfaf14d73c3d16d6277777e30253e43da7ae876a245749379bc2870f8f9fc35", + "0xfcfc506a0bca1c16ff13fcf14c7f4f1efd72cfd3d74aa17e61ddcbcfe56258a9" + ], + "prestateLeaf": "ZeDJP67Sw5nYOdhdEIfRUVZL1fFstyMtPOsqF5UYQzOBhgH8dpwpjuvcN6/O1uhtGhFWwbljx/jcazFF2BWfBWf3eC0LWHRMPUiiD6OLrV6k+zrS2yWxqGkulIDOndPUaVeFKZWNSAEi19zmckdy5ItruOIDgo/fBDsH9Nvz/2Wk8j2HkuE+EA==", + "poststateLeaf": "T6dH1j0zh+cfv0FmTSJOpoMKPA7pvFx9fvJY4cmJZwroOX718/86XbgTvdqd0KxvONypGzcBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "+2Sm8j0qq7iqM0k3evWER+e28ZIHbQYaXV79/6cPAkBd1pcuCsA2rsFHJ4YE60RtUviaiDKZmn+yrEd23ZET2De0Ws+eYO/V+D/qldWvL8LI87BSuy62adDjq6bO6e5sEKsopovHiJDYq1LALR+EqOftQPvAT4/xC8yoFEC/rV/k81QmGJ4cpmG0zYOJpIeAZJ+D85AFCDv1M/yj0tVVdhYmcQCobbvUMy7EblzJZs5ZFnuJ78D/6LH50Ldy4H37AJH3pptsV80D0vvPQU5zdVfUA4swUYSQqRw9KGrtjGQgCCHHLrSQzFcMbpvtDsZFb56ix/JWGLKLVVGbA1btq80RH90EFG5IcXo7VyIqGnDQIPJW8hQk1vAluokZ9dciBhkVrsvg5mg4f4clZ7TN0G/uXBzkF9mzjRo4qvGHyIRYqL94kWMZHT8EkeQ27tcXf49hBkBA9ftF66MzMrOiiPaESYhCCyOFHI6iIy7587FfDDUGE0wkNbafxkvZtgJ7e1s20Dtu3d0aDUH1szIrWIvrfRpe7mbP6/+/9EXMwOK23lC+MCm/ykuK3Px8l5jHIEvj/0ZMlIn5OWjTAEY4G1c=", + "commitments": [ + 
"0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x65815809d69250b67f6ec42c96f375fde201e879ed394d318a2840592535ab68", + "0x6c2d4ce7ac4605c324e7a23628a3846b98d28af41db84680d8a598670dee9928", + "0x40bce7b1abce938dc4f8b76c8dca3dd2740096a8e1f511bca4c6ade3fe730220", + "0x8181226e378240f61866948ba6984e16009879643db599225dc170b05b584bec" + ], + "prestateLeaf": "0CDyVvIUJNbwJbqJGfXXIgYZFa7L4OZoOH+HJWe0zdBv7lwc5BfZs40aOKrxh8iEWKi/eJFjGR0/BJHkNu7XF3+PYQZAQPX7ReujMzKzooj2hEmIQgsjhRyOoiMu+fOxXww1BhNMJDW2n8ZL2bYCe3tbNtA7bt3dGg1B9bMyK1iL630aXu5mzw==", + "poststateLeaf": "6/+/9EXMwOK23lC+MCm/ykuK3Px8l5jHIEvj/0ZMlIn5OWjTAEY4G1cBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "L+x6UWa4Rgm5LHUcwpki2J9NtnTVoIGMbYopms9v/U0bicETMtSC15rxm0KvOSkekklULQLz5CrvoFl8ikQKV8tTnedD5UWWjtP40Qs0MCDUJVzbdNZKzQaPY40vO4bMt7A4wv+1tRtM+eH1RkEhvil/lpZd9diiMxFkjKt3EFZ9uxD3r0KGtFmdwPCXec12tcT42t2yucXEPVFJu3zhggp76H7FPeO8GjAHN/paoNq3wNsIIHfmPXyWaQrq7I+cMd5J5eDRKoct4ZjALXXGT1+5QLIGWctzxq3jMCz7vP6XUlRqUOntEtkliCK/7e0S1ELzlG0=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x8587343e3e6c2059a0335c44fc19a11bf63cb0790e3eb8e57173ef54bfd363b2", + "0xf38b109226bec4ec6685d4253e00c26c72aabb581b2376ecf07a475b559733c3" + ], + "prestateLeaf": "L+x6UWa4Rgm5LHUcwpki2J9NtnTVoIGMbYopms9v/U0bicETMtSC15rxm0KvOSkekklULQLz5CrvoFl8ikQKV8tTnedD5UWWjtP40Qs0MCDUJVzbdNZKzQaPY40vO4bMt7A4wv+1tRtM+eH1RkEhvil/lpZd9diiMxFkjKt3EFZ9uxD3r0KGtA==", + "poststateLeaf": "WZ3A8Jd5zXa1xPja3bK5xcQ9UUm7fOGCCnvofsU947waMAc3+lqg2rfA2wggd+Y9fJZpCursj5wx3knl4NEqhy3hmMAtdcZPX7lAsgZZy3PGreMwLPu8/pdSVGpQ6e0S2SWIIr/t7RLUQvOUbQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"SBpvZe6U4DpWPmgdDCMRTdWN3kLWrzjM6uKzA2/08baRIUhYguRJeP12DgO6O7XYMyzJMPffzpoqtbEBcdneMLjP+V/qt+sA36RDAyPusmMgx1tX6kA22+ddJyF4AxQEyh3gEjiMFBj+RAu27qiP22hdBlQDbkzwfMjkf7MqWJ/JqrBZXkkUIn9eCEKci7llGB44Vd0In7nssZ5XnMIq/wWV8NlUABr5IQEMstGWVN1ujOTl9ATinpFGqaKSmcXsEiOkDRuBJ3IAyPzbjPv2jDwfon63IIWa3gWTKkhXN03n/Wkx7YaoLiI1/NNU3i+4sJp8j0YWTJJUcuMYRkN6H+o/3bIS+TB//GO4vj66MPHWK+bpGZgv7dslUpowS7c9+DRIF2peKR6SV0zhMzdfjghPafu32/e/FsD5kbs6YY/5oRuFHqYgCobjJxyrEdMdVoE1/ug4OnC/TywE8krvpmCNrlNvH+YJ5I9/BIFOsg9PObjX5gxnJb4Ziu/+2B3phg5K7IGen+yTp61qwmZZxQMCWr4cc4/x1/c1uDcA15UIBO44ezvtfevdt4VQJCVMcbkSbZOIEBirKgY1a24uFFfy7IKLa2EDFWSyZxLwwBPbI4OM4UqltlNs2UN2soTAK15ODQ==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xb967f1c88e6409034042352fd9457edd5242011eb3bb311f79e8dcff2fb7ece4", + "0x422817ae854712d4b23180f01303cbe6eb53d13152059619433a38999cdc32aa", + "0xdc0935a99ce5cbe914c53d7a83d7cd735ad3b8d8bc4b833ad411cc815f3dec98", + "0xe46d6fcd0fd03eb50aa8d43420f3234a15387cbac907c9fbdde1a914a01e624c" + ], + "prestateLeaf": "1ivm6RmYL+3bJVKaMEu3Pfg0SBdqXikekldM4TM3X44IT2n7t9v3vxbA+ZG7OmGP+aEbhR6mIAqG4yccqxHTHVaBNf7oODpwv08sBPJK76Zgja5Tbx/mCeSPfwSBTrIPTzm41+YMZyW+GYrv/tgd6YYOSuyBnp/sk6etasJmWcUDAlq+HHOP8Q==", + "poststateLeaf": "1/c1uDcA15UIBO44ezvtfevdt4VQJCVMcbkSbZOIEBirKgY1a24uFFfy7IKLa2EDFWSyZxLwwBPbI4OM4UqltlNs2UN2soTAK15ODQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"2QJIWbRUBBG2N/NWq/0sBTi+1xV17LSEombqPr1d1B/e+PRV7KV7a4ztE6rAwCfaFXzb8O/muDh3Wkcs29q+0FSkdVrJEUWH77XEvrjg3j/thC4ARnE/Usy7b/+MDdJl+0p/7uuF+bUbYsF6NgJT4WHqDSiGBs5ZGiwPe8gBiDX2HEyLQDQDEAtcAVbLI8on23epD9/cCS91UygxKYrx1CZyfRYLYEKZ9LyH0ltmll0AqGRvQHuCHq/obtlnW+wfw9Pg+E/0dzMX/B0ESCIKBTgLFLCTupSvV4rwYxo7yHYcAvIKD67HECyncSqp6yVSbsxavmNzlWgo4sfjOhWmPGUsQSb5gmw+5gccDOWmTouTVQ7tOkP8A4n3UpUM04qDQtgbhSWSuxtJ9bb0wPtavJVb1fsZ7N/zb0URAG3M5Kye7ZtDYDwhgEjS4+MBb3DbBilViAuIM6D7fHXpdkSi67TOMmoCz0JWaaNPuWuHxVixPckB2uE8M6cXSNkUVyHsWi1+VVJqHq+bLTSSt76F+EBAICDCtGpmoAxcYWZIXU8xfDG4osgoAyROyR5z18m6Me4Llcw=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x5ad3e588bc4e1176b34088fef035587577c589936a98ab49905bc4bddfc99b67", + "0x90f85be195450bb1744d71057b67c90c1b75bb59d8cf4c3bdea1e757f28f7655", + "0xa284918096cb63f386d38a9493583da530c7937ed570c5f6bfc952c99ac76959", + "0x6597135748dd127cc5f095c2da3b7a28a78aa67b4e1fc218ec090defc1201fc0" + ], + "prestateLeaf": "k1UO7TpD/AOJ91KVDNOKg0LYG4UlkrsbSfW29MD7WryVW9X7Gezf829FEQBtzOSsnu2bQ2A8IYBI0uPjAW9w2wYpVYgLiDOg+3x16XZEouu0zjJqAs9CVmmjT7lrh8VYsT3JAdrhPDOnF0jZFFch7FotflVSah6vmy00kre+hfhAQCAgwrRqZg==", + "poststateLeaf": "oAxcYWZIXU8xfDG4osgoAyROyR5z18m6Me4LlcwBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"Nn2tikbbV4mNX+t5FfPtWH+fYknuGvo/rC2dAmatoA6PAOCuRlWM5ogABBwX+LAXjJeNX5AL/L6TRcz4UGNLsyo/1V4ZNQe5+NPQxeh/2470e5z3QsvL27C+ZM7kOHXOXjcNEA7+0ikU9bDXxOIRxjni7KmEW7SGA8XItKI8HV0QlDaW5b9irY/nNVJz5kXHlR1scXVTQrLAjT37kWlvuZd8Ue/5bknPdH3Ix5Gvk9zGpthAmwBpmVAXCF3BWiZtLCJE+aP9tSiWCn+ezb83mmHTktLrar+xWZYXSf82cIIzgkPnPiWL0xz59QV+PoXfh/74/ICRCSyzGiMkDD+eDGdsiu7Qox9KFkLSvxUQ5hKLZwcZIldTMGL0XOdGxRNjx6BQIvzCZ6ns2d3V+pBjm9QUiJiwCasxzExWOU/2sVMPXCaQx0BWZ9W8Su/VIsZsLtYIsNyFs65/OXMsH/3o5/F5zKg1fyxWfyFki2WmBP2LJpelqFrWoKe0UPMYCsVTuGkgOQtJPFA5f/4VKNdaYXguvCoC0DDx1cn0qcuaKGvqW2MsuCCteT8tBBy1x9KLivPmSxMJrgvHAi7rQYOhovkPdB7/", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x209bb0d076f9195391f7ed900a4d8c788cc9bf114a1c0c467f5555920b061a70", + "0xf5b1c844495f0396f5418e9eb4e487d26f70f0e464c83670e8efba62f2af843e", + "0x6555bc7af8c1790ace374c7ae84e169d4acde81319606c6ac37fa846014e2ace", + "0xc2e9d0bc1c81d6860da30267bf89552a542ece90ed5aac3cc6da5bd1ee114aa1" + ], + "prestateLeaf": "i2cHGSJXUzBi9FznRsUTY8egUCL8wmep7Nnd1fqQY5vUFIiYsAmrMcxMVjlP9rFTD1wmkMdAVmfVvErv1SLGbC7WCLDchbOufzlzLB/96OfxecyoNX8sVn8hZItlpgT9iyaXpaha1qCntFDzGArFU7hpIDkLSTxQOX/+FSjXWmF4LrwqAtAw8Q==", + "poststateLeaf": "1cn0qcuaKGvqW2MsuCCteT8tBBy1x9KLivPmSxMJrgvHAi7rQYOhovkPdB7/AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "sKUc0bS3vxziYuLSVP+8vjktAy0msieRxuSg0iLcAUuY03cmnnGtdmwOKwfuF2sHLGMDos8IwPbULHApG/i49Y3JAF4iRBWmppjET9h2cKwXgF83mrzd7Zd1bh7f/nz70AGPgfwuRVCYPRruYO36Lbynwg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x932653825685e7a4719f711e5de643e9325e3e584f4b1f4f173a1d8914f087ef" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": 
"sKUc0bS3vxziYuLSVP+8vjktAy0msieRxuSg0iLcAUuY03cmnnGtdmwOKwfuF2sHLGMDos8IwPbULHApG/i49Y3JAF4iRBWmppjET9h2cKwXgF83mrzd7Zd1bh7f/nz70AGPgfwuRVCYPRruYO36LbynwgEAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "iCvCPAopIESMi+Hl25arOaBs3JhDP9dQJg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x2a05292281cbe8bb141e10a2199e10229ea3b1c049f9c3837090109a6c2c2ecc" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "iCvCPAopIESMi+Hl25arOaBs3JhDP9dQJgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "0Q32kVEATjV2f/Cc0dbx3q+SNfict2QIVg8omyeC1HVF5Q203mKtvn8AH+QrgdMblbSwlGNS9QzCnqG1nnL3RER16Nb8DP8e9r+iLNtBeOFehZTn1KN/1ijJPHZNvJrvDwM5dwh9EMX1ctTG8Byoc3UG/+gZR+gmEQLxO0nQcsgHPzt6+ycmnOxjSEXOzCPltEAzAL9hkcz6fq6+m6TxC88wYP9/bW9QaZ7b0zX1i8Uw+mV5PXl8wnBdOPjRvVAQORtgrMP8VTQJO22AuUwSkz5x3amjKwSzzck1OZhpkrCaUDfRCHkMbntJTG00XNTJYzW2nJEU1Fxt6H1106wqX7MfYmWGhyQFHbz/nyu8Ry72f/4k6AuM+mtv75ST3UzpuQsG3OAYiejOiNM8dBoNP7jlU71SX+CmyGEPUP8OZ+hzY3JGIV7R/gYKIlf/EUCh4Ia12ulV1wQYy4WXCimRZeb93WsDmwaQRPzLwPDWqVmaiabpIgu6YkxTnmKs9fjKiCfVk3qn+P0ozfesw2wC2ZzlwfEu1/wqaaI/D/g0B3w5XkdcCj0jcynsocZxcRKD5K0xHtsk1fW6WBaJahzdCbze52YfGEd6pjdqIkC1pWBNCBOzzHxdAdy2uLkO06VLUXrfqi7NhGFx/A//0mAwrYBvZg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x0612e664e63ff3bb719e78e5b85386d0dec52681cfaf54d349d977c8a280e4fb", + "0x022de869c20a4d79405b04d2e0a220a3221bd4a6b4c4193470167a01e38255d4", + "0x7c6e00e7a3f0b65becc63b04d3666ce3223bc97cb3cdd9b5a541bebc2d6bcf39", + "0x5d9ccd2eecf274c471a3402b8f702ba8da1f9658c33c218ee6415ddbdb6b0fac" + ], + "prestateLeaf": 
"9n/+JOgLjPprb++Uk91M6bkLBtzgGInozojTPHQaDT+45VO9Ul/gpshhD1D/Dmfoc2NyRiFe0f4GCiJX/xFAoeCGtdrpVdcEGMuFlwopkWXm/d1rA5sGkET8y8Dw1qlZmomm6SILumJMU55irPX4yogn1ZN6p/j9KM33rMNsAtmc5cHxLtf8Kg==", + "poststateLeaf": "aaI/D/g0B3w5XkdcCj0jcynsocZxcRKD5K0xHtsk1fW6WBaJahzdCbze52YfGEd6pjdqIkC1pWBNCBOzzHxdAdy2uLkO06VLUXrfqi7NhGFx/A//0mAwrYBvZgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "NIvaQ23RKSRtOnN9vQ/eEvXlDlYOTTy9STSkj6bTYU9dIQEZy8rHsgD+UkPF8anrPL0ZUdMQ5S7mAa0VB4/vDfB8Kl9H5j3jISJWQEY8HaCuc9JsS1PyLxROveE=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xf4fd54da2d9deebfb13c263519bc96a75981aec25cfb546c9de7705284ae6c07" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "NIvaQ23RKSRtOnN9vQ/eEvXlDlYOTTy9STSkj6bTYU9dIQEZy8rHsgD+UkPF8anrPL0ZUdMQ5S7mAa0VB4/vDfB8Kl9H5j3jISJWQEY8HaCuc9JsS1PyLxROveEBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "WJq3C6/ENKsK+af/2LODE86sRiOi2eX+Mkro7gDG2NdmasXjRH0RpFhKyL73WFEeGYbAaO5f4E/zHCrXGfYZyyarF0LZHNBq8TkvrsojRRhVmGbnn4axeDtUdJq+Hd2eQAem4mgKo6UnuY8aqizBlyJ58mR6KFSOG093KapJ55umKtTD1yvwroBss+KQfT2ZKYfEKDPK1e+iASvyeaYcR5lACt1pUMbnoDjmgg776vW0lITGXIijVQ8U7KWwrKi5k0qgxcpJgEAB5IHQUf4z/hWlLWCpvYlKmVNDKQVVTkrXEXArYXorV/SkjMijQrfZT1AO1e9Z4WDXVJ2l5g9KQVfOpuveOMiFV3/wuvRXVf70bAS+NdJKjq1ybTnauagyZcmMDdkgwZNikg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x9410877f4fdd218916c30132d71ad536abd51dd03b167c8883f42f0859db47f8", + "0x526b5b02a339336494f78a1640c5ba2c7d7c4c6c710866660dee1fbd740fa3da", + "0x696dd9436f075634f1169564bb051a3f285ddabaa3e849ac8b8793644311c0cc" + ], + "prestateLeaf": 
"gGyz4pB9PZkph8QoM8rV76IBK/J5phxHmUAK3WlQxuegOOaCDvvq9bSUhMZciKNVDxTspbCsqLmTSqDFykmAQAHkgdBR/jP+FaUtYKm9iUqZU0MpBVVOStcRcCtheitX9KSMyKNCt9lPUA7V71nhYNdUnaXmD0pBV86m6944yIVXf/C69FdV/g==", + "poststateLeaf": "9GwEvjXSSo6tcm052rmoMmXJjA3ZIMGTYpIBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "ZWavHk6IjAS6FtSg9Qg0/c66sfYSTd8ER1pOY+RfR4csyVA8hTFGr6qhOijQjiOzhgUyhZ5Fh8qz0bHMVcxradAbUTZWgb42FrrLUxQptkt8dXZL/Q6phalrTgbWQ5+96ibUGN35fp7PGywHBjlfua4R6/PMBh+hB/8e+Yw63JIZEo2cCYkDp8vMVsIAXkcXD2N8EnIogwhabH3m4XzTvg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x69f77a980ff49e17ec1d6f7b5a139c639ee8992e0eb984828230ef8c053ceae3", + "0x9cd3406f0032d6c07706ebf760890230207d0450a82a711a4d0d927f58dae176" + ], + "prestateLeaf": "ZWavHk6IjAS6FtSg9Qg0/c66sfYSTd8ER1pOY+RfR4csyVA8hTFGr6qhOijQjiOzhgUyhZ5Fh8qz0bHMVcxradAbUTZWgb42FrrLUxQptkt8dXZL/Q6phalrTgbWQ5+96ibUGN35fp7PGywHBjlfua4R6/PMBh+hB/8e+Yw63JIZEo2cCYkDpw==", + "poststateLeaf": "y8xWwgBeRxcPY3wSciiDCFpsfebhfNO+AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"9B9WjYRHM8VA/jKZMRck7bmaKpBidrMIUDcXfze1p7pTFpDN2z4ypMC/qtKkHTVCKFxR9Nb3OkatpLc7L3Zb3mGxgRnL9Z3oviZBQcOwAlgbrnEDnMStOStYHz+FoFWIcvBTblRBdviyQP/eD548mirFH2ldUm/0OVk1ESw336R2lZjz+f2puqfz2eRG9az/4C0qY2FYH2KMzrsdM18sGfOZjf9ptIS47Q8BccZJm/QshiofxA+lGzjI16iJPdAEKEKn+rZ2mcgU+DqOoZBXLqM09+5xW9oIMnAHrN8JLC2xC8cIExVAjFvPK7ePTMIn8mC958AvxYS9H6fmocZqPqbJp1FH/8H0NzNcZvyIFYMNHD4jfX3Y1b6Q6FAY/gIpmZ91PyeOmoNHiRPgGiCvsBULJPLMdSr1iCgeVWwZ/ZmCiaKopqElo9q1BAPaLLBzLm0Ev/1CjQlp4tXvKcLtgcvmU6/7N7xQ4qE7tLEc1q62XUjXBgPorL+HyK52mKN9Ezuuey0rmrtx8qClcggg1LL+yAk2Vw25F4uwRHc9/jbv468ejPRGc/vOgU7hpqi8q5aYtsKcP4DB7k9+DbwViyVHOuYO+BKXYw0SAKF0cptWjYXzSCYNabym/jOMpiJpAF4OzH9acQGVFOV+fIEEwLp/GIPvy3/QAYsoKrnkVq/2dznCLVdLMYZuFdfWd55T", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xa9075e08f50b532bf7d6b052d069f6912a004996b9e8267427e50051abd7eeeb", + "0x622ef2b8079f03d64180a9a075edb64a30a6749de2ca75ea95f7aea3b73036b4", + "0x28ad78f57bf70f4a719bf2425675f92b0a455058db0ea2d80758ec35232f67c8", + "0x0481c175877e232f0950033b43560e873846692d8680038ca0fe78edd4f99300" + ], + "prestateLeaf": "DRw+I3192NW+kOhQGP4CKZmfdT8njpqDR4kT4Bogr7AVCyTyzHUq9YgoHlVsGf2ZgomiqKahJaPatQQD2iywcy5tBL/9Qo0JaeLV7ynC7YHL5lOv+ze8UOKhO7SxHNautl1I1wYD6Ky/h8iudpijfRM7rnstK5q7cfKgpXIIINSy/sgJNlcNuQ==", + "poststateLeaf": "F4uwRHc9/jbv468ejPRGc/vOgU7hpqi8q5aYtsKcP4DB7k9+DbwViyVHOuYO+BKXYw0SAKF0cptWjYXzSCYNabym/jOMpiJpAF4OzH9acQGVFOV+fIEEwLp/GIPvy3/QAYsoKrnkVq/2dznCLVdLMYZuFdfWd55TAQAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"owc1BQ5UPycfsH2vPF8uvab49MBYxShWUNX2a3eL/IhA1Z+4OVXjj2pHNa6ojDeNmMh3hF6Y9g2lfENfNWYlLjFyB5FtVcQH87J/qQNHFW6l/WyUxD63YanQRZBs+t6MhyYHOSjfFdh/cGfXRgq57jeeQFlaVQez00hKrWKP3j5HPd0xe8ujwj684kT4aJs8024CHmzFG8YaR0N12SEjYZ80GifHAu8Yr16pfTezBBtlvrnwScH53fpJpRhECOrNZYM4anrxniTWCr2In03wvwGoPiYyQBWIrdQqwSL77qd7/xr90Lgx1GN3aSrr5T4Os4eI7c3lUz3dBm5u2fSWCORB3iRWTYeRLL7+CxX0aVv3FCYP2ILFpBw+ARsNPF3ixCXGPMup8sDUWata79q06TSKNJkJB1kYtrLq7yDr8shi4RGVxeDE9XvrXCKyAvPndIVWyqGzBU6RihuN+AZXtIlodwXnxBOT2m61YYO7w0Md61Dau5BeTAdqcxjTEfI+YCbSztCVsDGPRg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xcbdf37a7133ed5a200a3fad2902cd9bc1c4dec85d29cc1a83867217940d0d114", + "0xfed99f21455221b33517e24fcd4c26288a62348960c6da111ac49e9059aa2b1d", + "0xb8c1db5a6337825b6233ce1435766d364a7f2f6b2dab42ea06b24a9af485a916" + ], + "prestateLeaf": "PrziRPhomzzTbgIebMUbxhpHQ3XZISNhnzQaJ8cC7xivXql9N7MEG2W+ufBJwfnd+kmlGEQI6s1lgzhqevGeJNYKvYifTfC/Aag+JjJAFYit1CrBIvvup3v/Gv3QuDHUY3dpKuvlPg6zh4jtzeVTPd0Gbm7Z9JYI5EHeJFZNh5Esvv4LFfRpWw==", + "poststateLeaf": "9xQmD9iCxaQcPgEbDTxd4sQlxjzLqfLA1FmrWu/atOk0ijSZCQdZGLay6u8g6/LIYuERlcXgxPV761wisgLz53SFVsqhswVOkYobjfgGV7SJaHcF58QTk9putWGDu8NDHetQ2ruQXkwHanMY0xHyPmAm0s7QlbAxj0YBAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "YmDfFJIJu992WScmzsVyne6MajcLV+PAuHQjjeqJffVgEfJQF5F63v7SVo2qm3F70XGO6LGinr8bUrXd/PKZp75uTG3THIudOg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x759c2672c3bad10c418cbecf4d7562dba49569dfea68ffcb444d7660591e2d7d" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "YmDfFJIJu992WScmzsVyne6MajcLV+PAuHQjjeqJffVgEfJQF5F63v7SVo2qm3F70XGO6LGinr8bUrXd/PKZp75uTG3THIudOgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"TlqhRie7Nhwexw1cmkr0hATHvGmCOnB8i9ftoZMSzbAS/165lmc0rlMZ447NSXcz3NIe11btVQziiaCWa9v4EOJsrrN6b+TPcHao380s+qpWskkRjkxb8iFLIpgiOnty6/uENcyPjlVG3Ll+MT+ICas+Xua9cQweWhn2NrOWU5Q5rvRJu/YiUAzuyE1m3WCNR+VayoqtOYCot7ehD3QBUyHx/zPXSgLiYoFNxdEfmm46AEeXjpZoCYvLm2CBALOt", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x28c1a0012cbe44f3fb5ed2feccbd1fa7aae1f76234c1a8dc19ef694bad386c55", + "0xcde2358bf74c4718abbb00ab46ee37162b743cb879145dd598861253bdbd607b" + ], + "prestateLeaf": "TlqhRie7Nhwexw1cmkr0hATHvGmCOnB8i9ftoZMSzbAS/165lmc0rlMZ447NSXcz3NIe11btVQziiaCWa9v4EOJsrrN6b+TPcHao380s+qpWskkRjkxb8iFLIpgiOnty6/uENcyPjlVG3Ll+MT+ICas+Xua9cQweWhn2NrOWU5Q5rvRJu/YiUA==", + "poststateLeaf": "DO7ITWbdYI1H5VrKiq05gKi3t6EPdAFTIfH/M9dKAuJigU3F0R+abjoAR5eOlmgJi8ubYIEAs60BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "gKWk0Np+a/dx780AIKKk56s351Mse9BugFfE0VVhqhSwR4NSyqoHio77lhavHgnRAcMLLJXhjTAIZ4lg+0+L0iN2z4jTg+SfcDe5rBLN5E16a8c+0+ezxQ/L6MWOlfp4sHHIcBo/HGF0YRiTcx42pBF+pW3BNSobeB3EXPWuKDloNXpOXj0sgNcV2KtBGfEnjcnySaTEqeHtEWDy2o/h5MTc3on1DPcLKihAQLvQ9GLphQ8Vt2kl4qEsyuta6YczVzQUHKy86ft90ksQwq7EAWsK1jyDCbtlgjoTV21ZxW+7ZN4IJPEcSEZ1wZqbvIffvm2S9GyuxTQB+WVI4pfaBuLRs7cTn14F4aZQdoynjIdi1tQnFynhgt6i51GaKbNQaQTennUT6OHHR27psec9grrM4g9rP4iQpwhN3NA0TUkKWewZnF1HLdT8sHjIgcl/m/juYV8qSM8pg10zQ4igltSy28ercGVATyR8I2Ec9TyWJl0k/9Nb4Nbubm1/ihXcKj2M7vrz3qCX0FCZ0rtkV6TJEkH1zzHeNvMHJHs6", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x164fec47c39000332e03169d3f0d740846ee98a365a839fd75dc244d4ebf72ce", + "0x7824bccd36d6084e91e3602f58d6c1326872ffb714e9eb277b6744a9827ee4f0", + "0x1e2bfc61a776eda7628befbc415539254225bb9393d147911a85f3b4f3ac8a97", + "0xe7cdcbf0dad6b72f51f1fee83827cb0bca75da17ff1178abd13ec30c28d67512" + ], + "prestateLeaf": 
"YtbUJxcp4YLeoudRmimzUGkE3p51E+jhx0du6bHnPYK6zOIPaz+IkKcITdzQNE1JClnsGZxdRy3U/LB4yIHJf5v47mFfKkjPKYNdM0OIoJbUstvHq3BlQE8kfCNhHPU8liZdJP/TW+DW7m5tf4oV3Co9jO76896gl9BQmdK7ZFekyRJB9c8x3g==", + "poststateLeaf": "NvMHJHs6AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "XUhVEs9TzsmGyDuTIzwiSfzBvUfuFL1az6WNUYUwrx1x+5eAlN4vNxu53vMsPyzxsNs5bdR51KrDOC4cMB+ruK+ScuqtroI4AH3HIZT4mJeDOZnQHQXwumlybeeumFxL4MWq8YlFe15lUeRbVLlOXPuvWb7D2K2eGout3LFWEJ2jhfyQKgBiE8bUxTRX0MbJwoyR3LOOraDdtJWnomR5rdDqKwvtSjzBRmkHErNfVngifWgxz21H/JpM/FcEuHVWyoVUON2vMO7VY49DtNzX3mnVODXaIfiuxUx9tNt/qKiru5ds51w/7Mi/4MAx/VQQljOBzukf6fSo3IQR/VIr/oNZD8jS+/rTU9wxcbu6f0GstkKbJ3FmZm2/sRh/tmW3M+aMdHewQWUkH3n/z7CWJmh9Eb+1h6oCQ55we2ZsaxK0DFa+7YB6WpCVPg7AJe4iE7caMvDByi+w3VmbfLQknySPxShXdqCaIQPc53sFLrnHNSQWIium8o7KKU2DB4mPHO3atulp1MwoleLYD8FApfV4Qawoe0/22EGcWrlKFV7X3bS44jzNv/A2PiUUgE/Z34ND+Ofw5z9J4l9Ndc39vG0D/r+nGaL7zaeJ7MV7pQE+xZc6P5K4R1+Q+s+eVh16vsscYybG", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xab67a1d5b80bd313522dfb9ab7e5f5134da2a78e13c4cbd21915014bf2c9f820", + "0x7e524169ad08838f7119b4bd41062fdbbd856fe06ab23be337cbbfa7617c3312", + "0x353f3fc5a7ae59224bcc68262145bb94fb94f9ebed52f9c9dbc5439fab5b1fcc", + "0xc407900eb6a3f77e774a60251244f5e9fae95765de26f48869a834bbe7c5f847" + ], + "prestateLeaf": "rLZCmydxZmZtv7EYf7ZltzPmjHR3sEFlJB95/8+wliZofRG/tYeqAkOecHtmbGsStAxWvu2AelqQlT4OwCXuIhO3GjLwwcovsN1Zm3y0JJ8kj8UoV3agmiED3Od7BS65xzUkFiIrpvKOyilNgweJjxzt2rbpadTMKJXi2A/BQKX1eEGsKHtP9g==", + "poststateLeaf": "2EGcWrlKFV7X3bS44jzNv/A2PiUUgE/Z34ND+Ofw5z9J4l9Ndc39vG0D/r+nGaL7zaeJ7MV7pQE+xZc6P5K4R1+Q+s+eVh16vsscYybGAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"OkvbSQAYhLTM/S40CwkShoqc/doQKJuRniWw25qw0ATH9AGBMrIH4GQmBhBJ21JF+DXKHbaX0hiDUo5sRiyPhUNLC3WiHB58XzKUr2R3TCFy1G8f1ufiJSl0tiVEuPUo7EHVB9J8WXz/O97Icsakb5jrzYiLSdsy0Pc82A/tfTZBPuhC4HZ5jhqVetBiRoW8HdS6/dLSioR7XyXUP18KEn9+GC+/QvLpl/bvVRnnvmXpXM/h68d0+cfI5yVvvKZccaAHZcwmkLSC6Sm39Lh3TTM5ETt9Y2nB1kVD2K+02E7Q6pvC61Sm9sVGRSTylcCCLO1HnCQyN50U0HpJwNkC0svtIOfQCnN25I6Mice/rD0sWPkxaEGTJjnYVLY+14+yEhsSRfJPAacUfkmV4QFhFd1bD+PWmRWe3y2C97zwFp2P6i9WANfG/WV1HxXibjJSasmkdb32mHDBa/XFR+A5uDMdprOoJDKS", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x6a9c620efe67cd187c9f5885cc2c58d9b359246302be95baf2364c508398d59d", + "0xc9f347ba036d7973cd36ee090a73da7a99331f913b962292912b5c64bed6711a", + "0x961ed6a2ceb428015ef02f857d420a6e858daee2ba77a7bd025d9ca785ea4725" + ], + "prestateLeaf": "GpV60GJGhbwd1Lr90tKKhHtfJdQ/XwoSf34YL79C8umX9u9VGee+Zelcz+Hrx3T5x8jnJW+8plxxoAdlzCaQtILpKbf0uHdNMzkRO31jacHWRUPYr7TYTtDqm8LrVKb2xUZFJPKVwIIs7UecJDI3nRTQeknA2QLSy+0g59AKc3bkjoyJx7+sPQ==", + "poststateLeaf": "LFj5MWhBkyY52FS2PtePshIbEkXyTwGnFH5JleEBYRXdWw/j1pkVnt8tgve88Badj+ovVgDXxv1ldR8V4m4yUmrJpHW99phwwWv1xUfgObgzHaazqCQykgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "Dni8xdY2FY53gk916B/zgDq3IuWYwhZ2NCTGNBSp7F4a/peabO3eHNz3/j/lD3a4nG8CIZ5/NeS52OhCAo4ie7iS2SkEt0hLmoXOXHg6WRM7BEFFymqBjnr3R7gj26Uht93NNYK9+UuaW1kW4+s4ZvmyVHPPZ9I4hSIaSjg2r3l3/ZVnCgD640AxwtJL/kGZSjjlU5Snhmkn+X911AofeDbyZB4K70P+9fbGKYsKJCVF7VMP/EBmLNlTryt12Us3k7ddtMF1NJzk8xDamkYrmJZNjf3ol3kYr9qdbd5JkBdOObnc9b5m898vqXQWCmS6LcThuy5+QM0y8lDBEScuQag/xKboULbjwcYBur0Ci5ua+d0H4/xlDmNMI+ruuB+B+G14+P+xPPjK2Iu9b93K38K+yZvTPh21tdvj/4njdAMp2w3C0S4tM/XNQVHZLFwuDR6mMzMUj1M9u2rZNU607qL32thrYE/gDavvDUJ1EcKTtHefUbxPYTBwZ5STNTGImenbSCu3wWdNieRmtujsLXSr6vYgqHtF6K8cyqOaOQwb6D1J/ilxZhD0cNWqQEJOnKjIcw3MhjQjSz13Zqy4Kz4BgfZYiVqfsoMW3sW7233jtPR5tmMe3G2kDppZbMmrlak8ooiKstiYKwMR4XvKKQVJyhjj5M9LheuzBvH8fol9wihZWOBdvz5s3RvqDWQkikwMDHxSrRRkwVO7weEK", + "commitments": [ + 
"0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x9236647f7e834b5ef526146639f3e19a91a5e84f2f6a0483b11389a644cedf68", + "0xe3e0db33b1016c4a1e2cd90fa3cdd7268a70a17cfa801b8112bf9da81e9dff6e", + "0x31b580ad3e4eab83a0cefe3b5f580325e02cc9698dfd892c1f89dc881103edb3", + "0x619db25a967f481df1824cba2a713b7f0e256f74be84aa09b1a80fdc51c20aa6" + ], + "prestateLeaf": "mvndB+P8ZQ5jTCPq7rgfgfhtePj/sTz4ytiLvW/dyt/Cvsmb0z4dtbXb4/+J43QDKdsNwtEuLTP1zUFR2SxcLg0epjMzFI9TPbtq2TVOtO6i99rYa2BP4A2r7w1CdRHCk7R3n1G8T2EwcGeUkzUxiJnp20grt8FnTYnkZrbo7C10q+r2IKh7RQ==", + "poststateLeaf": "6K8cyqOaOQwb6D1J/ilxZhD0cNWqQEJOnKjIcw3MhjQjSz13Zqy4Kz4BgfZYiVqfsoMW3sW7233jtPR5tmMe3G2kDppZbMmrlak8ooiKstiYKwMR4XvKKQVJyhjj5M9LheuzBvH8fol9wihZWOBdvz5s3RvqDWQkikwMDHxSrRRkwVO7weEKgQ==" + }, + { + "input": "548ML89LjxGqzaKfXD2+DzRe/n4KnhoXHQQxl245B4A6Jd1kIisZ7ZtlgMFIsrRI4SHYo56gPUeNQKKZS1tnkbhwz4Be7FJgaNhy8qmaR1lrPMmGNG3yK0OZbDoZPWWKEnGYwb0wXIXKykd2OeD+V1FEVIb1varcqD6e31UxjH2sKNfbfb2LJj/OC2L5Lg91Qs6L188Wb63WNt+B8c7ArvbgwKwMLVfFcBwwKJjez6MsuiY7nADjs6i/PhTXrkQQKTF5WWzbHpOtCpBAGEF+TjPD0V3V3eAkz7tcvl1PaAcF63i7f5whXYhhpng/QWrxSKjYBFp1E1fcTATM9+O49z7f6AebERcv34sspmYXZE+Hs6HLE0oMnYHOPUOf91qT29khKLbW2DIYZgd8TR/xnL8u6aLZY3dED9cKpUNtJxX2aLN+2gX+O5rZ5MC8816oBTtBmT7eph2HVrPdJODSGtmQ+QU55DORHur9tss1uf+cy3mKtwY+Y7xDmTsA", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xfe5251befacdee21746ad860d05e7b49c23a954a731a7ae61f1a02e6408e66ba", + "0x45bcd5f51777f3ab924800b6671cd928b96287806ba19fbf9f90d1aa8f50774a", + "0x5d4d865d850c9cea52df84142bc0ee4599e586103df653530d5204e265061ca4" + ], + "prestateLeaf": "P84LYvkuD3VCzovXzxZvrdY234HxzsCu9uDArAwtV8VwHDAomN7Poyy6JjucAOOzqL8+FNeuRBApMXlZbNsek60KkEAYQX5OM8PRXdXd4CTPu1y+XU9oBwXreLt/nCFdiGGmeD9BavFIqNgEWnUTV9xMBMz347j3Pt/oB5sRFy/fiyymZhdkTw==", + "poststateLeaf": 
"h7OhyxNKDJ2Bzj1Dn/dak9vZISi21tgyGGYHfE0f8Zy/Lumi2WN3RA/XCqVDbScV9mizftoF/jua2eTAvPNeqAU7QZk+3qYdh1az3STg0hrZkPkFOeQzkR7q/bbLNbn/nMt5ircGPmO8Q5k7AAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "ugu7GzhFq70GgrGQSZEFItW0+khpzmPODUEXabcK2KJPgynWcsBBeW0vNfySTjgOiz6pPaoAvoOf/PokycT6t5fd1A8qHbV+yowAzHElFEJXHjUntFCxI9PNJcJo8758kELCR8R9YKBXZVjmF+M87WWg6/g+d9dpj5AL32dU7kQmLkPu5kaxL7hJRTOlzBJQrMNz7G/BWPJJ0GNbFhvrc2YZIL/LGPCt4OZEHFjG90RRXa9w+cWjZHS/QHAGufVSswFDt52O8/XB0KbI2MWpltbOz2+lCg+M6h19Fx0f47i90BlLymMVi5CZRJ5DhlPkuG3KVo0rUrGEcuizcx7HifIbf45PHOdT8HYvxKedmXCq5MsLd1Ohh9WjzCwaCuql9kc1jBW5OzoFWKnaDGlaCyJqeI1lQBz8FqBHYQi5w8DH/m1ZV7T9StMH1qndmcyNJyMlxhGmGU8QmYBR7hnDTnJpeNkRK6l8LEwcjfen1JXhWy78M98bqZxZiArQBmYImtO1YG7LHhftULixizOxezWO9xaSPZBXh5a7pQ==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xe72dc36a1aa4e46b0ad98ecca057473bbb1cd8af95eeaa11718fde0b652d7c00", + "0xd76c62cf7bfb075bde0ca1f3559e20ea16ff017bf9d8c56dd6a7b5d25b0d4c96", + "0x6e8fb71d0bea05ea7d8426b3169b2ad5c59d13679139ea6cf80c27db4def900c", + "0xff0fda4e3f43d1c1cdcc59bde8114efff8959d1aec78910020892f6786a11f3d" + ], + "prestateLeaf": "quTLC3dToYfVo8wsGgrqpfZHNYwVuTs6BVip2gxpWgsianiNZUAc/BagR2EIucPAx/5tWVe0/UrTB9ap3ZnMjScjJcYRphlPEJmAUe4Zw05yaXjZESupfCxMHI33p9SV4Vsu/DPfG6mcWYgK0AZmCJrTtWBuyx4X7VC4sYszsXs1jvcWkj2QVw==", + "poststateLeaf": "h5a7pQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"iO4gx/gywUBIM+L59hSKkEPKdj5hVnhQGPcTRscZ10xBbdpZW6xIvZtsCGxmFXa85MP9X37X7ysMtUvspHBYeU8sVGl0GbGb3oxD1Eb9JBGDs1BGPnkDqWeikVfMEaE3HeqrDaxHhGOOLLCT2kM+mWKhAM1NlO4CD/1YuITSyfPlygI0bP5NuhjRe46vVaJpxa0dbByZNvX9dWqk6TQ7hb6ui5eOhtjeN0nw5rxyHPZXn2JTBZGfD0WRI3vDi+97DxziWq/Hl7xJUql1fYiq1X6BYevrMcO16ORcJUY9Q9LtLw+aq38xbQCrtYZv9PqjHjMLG38Lj5pqFi+PiacaDEsWYHk8JmC6VWMkVGP42uWncKlI4vBO+S/7G5VVuJociI8Bmu0Io0Tr7S/NaERIqC9y4SSN7UPzqtpHhQexlfYdnl41UsBzTrDwhdoHyDHuDNnRRB7vTIRnV1ejNX3F72XyrhB2q5fWza2ex+Iz9op7ldop7x3jJjl5PVSmPP0a5QhOW6k+TWtDrujqFaqk9Vuz9Qq27bj51nLNzbMzM57kupnIKNrHr3Hr/W8FdqXmU8UWCI5cX3lBxg/ywNVpvE5JV3C/vfXcfS5vFm4zJS5Gbg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x3465128281e0960e6d11814935dece2d58fe79fd3ae0dc9aa1fedb23190adcea", + "0x956bddcb7caf1472e24289ec253118a9a946cfec3faa43d9cdfb3583c6428c02", + "0x256986bdf9066db60f75792c65aa192e846df14a3478a7881238b66b458a81be", + "0x9db4b75e27b3bb292bdd450030a1b43d0f6be658fe6d59c711f534d97afa964d" + ], + "prestateLeaf": "p3CpSOLwTvkv+xuVVbiaHIiPAZrtCKNE6+0vzWhESKgvcuEkje1D86raR4UHsZX2HZ5eNVLAc06w8IXaB8gx7gzZ0UQe70yEZ1dXozV9xe9l8q4QdquX1s2tnsfiM/aKe5XaKe8d4yY5eT1Upjz9GuUITlupPk1rQ67o6hWqpPVbs/UKtu24+Q==", + "poststateLeaf": "1nLNzbMzM57kupnIKNrHr3Hr/W8FdqXmU8UWCI5cX3lBxg/ywNVpvE5JV3C/vfXcfS5vFm4zJS5GbgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "VLPu7ILgq9sB+QMzMPf+iYHuoTT2uKjjcs3QJwJGZOlTXBqUZClVaii3BV/rI/iDshbTrWefG4rD5CaLBqcdYTM4JXt1DnnK2S1C77rFTkM6dkN+q2juKQJNDIxGDxcgztKPzmksK47yLnCwxEDbA9E5mKDjnBwCcnfsqbKjg6o2/9LJu1xlj0q0urR3TMCBl+/FaIJmiQ==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xbf4fd89b077bf39077318ff9f2eb126cb04c2b3ef6f9f3f1bfb2204e367f731e", + "0xb669198808b22cc2e730d88fee059c96b9d13057b509b4b26cb625a04e75432e" + ], + "prestateLeaf": 
"VLPu7ILgq9sB+QMzMPf+iYHuoTT2uKjjcs3QJwJGZOlTXBqUZClVaii3BV/rI/iDshbTrWefG4rD5CaLBqcdYTM4JXt1DnnK2S1C77rFTkM6dkN+q2juKQJNDIxGDxcgztKPzmksK47yLnCwxEDbA9E5mKDjnBwCcnfsqbKjg6o2/9LJu1xljw==", + "poststateLeaf": "SrS6tHdMwIGX78VogmaJAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "iaA3KhRXBhnFjkR2szfpxFslTINxAiauR6ZS41N7Tyfnk5iczJHJmjmPvWLUVbsoFWwbBvGkVPPJSyYV8z6Ll+CzQJmR", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xe2a777a8bffe0bbba1ad4d89515f6eaa9440c7d843b5543ce53cf15cdd7dc4ef" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "iaA3KhRXBhnFjkR2szfpxFslTINxAiauR6ZS41N7Tyfnk5iczJHJmjmPvWLUVbsoFWwbBvGkVPPJSyYV8z6Ll+CzQJmRAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "X/hVerdH6C8Evfr1bjZ7xBxsFQN1t/PSrXv64Au/vEZMz68f9T7p9OopxElulNEjA/UlijpVs0H81HF29eAyyfqvKojBH6dhCjhMij2DErMGLenHXanHzN6u2LKVv1o1+tCusG7Rw6bzbTsq0Ee95bOfvzfO27/kpJtBFN+5rhm01rez85tb1bEmkgx6Uf18hKF4WES4zUmdE1MEhcClMJEfQy6ZR8NA3x1qRYhUq9r3xmhN1+vFC+YDX0hdTWuCJg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xead8ca105740b9ddb7988299eb5f06b4dcae604c5b0d08a2cdc8695be973f1f9", + "0x053b8f8c5638392c64f158efd55ccb268671ff1cadfa6b919f23f4c58722ddbe" + ], + "prestateLeaf": "X/hVerdH6C8Evfr1bjZ7xBxsFQN1t/PSrXv64Au/vEZMz68f9T7p9OopxElulNEjA/UlijpVs0H81HF29eAyyfqvKojBH6dhCjhMij2DErMGLenHXanHzN6u2LKVv1o1+tCusG7Rw6bzbTsq0Ee95bOfvzfO27/kpJtBFN+5rhm01rez85tb1Q==", + "poststateLeaf": 
"sSaSDHpR/XyEoXhYRLjNSZ0TUwSFwKUwkR9DLplHw0DfHWpFiFSr2vfGaE3X68UL5gNfSF1Na4ImAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "CeiQEBsTzUss2uuNs6R4kez3liQg+cPLxmbI4+TE19MAHKQ78tPctAoZ3otdXalXdNWwpX1JnWlFGINd+p0g99Vbk718CS349DY5mBwMh11sOmiSRMG5TGE7d9XqSIK+5X8II7vMrcikfvhImOj1+qcVpMYMAiCceIfbnqAeS55Av+SZSDYWUnrVVZ/UBJcHeMqLmFGsJnyulwQIAvPbKPO+qY0IV8LOY8tA6NXF0aIWaSY9izGrb3pvEGOO36uaDzoYEJ6QbN9BCKUq18LtmfjqrEWfl4exdt/twKzFRWcf5MsKUkaBQyFS7mU8Lp49hs9eELETw7Tv67y+QmsWquDOoFNbGRzsclXtkoNTGJOASap3eNKvuLb3DHd5vUidqU0+6/P18dyGjJ+0JWqO", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x8cda77b99eb425ea9850550203a6525ecfefce976537ec8841cd5094ea3f5ae9", + "0xf7cbceee65f235eec33ca84440f300aecc1c83e265086d562c226065939f1b0f", + "0xf3db4825d2244a11f679202c1b476faa6467b60faeaf220b760c90799c4fc041" + ], + "prestateLeaf": "etVVn9QElwd4youYUawmfK6XBAgC89so876pjQhXws5jy0Do1cXRohZpJj2LMatvem8QY47fq5oPOhgQnpBs30EIpSrXwu2Z+OqsRZ+Xh7F23+3ArMVFZx/kywpSRoFDIVLuZTwunj2Gz14QsRPDtO/rvL5Caxaq4M6gU1sZHOxyVe2Sg1MYkw==", + "poststateLeaf": "gEmqd3jSr7i29wx3eb1InalNPuvz9fHchoyftCVqjgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"T43pAryJO7P0xU/AXcqoGcNUCxUvdZ+t+IhKI6wU821towh4/f3ykzeiOyIf4PpQ252B5CjHQzkTf4i6Tvayioo+ZwvFl7Cbt42y3apjZQExrwpCsb/NFfmw328/us91lMipJmcHox7JPPQKibW8KFa6IhseC1msQ7h8V1c1LPnr+TE4NrsfTcI+rB5mSS4YG8F+Kpi+S5OR2C8qx4NbZ7IWIpIZIxKFV28xTtl5LH837MZqYWQigiZexEpvRC4Cua6WTstsNUihbjjElElZPdZhFFLwvpG5uDUEnyt6fm+mJZkLDJG52stPIOIi0YVhkXLVln784zUUXJGWDQGg+ub+vEh2zJ+BYYsUeHUjtnfclH/s66XWtOkTMCs+rt0Kw8lJ4SX8MFDb0yjRt4HhmhyVNfU1VOxkm/Z7ad36ycQ8UOJp+qrqPr5Rk4y7RqJzKj/Fro8mJu2lanKbeYLIPsSgLwrl2rPhbUal7rHstS0UzWLxqPE9EmKikDJ6OJmSQpfF5GOuhRGoWM8NVwwt612f9xEMirXrzUzbNPgBoUibr596HzTr/m4H86gu/wVTNK7v0xMEk//6yOIKy4P3CzROI5jSHTECoNN+8Zw2rHuxctHl/rYic+4qaP2EYNKXF0EV5cJdIqvgApxbz2abAdC+Rqw5ZNYQ+O5JDy4/y7O6WB0vgsx5XXdhM8S+0gTq", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xc5f4a649be4a2f0cc9248d8ddefeee3ab3220c4d063a611eb7aad752cecba852", + "0x325134ff3a1fb7cbea02c70bf9fe234aeffb339e6c72f355f307c3f2464e9154", + "0xc11dd00fa0ecb786e18159660c847a90fcdfe098738e2b15996b2ccbf7960b06", + "0x8a645af9f6900d64a16558ad878421f6557221476f78b44ae7b03e3d2e0304dd" + ], + "prestateLeaf": "3JR/7Oul1rTpEzArPq7dCsPJSeEl/DBQ29Mo0beB4ZoclTX1NVTsZJv2e2nd+snEPFDiafqq6j6+UZOMu0aicyo/xa6PJibtpWpym3mCyD7EoC8K5dqz4W1Gpe6x7LUtFM1i8ajxPRJiopAyejiZkkKXxeRjroURqFjPDVcMLetdn/cRDIq16w==", + "poststateLeaf": "zUzbNPgBoUibr596HzTr/m4H86gu/wVTNK7v0xMEk//6yOIKy4P3CzROI5jSHTECoNN+8Zw2rHuxctHl/rYic+4qaP2EYNKXF0EV5cJdIqvgApxbz2abAdC+Rqw5ZNYQ+O5JDy4/y7O6WB0vgsx5XXdhM8S+0gTqAQAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "E8ftDm2vrPzfRJ12Iqh62rOORhMZ1BZOGxer0msLFmHUFVOorUGWfbvJHZFHVP1oRGbqlHdr05MpakpXoi+ZNSubKu/zPEAvi0veKZAm/bDzpR7rvTIaIs23pSnq35DfjT/0Xw7ksNsVlu6ZfsS0FPm26RcuOPBiYO9V8/Gq6aAvhSwDmuV33ntGNRwuCm3CZOuA72AwMJtl1LKWVsk+IV8VfVXwitp7SvDDT5e0m970/UnG/o9M9RjSRZbTWLeZfcuYH4iEi0GvvZhywJA5LIn9qg8ixhrrxbO4sl1Sto+BY4BfA4M9/bDyONN+osuVlnh5R9ovFdSqyH+nqAve6lWntoap4B1Dfgu306E7gHT1q2+9HCGrQs9puzi02qSgNCfj+RQHsdGBRgzrruXUjKx+PZV74+Wh3IwPCCwTvQvYkU/Weu8QZoP/bYpgdBJ1AVtiVI1/RTOg", + 
"commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x27a30ccd006cc8547173c85181c75f532dd71c50fb6c7e57f1da58b0555b0546", + "0x540375311f1657dd7140d32f2e60d54e53b335ae54539ba511fda8d8c57b74eb", + "0xeb893e6229e028b2c510d874435c751039360b203368fa4c75114cfaf39b3c8a" + ], + "prestateLeaf": "e0Y1HC4KbcJk64DvYDAwm2XUspZWyT4hXxV9VfCK2ntK8MNPl7Sb3vT9Scb+j0z1GNJFltNYt5l9y5gfiISLQa+9mHLAkDksif2qDyLGGuvFs7iyXVK2j4FjgF8Dgz39sPI4036iy5WWeHlH2i8V1KrIf6eoC97qVae2hqngHUN+C7fToTuAdA==", + "poststateLeaf": "9atvvRwhq0LPabs4tNqkoDQn4/kUB7HRgUYM667l1Iysfj2Ve+PlodyMDwgsE70L2JFP1nrvEGaD/22KYHQSdQFbYlSNf0UzoAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "aCjoLgKIwqWlW8lhM5bxPyX5Qlipms9rSCj8uLfm6YkYbudeyCaevudHxjcCFWDzmNN4+j/6iNI2dbkQpmrulFvviU5Vn2WlmQSU5D0PvjysOviISM8Tz4PUrdvNxUfIfczgIyFhAG7hKUhi56PtZ9qkYw/I6paRhex8sVqmuei89vswG9SbLkVYU7iOOU/GgeNCfZSM", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xe9a9ed50c10f5da4e10231ad4122859ad7a4849aac33887fd6929feebb34239f", + "0xccddec19638af750e06720249e99c099c6ea622134ec8a20503013e10d2fb163" + ], + "prestateLeaf": "aCjoLgKIwqWlW8lhM5bxPyX5Qlipms9rSCj8uLfm6YkYbudeyCaevudHxjcCFWDzmNN4+j/6iNI2dbkQpmrulFvviU5Vn2WlmQSU5D0PvjysOviISM8Tz4PUrdvNxUfIfczgIyFhAG7hKUhi56PtZ9qkYw/I6paRhex8sVqmuei89vswG9SbLg==", + "poststateLeaf": "RVhTuI45T8aB40J9lIwBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"2EWBx+PqSowDuumM8eRw5jMnkK54s+tRd5qlGO6RlsR099f8Nz2regoK81zbOwFnDaLJuWMpSi0lQayfwydkZvMz5Nili00eh1d16VxCuvM4OugCz2QxaikZ3SMHiNcVStO4asCZbN2dcZpihuZSNyVScd4VvLEoPk6EZnWaEeAVV14xjfoPgT2iwwVzmrGk8sqjQJGKeYBpcbkpKa4BQqUsonxMCmTngb9p5hU9fkg4uKhVnTOV+R/1QDvhnpxbIQkSn7/oDGqQZ6H4VbR8oB7kMswhED5dQBFmF/VXvVpdwcF5fj0LMF20QIWzs8cn9o2YTS7AZJtOvFBWxBXjzhsXYDFpRI+uiOdP5lgIGDq5L03HvXa591o71hzPfmjyR8Y=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x7261444591a7e2089579fd29293a60dd750951cdb884ba12c741a159b8682d4f", + "0x7a9ea07151ca7ca8cea49e9831f91986136303705ed5589d03a9b3101e569b99", + "0x5e8282b46e4352cb1e56e9f5a0a08ddacb112cc8e85192ed906ab67445235202" + ], + "prestateLeaf": "PaLDBXOasaTyyqNAkYp5gGlxuSkprgFCpSyifEwKZOeBv2nmFT1+SDi4qFWdM5X5H/VAO+GenFshCRKfv+gMapBnofhVtHygHuQyzCEQPl1AEWYX9Ve9Wl3BwXl+PQswXbRAhbOzxyf2jZhNLsBkm068UFbEFePOGxdgMWlEj66I50/mWAgYOg==", + "poststateLeaf": "uS9Nx712ufdaO9Ycz35o8kfGAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "wAA1Mt5xqAbr0sTMUomvimo+WbCHDEiS+pWu+eQGgnXxDWmOmuVqwaA5N4fMefoy2e1ROvyuStbgImQ7TRSaVOJQjT7wgEjoh8K5//MlSbuNFQZnWsAltKhlibYqDV3dl+gUdz3ya0wj2Rv+bp5CgQ1DdBcct+uPD1AeLHWzE+iHDpcTUpyvZ7RFw5ZFcM0zVLObx1UDl1eTmQCWqJDNjunwTmd+msSNX7B4ot6XgR1ox5sBQyqCSLbZ+VeN3fuHzX659WQbXaYGpBE8/xBE9iYBhRMm9jYGY8ycDQVNh/5WYzvB5BX/biu0yv0M9bW/cUd7FzYZGhgVuYznR4Qi+G+HkQ7dncKk5e5PqsduTi3iA6UEOO9Oqpg5jH1MkUoi5d4yrxcdjdAuQdM/J+UQbkQ7CgH+IW3R0wZivS8UepXi7rLazScJKHL/+nZgtI/IRFl6GGvPOYUXmCmWRwzMiON1KXqzon3mKSaIb35O+uJwreK9UjkijgJJuZlRXeubMQD3I+9hSQSGrY3Zm0sVtmR2a8nboqiS0hCn9zwq+NfBnzGr2Tn9lG302auqr3hZ3ESfSYpcrcyD+yu+dRhHwIIHYLbl+Uydkc0+/llyEmEOg1QsJuHac7HPeXEX94hDhKLVey/ZVYLOe+xYPhuLilzQwqt9AorjFGtsogEWmSaO3sfjj+pOiYg=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xeed8dddc6a60a05bb1ba49d0bd7b7aa120ea8ee2757e59e8954b71502d83e295", + 
"0xd9e75a98f4ae520b38436ca9bf464d6f26768bad4afedcb9f7af183eccce01e3", + "0x1a7e9bdb037a90170457843bfa64b4e1442f83c1b383bd1f31878599d8d6250c", + "0xae40f2bf7d42e665ab60a5ca75ffa1515014aefd3c13f1161cc432e5b6ebad27" + ], + "prestateLeaf": "4gOlBDjvTqqYOYx9TJFKIuXeMq8XHY3QLkHTPyflEG5EOwoB/iFt0dMGYr0vFHqV4u6y2s0nCShy//p2YLSPyERZehhrzzmFF5gplkcMzIjjdSl6s6J95ikmiG9+TvricK3ivVI5Io4CSbmZUV3rmzEA9yPvYUkEhq2N2ZtLFbZkdmvJ26Kokg==", + "poststateLeaf": "0hCn9zwq+NfBnzGr2Tn9lG302auqr3hZ3ESfSYpcrcyD+yu+dRhHwIIHYLbl+Uydkc0+/llyEmEOg1QsJuHac7HPeXEX94hDhKLVey/ZVYLOe+xYPhuLilzQwqt9AorjFGtsogEWmSaO3sfjj+pOiYgBAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "Qvy7rQFYakrZWqUKvpTKDtWYg24HYfPSRdE+KrTXeNPq5K1J/7VvR38m6DuUtaVhVcn/d6QRQRRS5RTIDLrUQ1crIWaQ8XmMqNaFKZlBytIGvLpdDWuJ2XydRkPYYChqnH+nSpvJNfAYYKK4K4UY1JNXPjiAOP0jXvWi3oOwEz3brl84/dFPG3IO3dVHJtSzYF63YZ7UXlpJV2MP9q84GwbKirkZthc5IFv7BzOX2AXNIXNDEVkMz1oyytJGK2mlRKLNljapNmo4eAbzHKC4bF4T4ah2O8wpPiUXoWlrQex7wuAnBYGdLS6l3e8mE+Gsd0mEW7vFYKotLAepsHwbPwJ7ESGmDBxbMzicKswWKWyexEL6LpwqcFAcc6Gmiho2D1BOOqpKaApZnWowBk8HFdA2EPjaXycGIxhyftQtMV019XX6hzz93Xt4d4PS0fWfj/ViuFopU/JO3cimvzHqo7tgP8LkiHUlNDF3xnzzn8OpVtvkB7rKAx0tMRWtG+TzvBKKElyr16cYQUX+wd2/u6QBIIx45Vz5z/GzHRXP3nCuFwxNQKhXr0v6KcnGW2QfQRX2LN4PPSk5znTS8Q2R9SgBu4Kjq5sKON5zym8LW8fKOH0Nmheb1Q==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x60c9a594932bdc77815923284197c8032d96af34174bbd71ca0373b4ff58f678", + "0x945dfe1b90dad1ac86ddf5b8f27d8a8faaa6ba396113167510fb60cd088ede07", + "0xf52616b7f35617b69013c8a9485507bda0753fef7eac1b1bff8d93574de672fb", + "0x72ecc86dd75d9c5c7b1f964dd4a664a8a258a34d0b11f2d97dabaa5d4bb8ad55" + ], + "prestateLeaf": "nsRC+i6cKnBQHHOhpooaNg9QTjqqSmgKWZ1qMAZPBxXQNhD42l8nBiMYcn7ULTFdNfV1+oc8/d17eHeD0tH1n4/1YrhaKVPyTt3Ipr8x6qO7YD/C5Ih1JTQxd8Z885/DqVbb5Ae6ygMdLTEVrRvk87wSihJcq9enGEFF/sHdv7ukASCMeOVc+Q==", + "poststateLeaf": 
"z/GzHRXP3nCuFwxNQKhXr0v6KcnGW2QfQRX2LN4PPSk5znTS8Q2R9SgBu4Kjq5sKON5zym8LW8fKOH0Nmheb1QEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "8DTi9g8t1UVGL22HRPz6nazHrn1f/hKJghZqGDvIVodeI6eld/nUqc4EUOgcn6QAMP1ukAqK+vezIt+T6mNTSxiAf/3wTkmduX3FskhRJ8Iler8Ux1TaZhGMk4WPFbc85DUxrndqKlMsDUgSg9lPcl6KwZ6TVFKT5dIsDXSL+82geDhAyPWs3QKYcnbyJDjoN84NXjxYX/CkZOuvLi4Gf7f3VwtHM18YR6zB07hzIucsKxW2yRaMOQe4cfd0QwODs8qhxJdackMRIpDLFzVdvuglBNdrYjmIwYJtGMK5VFOJYUN3BwC3BlmZ5o9BUzDHZguVtUI2Be2fIH4HtQNnHnxZbNAZGEwoZEfc7IlAOIWu6Gy9ihCaey2Ny+xAAgC866WybS51v9QEKzfG02En0G0qxvTQI6yS16NjKFfAP06LgshjC5sZBSg35YGvlCIe/sNlLivxtJjZwx1btzp2xv/cCOOsYx7OycOQl9w9uqYdFnU1W93VErvuBDgR6+jYpyHUvguDt2SZWA3G4pf+tpS3ng2JsOjjvOVs8buGYKZeUHnfz6p9IfvFpBYu7xY4XOIY3ECTr10Kp0sOGRlBxecJ3FJwIEG8B4+dqLNO5I4AY3KjMsiWd2IPzAUN/iyOIw==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x91a22b73667e7daf8bae65680d26f92820c825105c6c5c7680e5439dd596dcd1", + "0x598fae2025414a8902bc1d75b443d7d00745e0723a1668b0d321b4d8cd22c4e4", + "0xadc78b18886ac722fd3fba10b4e2570e25eac9c438cfce45b7dbfb439b6681b9", + "0xaf0b22baaec8e6e9491ab8186ff1f262f1bf51394d61d3c1d1cef3f84796e78d" + ], + "prestateLeaf": "ruhsvYoQmnstjcvsQAIAvOulsm0udb/UBCs3xtNhJ9BtKsb00COsktejYyhXwD9Oi4LIYwubGQUoN+WBr5QiHv7DZS4r8bSY2cMdW7c6dsb/3AjjrGMezsnDkJfcPbqmHRZ1NVvd1RK77gQ4Eevo2Kch1L4Lg7dkmVgNxuKX/raUt54NibDo4w==", + "poststateLeaf": "vOVs8buGYKZeUHnfz6p9IfvFpBYu7xY4XOIY3ECTr10Kp0sOGRlBxecJ3FJwIEG8B4+dqLNO5I4AY3KjMsiWd2IPzAUN/iyOIwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"hgb6TGeZKH9EF6P843QwlJxc7JJQpBM9n5w64QSXa3VCFyrYQp3Ep/xSbgXRdIaVQVOu/yzHE1siHjhYZNCR5o1go61dlkWOULFrmfIhsFnFKbllrm8KInzHlenysEhxAMFrDr5+vfZizD0Tt/8JIMkzGy9zUfEjgRSr67dE4Do9HMznfMkgnnfO69/wlZsePVbKR/KoS8keVY/oqaDsCt6EXppCIOdX3s+uel4pWhuNEM15IxCZO3LnOTVRI+lHga7deq0xLZjDHshQVGJhX2BQn7Dey6ZBItRdF8RMNxtKaFnsV+fII5NJ/IrAl6rEPkkoyLShZX8B65Gami0QkBLHQaD1OOopE6fJjUCJCni9Ew3avacWGcA5W+Gvprv3a+budPDTTlvctGY/MWF1B2bCnCv+1lXusDfvN8Yi0a56CGabuIsk0BKlClHCcmxyxmrlswbQZ/ZRjoPHW0W2pOta7p9odDJg79cvJYEgNKn+JW99vKtb3PDXb/gsdVD/OQ+pR4RKUZNBw9YKzyFCnY/lX5jlNSb+RFyyLCBVSm3GvDIm5RTVSnQ5G0fkBZDSmLjCCIU2C3qMQoWHlx79IQfG8+BGNamo9txUgPSZDcq05RRVHLXLNg==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x99c8ecba39d9f433fb397768d380ec2231ce81b1fe7c61e58e71e31d3048dc2d", + "0x9eff473185e3c9d53c21e858e1660b72696628953521d6755e77ca29cb3740a0", + "0xd937a37fe13016d30a8722a08c3271f875a625186bca9949fda3234c96288317", + "0xb1b8222b6a4010a9f5c0773954556ddab87e7f144ef784144e84b793219ef7ec" + ], + "prestateLeaf": "vRMN2r2nFhnAOVvhr6a792vm7nTw005b3LRmPzFhdQdmwpwr/tZV7rA37zfGItGueghmm7iLJNASpQpRwnJscsZq5bMG0Gf2UY6Dx1tFtqTrWu6faHQyYO/XLyWBIDSp/iVvfbyrW9zw12/4LHVQ/zkPqUeESlGTQcPWCs8hQp2P5V+Y5TUm/g==", + "poststateLeaf": "RFyyLCBVSm3GvDIm5RTVSnQ5G0fkBZDSmLjCCIU2C3qMQoWHlx79IQfG8+BGNamo9txUgPSZDcq05RRVHLXLNgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "qa3vm4/HuDuyj8khkZxhXKZvFhhxiGOW47bfybP4mndme0Aek0OdIq6kExKRWSbRN/ZarQ7685YsovxpEq44G1IERrGbVHAcJjiwarh/+8NVYNACr2wb19TRdEGFPRDlLmV2/qurj0h0uZ7IMp0wEhAbY6sXHdaOWO/NZlt0BZIoYWtkJ9ihNWASQgqCBOMoFiVag6wNyeamX6rnw4NgAIZ0KhhP8ahHZo5Tw0UVFvD+HKErSlgUmJHJBpgwFKtHSs8ViTCYFyXGjdGuWRn+LUNHNbcSehCaV0QVy0pXczeYpU/inUvku1NanAZVQV7C8G/yAvBr+tWWrSqSgw==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x667dd12200a0c28ab5d704010fbf9c93efe8f431ba264112d8e505cc9ad494af", + 
"0xc7f3a3e75dfa75b48ddf293608c2f6fb6697cd51a2dbd4ba3e8d987c86f50426" + ], + "prestateLeaf": "qa3vm4/HuDuyj8khkZxhXKZvFhhxiGOW47bfybP4mndme0Aek0OdIq6kExKRWSbRN/ZarQ7685YsovxpEq44G1IERrGbVHAcJjiwarh/+8NVYNACr2wb19TRdEGFPRDlLmV2/qurj0h0uZ7IMp0wEhAbY6sXHdaOWO/NZlt0BZIoYWtkJ9ihNQ==", + "poststateLeaf": "YBJCCoIE4ygWJVqDrA3J5qZfqufDg2AAhnQqGE/xqEdmjlPDRRUW8P4coStKWBSYkckGmDAUq0dKzxWJMJgXJcaN0a5ZGf4tQ0c1txJ6EJpXRBXLSldzN5ilT+KdS+S7U1qcBlVBXsLwb/IC8Gv61ZatKpKDAQAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "GJNvpfWnfXEBn+maEIEAP1HR6sP0guRY4m902PH+sf4XrP1cUCcbxAgMQZKbhZXkvPf5q0J2Gn++UAwvVto3ORTlv0qGT366PYAkBZsw2BXFVC6CVCFqsm2iarYI1f2aoUM/wbQlzW7eBrhvPvxNLgt4rnhX2RS85mEqocaA2Ud6qBwPAlvkUsSGQjB0zlylLd3OO7IA3LeLzsdF3xldlgcrEd6WjKr2yZyNxFJnaDn0fTI=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xea78fd5eba89918cf126ce1268f142622b39d1e9c1fd9d60771e8db8468f9e83", + "0x15e000fd4ec88a4d5617d54fc6c4649ffc00c517559b2634b3eb0d9e877ddaf8" + ], + "prestateLeaf": "GJNvpfWnfXEBn+maEIEAP1HR6sP0guRY4m902PH+sf4XrP1cUCcbxAgMQZKbhZXkvPf5q0J2Gn++UAwvVto3ORTlv0qGT366PYAkBZsw2BXFVC6CVCFqsm2iarYI1f2aoUM/wbQlzW7eBrhvPvxNLgt4rnhX2RS85mEqocaA2Ud6qBwPAlvkUg==", + "poststateLeaf": "xIZCMHTOXKUt3c47sgDct4vOx0XfGV2WBysR3paMqvbJnI3EUmdoOfR9MgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "AKxthvJXDO1M+B5i5mYyidmHRAP7jB/1bRMCGz0GmTC0gMq9Dbq91Rdmf8ldoYZXMx9VrzrVCchNVeIaJVGIhMgs5UUwPjtKBjItuhSkctd9k1NYFQBgJc2mPMb7H+d/VmyoiegxKdi5Vh4VTr83N+EKyZDdwb8JKRB7D1wMM7r55No7k2fMANLFOT+VSPzg70WPpaY5HEd1uR5IQ9OnXNjdZBv7wKmk1q9AUIpqgrSK+r2V9d49qaPXyX+XzkNqsWLV3a6dxJ9QG1JqD25RLsUUS0i12TZh0jpdkeHP7FP4R7LPUEODXVroI5+bYNSlRVe7OwNrDk+htHkHKODS0+FBVfulr6qYqoAUBEa0XL/veJdMgjGDh+HTD0x46E7PtjzTwmBMS1H7Vh5ErVaQtDx7iLY9N7moJqI1heBDheMiEDil98Xhmqj3IUncZ1uJfWDqrESS8hwah6WJrP7elcBWaqVI4DFhy8KMtUkeXqd6iV7nycVjbtD02+T/uEFT0V9qzd4zWA+wrvGxitlsrIwcMw==", + "commitments": [ + 
"0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xb8cd587689045570423cd43026a2f2739adb54b24b56e7f85bd9fe851c454d26", + "0x7f1a5e9247f9c215ec0c2a34cd80e1931b96eed90dded0707e4c2772d6a2b2be", + "0xd40445f4d71545e4bed22b752ca2110e6c9bb3da8af4abfd31554332f2f2658e" + ], + "prestateLeaf": "0sU5P5VI/ODvRY+lpjkcR3W5HkhD06dc2N1kG/vAqaTWr0BQimqCtIr6vZX13j2po9fJf5fOQ2qxYtXdrp3En1AbUmoPblEuxRRLSLXZNmHSOl2R4c/sU/hHss9QQ4NdWugjn5tg1KVFV7s7A2sOT6G0eQco4NLT4UFV+6WvqpiqgBQERrRcvw==", + "poststateLeaf": "73iXTIIxg4fh0w9MeOhOz7Y808JgTEtR+1YeRK1WkLQ8e4i2PTe5qCaiNYXgQ4XjIhA4pffF4Zqo9yFJ3GdbiX1g6qxEkvIcGoeliaz+3pXAVmqlSOAxYcvCjLVJHl6neole58nFY27Q9Nvk/7hBU9Ffas3eM1gPsK7xsYrZbKyMHDMBAAAAgA==" + }, + { + "input": "d900eUWWrZtBabto65ygKjqnV/jGt+IfbH8GS+AxWdgUIpGGArTHTIUzmZhsMtssGWDMWdsugOjLbD1yhls8RJwRAF8soTs3rhSmphVFOn6KPIc4lyDpPBckmxItmGlk5uP9+JbcyPY4F5I05UAkmtnuoyYMvPyurSCsCPdhwALOXckHdEDSGjaRqY+rugGthFbFICxQXL89u27AhOO7uAtsfle430nfUawwhrr7Kvzfo6UYSQur5TkZGaZS29jvhfY+yR3H+c2U5k2oUfOOLK1QEBNT1WcQ6gywypQwRCQLbV5D73/OBatPjOiKx5KAHU3AdHzt8/0+Ej7G6p+fVeWnoasKALs1dX7I3MUuPPGqZRYrySJLPotrEyFj0ZrNZkzzPhO40Wy+J3mFj7INUvfQSfSFOWa6b+YRrxOJ4tI8fpnzciM1y/Rx64mYOCOQxnBP50Yn4is5RLseJYzSCKTl8yBqyHydCvEqOoBGqcJvtkQmNFTctVfNK4oAXIRxeAYkvRA69Q/xSnBFtKZY+Zid0BlrKeUuT3YJSU+agpGJC0Imiy8ufl6/k1m9B+oKng==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x7e80cac4eef5ab4b93c9e40f44770c65651613c8cca6839c872515ac2de12d82", + "0x2ec375aedfc478bef69a5f4d9d74359cdfe51ca01698b179212b5b7a538fbe12", + "0xd99eaaa52afb616a2959621233062e8c1ba07fea58f34d00beab630442df1c89", + "0xc24b6beb031f922de9b4c4a48ce28bd3b32982126d4104bfd0dabfaf48dfb56c" + ], + "prestateLeaf": "qmUWK8kiSz6LaxMhY9GazWZM8z4TuNFsvid5hY+yDVL30En0hTlmum/mEa8TieLSPH6Z83IjNcv0ceuJmDgjkMZwT+dGJ+IrOUS7HiWM0gik5fMgash8nQrxKjqARqnCb7ZEJjRU3LVXzSuKAFyEcXgGJL0QOvUP8UpwRbSmWPmYndAZaynlLg==", + "poststateLeaf": 
"T3YJSU+agpGJC0Imiy8ufl6/k1m9B+oKngEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "96g+NaYbWNY8fhK355lnCK0TarKbCBPF/3c0Kp/cF9H1/CncnguutxlZHcJbAiN0IeW10CXmDQSK7gnlIF57tv2fvdaviKJ19s9dlIHjVbWckLcm3kwkfaXZEffsPHFototrK+04ODDNiO+ONvKPANCsi10R11D2RlLJ6JavAVxFUliiLnfVgN8OPHkoG1o5z+LQcYtqoQI+iBCFXoqPq/2ielSTbHn2XbAi9rlZEFRKovLqGB5TOvjZTd0M1yMvEgd9BJjJJ/4=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x8e4daa5e0393a3e6afe66e2d9ccf0684a559f41c5d113d0adf9a27d46fc7c44b", + "0x557b27463478dbaeed224211344a3d1800a9aee66c3587ea633c214388014a2a" + ], + "prestateLeaf": "96g+NaYbWNY8fhK355lnCK0TarKbCBPF/3c0Kp/cF9H1/CncnguutxlZHcJbAiN0IeW10CXmDQSK7gnlIF57tv2fvdaviKJ19s9dlIHjVbWckLcm3kwkfaXZEffsPHFototrK+04ODDNiO+ONvKPANCsi10R11D2RlLJ6JavAVxFUliiLnfVgA==", + "poststateLeaf": "3w48eSgbWjnP4tBxi2qhAj6IEIVeio+r/aJ6VJNsefZdsCL2uVkQVEqi8uoYHlM6+NlN3QzXIy8SB30EmMkn/gEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "naVpEVoOzteWfeK9u0WSh79jh1i+pl3BlBXzrAiSCnl+gO+IAGE4FszQN4aVfZGdBodkAQlLNsBmG7Or725IFhcHJ+E82JlLRg9kpMKHtTl5UHvqPZ/hVv2XCZfj9le0df5lz0e2Lsp0XGEReYz42q4fkjxWOLa3rFwkVQgaePf2tgwIwrPd48iweP0hwbOcgFfWVrn5g0Ui53uwKa8KCjmKeuGsSJKGR1TxTAb98mFs5SVzUPE7ajmErHfe1A4JVqSD98D/9IG/OnRYzPidCZytI8WhBwfA3235dO2Q9k11Pg42QO6wfbsdRF8dJ8jCc1zJNQ==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xa74f9004e2958c050a01134b23c268d4c10871ce1f5910d6f67a5224fdaaa077", + "0x925b929b966d69070e5b9dd4571730f52833440c82d21b032759d39c6b2a380d" + ], + "prestateLeaf": "naVpEVoOzteWfeK9u0WSh79jh1i+pl3BlBXzrAiSCnl+gO+IAGE4FszQN4aVfZGdBodkAQlLNsBmG7Or725IFhcHJ+E82JlLRg9kpMKHtTl5UHvqPZ/hVv2XCZfj9le0df5lz0e2Lsp0XGEReYz42q4fkjxWOLa3rFwkVQgaePf2tgwIwrPd4w==", + "poststateLeaf": 
"yLB4/SHBs5yAV9ZWufmDRSLne7AprwoKOYp64axIkoZHVPFMBv3yYWzlJXNQ8TtqOYSsd97UDglWpIP3wP/0gb86dFjM+J0JnK0jxaEHB8Dfbfl07ZD2TXU+DjZA7rB9ux1EXx0nyMJzXMk1AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "b9X5EOqu99dYeLiCgE7ihEmhbhQoo7AQHkRuQ6Js9yGGxvYQDGhVMuo+g2ay56ViV0lmEMsW/15x6o5RMVHeX7ScDfuu22aGhKPo5AGnKj44OaLeUogqLsNp7q40CMTBZH1bkXgHiOGvs+GU7ZAPEuWwKwvKXkbnMSAnD8ybu1eQiAdTGDXglqUI4e7qhDuapIBaVPuUYMnmBc3580o9f3mYP4NCQwcJGafH9B4ToPppHYdjcT6dcTYOp01eMoiKzTsE6nDEIa/atPusKgajDk82L53RuEQBMRmAPGisPISbEog8l8kN+gRfmv/RnRNGUcbUhRUWSKsAo1EtpZOhh4U1aBj1WUAQ/SfnPh9cBUqrqvR4DvJawwxheHuaKi2rzT1esbQhuxT+W+iUdo6pRXJGXiTAvEyippY366NaHUZDR9vN03iU/ryJYkKp3KYyOGZ3AZV0py20gwh8kQXfoc5gVpH24BOrv6MT", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x43937b0f787a47a32041578337614b4c322750e68a582e8f0ebb39a09551a876", + "0x934d1b6297160f8a3b5975d2be38e7789a593e4074f0f8c536aa3644dc0bfea3", + "0xc9b69ea06a2fce42e1ece4b565badc6ce0a7dc9937d13373c57e2bdd19c7b921" + ], + "prestateLeaf": "pQjh7uqEO5qkgFpU+5RgyeYFzfnzSj1/eZg/g0JDBwkZp8f0HhOg+mkdh2NxPp1xNg6nTV4yiIrNOwTqcMQhr9q0+6wqBqMOTzYvndG4RAExGYA8aKw8hJsSiDyXyQ36BF+a/9GdE0ZRxtSFFRZIqwCjUS2lk6GHhTVoGPVZQBD9J+c+H1wFSg==", + "poststateLeaf": "q6r0eA7yWsMMYXh7miotq809XrG0IbsU/lvolHaOqUVyRl4kwLxMoqaWN+ujWh1GQ0fbzdN4lP68iWJCqdymMjhmdwGVdKcttIMIfJEF36HOYFaR9uATq7+jEwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "wCn84Ysh1rI+gqpuVqxHuCSePXQZxFjuSX9VWfGVoSh0PUy0geNnEObkVqE2U48nLt92YPJSgmKFw/JQ7l0XS1teRVBmVWQ1oXtxxp38FDdEZUHui7GKI22m55tbLmDOA3TaX/UgjemQWS3vtEJPO1c4B0GZhepYJvYNIyi83Fjgn26tRFT43Bg4qY4X2vrpMt8kGtLeuoQDFZV2KH4hmEyWlkii3loaclTqwctZTO9Wor6543ynogCm71Sedw80lE13+wOPTbi/Q2Rj0xgGPeim8d+YQ+io8z0gEuL0wh3xBFugxMqe7PjHELq5sHzGkWaHKTmBBUrstI30eacUT67xwwyvWcMY8QoBRoql8IpwQY3i", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x77ed1b9215130e26e30bb32940f10803564d21eaf3c34813a88b3620f52925ff", + 
"0x6a3c76ccc0da597941e5336c78c7e74359d0172c76f4ed01eb4c17687758a873", + "0x9395db5326d485ca6b249b9d2cae2d71abb6b5a06c5bcf337d886bee66abf4df" + ], + "prestateLeaf": "GDipjhfa+uky3yQa0t66hAMVlXYofiGYTJaWSKLeWhpyVOrBy1lM71aivrnjfKeiAKbvVJ53DzSUTXf7A49NuL9DZGPTGAY96Kbx35hD6KjzPSAS4vTCHfEEW6DEyp7s+McQurmwfMaRZocpOYEFSuy0jfR5pxRPrvHDDK9ZwxjxCgFGiqXwig==", + "poststateLeaf": "cEGN4gEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "6ig8b6H7sUxHcjRDPImXIbLVndahV896pfdMqOe391tOFBOOiZFn6Kzfm/qFrwab3NQvNNc2xcVI3XxA0qDKyRK2WnzkZ7XR1Hrz5Tctr7h5rUy1COhwV/76kNICQ5TktETPDI5EXt0j7II/C2zxDSJHvTU7/IGuhG5rSQ5H5b+Xnb9z2I4mpqy+Gye8KOihYFMl5atGDyGReKO3lUe6Au7wE1/n/YaMCbLcc/0E6pxsqOqkgiIKoKhlVHPPfMXwtg81a0fY0sYz5fmoxJmGExLmTRz61eGb2SYsvBtx", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x969ac4305a9133683ee676aaf8df9c9d0d6ff69d9f84f93ded06ae5ebb63af27", + "0x69408a441399bb046f0422718652ac8c2a61649041458ec5ed52f4f50bc2af5e" + ], + "prestateLeaf": "6ig8b6H7sUxHcjRDPImXIbLVndahV896pfdMqOe391tOFBOOiZFn6Kzfm/qFrwab3NQvNNc2xcVI3XxA0qDKyRK2WnzkZ7XR1Hrz5Tctr7h5rUy1COhwV/76kNICQ5TktETPDI5EXt0j7II/C2zxDSJHvTU7/IGuhG5rSQ5H5b+Xnb9z2I4mpg==", + "poststateLeaf": "rL4bJ7wo6KFgUyXlq0YPIZF4o7eVR7oC7vATX+f9howJstxz/QTqnGyo6qSCIgqgqGVUc898xfC2DzVrR9jSxjPl+ajEmYYTEuZNHPrV4ZvZJiy8G3EBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "s+gSK/z1Yq6vS7gHZ46TwA4nI4PEEfp3bFQ49VMpa8gBa8sVQJbUvo6yVTk6zuLfkj6tXqYtUyN8jQfH3E6dEy92OlrXqH5u44bYWljAawyOiEV87fUAe5Cun3moBFNpt++w+6l0r4WulyHSi+WIroNSXdSIas8wKBHb5JNwi+6x/4Zg9aZQ0A0ZweBLp3+LHX3SpYSvEzni8xxsSC+rhAl4CQhlMUIrZ1wMorsdgCOthcbNXrKtfpa875ZY0cacjXutXimuzbudNOes+6f2y+8=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x66145485300432cafb0f5c1387c5cf90e275dcd905c24348107bedc231ba690f", + 
"0x7388d16dff1dedd7e404403daaeed4df3a9bdb3baa81c586662624f13dfa56bb" + ], + "prestateLeaf": "s+gSK/z1Yq6vS7gHZ46TwA4nI4PEEfp3bFQ49VMpa8gBa8sVQJbUvo6yVTk6zuLfkj6tXqYtUyN8jQfH3E6dEy92OlrXqH5u44bYWljAawyOiEV87fUAe5Cun3moBFNpt++w+6l0r4WulyHSi+WIroNSXdSIas8wKBHb5JNwi+6x/4Zg9aZQ0A==", + "poststateLeaf": "DRnB4Eunf4sdfdKlhK8TOeLzHGxIL6uECXgJCGUxQitnXAyiux2AI62Fxs1esq1+lrzvlljRxpyNe61eKa7Nu50056z7p/bL7wEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "mM4wTpKyOkZgpKod6R3xAcVeU0tiiklnVLM+vGmInIc0MnLiDjuNBTlIhm+6zZW5xYgVk5aOWWXb1MQjFcDEKfZLkn9SUVFjy01ISZlM9dppiUMFjSDjnzbEgKPE3lBuOJ+wfy/S+T7NNOxGboy+4HJbasw9CeK0orJTGpa8CAD97ZF8tVmhLvLQHfPQB8Be8ighEFl9lh5wFLbIzaiBHQdYSKSZRJUL3Rl/T1Bt7KLCLevgfszkxs/JWxA5woclF3ugsFZOg76RCeUVp0c=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x32f7beadef18e590ffe82aa5e8d36005de9bcb0e7e78a0c68381eacc4947e9bc", + "0x75f2eebd6c67f78c7a6804b8c4d7637d12be4f35f505b4359e89e7bec6e52ec4" + ], + "prestateLeaf": "mM4wTpKyOkZgpKod6R3xAcVeU0tiiklnVLM+vGmInIc0MnLiDjuNBTlIhm+6zZW5xYgVk5aOWWXb1MQjFcDEKfZLkn9SUVFjy01ISZlM9dppiUMFjSDjnzbEgKPE3lBuOJ+wfy/S+T7NNOxGboy+4HJbasw9CeK0orJTGpa8CAD97ZF8tVmhLg==", + "poststateLeaf": "8tAd89AHwF7yKCEQWX2WHnAUtsjNqIEdB1hIpJlElQvdGX9PUG3sosIt6+B+zOTGz8lbEDnChyUXe6CwVk6DvpEJ5RWnRwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "fXxLDLxHCvA4pJ5lr6+oXvZGW1UrxF0hDPn3pSkL7R7PHWCoW3AZ0N0t7kqIniKqjULZZ/ARy5fj4sXQhRWwJFRVSFs+hYmW3OXAUrrX2OIUAsPjj1q/Ooak5BzMFZ0INZgH5Nc2dGJTdnkatSf5yB3OWpTbx8zEhemmEBbQZIn2RpOC1S7kw9+bSl+hhT40Pn0H6bzrowytyMJZonr4d9Z7pPw/cEG0oxP0bflw380hAtB7aW3LX8qf/OpYRr4Ihwfj2eu0+ZkRLzHhZ2tqfqw1q7fVbryfpFij2edKyzKlzV1DLAplTUGNVdrV1NyVgWvny0MUAiSJJWPeRZXKqehiP4+myA1nDKuXYYgWlJLmPCJSoWjGEkDxltZGByDaLNkFC+eW25XQ2OMbdfzC2N3u60c7/cr2CPbiHzAhVZg/Wl8s5cDJMwTA8b38W3yks3QJGDCtgahuK5jUlWPRn88Ew3rz3NQ2l2fC4X3ZCteKYQFUDDX8IE/WeVeAmcH6yvoOEUM=", + "commitments": [ 
+ "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xa363a494c41749fa34acbfe0db3a4123732d2ee07bb972035424a4fbb757a992", + "0xb4ba5242df70b066f1fdea7bd089ed021a3ddbf6a8cd7a755aec642983c7946c", + "0xe1582c446cf455e06911c8bc76c3201fd3fd37c14241919f6aaab0e50d0393b1" + ], + "prestateLeaf": "35tKX6GFPjQ+fQfpvOujDK3Iwlmievh31nuk/D9wQbSjE/Rt+XDfzSEC0Htpbctfyp/86lhGvgiHB+PZ67T5mREvMeFna2p+rDWrt9VuvJ+kWKPZ50rLMqXNXUMsCmVNQY1V2tXU3JWBa+fLQxQCJIklY95Flcqp6GI/j6bIDWcMq5dhiBaUkg==", + "poststateLeaf": "5jwiUqFoxhJA8ZbWRgcg2izZBQvnltuV0NjjG3X8wtjd7utHO/3K9gj24h8wIVWYP1pfLOXAyTMEwPG9/Ft8pLN0CRgwrYGobiuY1JVj0Z/PBMN689zUNpdnwuF92QrXimEBVAw1/CBP1nlXgJnB+sr6DhFDAQAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "BC+MRlxbvGUWnKSyfDm/ivblXLU9B8lijzAPSyuyDdTkYBZ/FSK4+2SMFmtm8KTghrh5OEvpwDz+NYX+y9wcxWFGBxy4zmC5QAmAZO78bJ0wSMdodhKgjE7JcPsUPgMuMGpvAa3TOQ==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x6b0b2b4094f608f73c61f68be51329aff2fb0a5179de2b9b9467bfe9c3cdbe83" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "BC+MRlxbvGUWnKSyfDm/ivblXLU9B8lijzAPSyuyDdTkYBZ/FSK4+2SMFmtm8KTghrh5OEvpwDz+NYX+y9wcxWFGBxy4zmC5QAmAZO78bJ0wSMdodhKgjE7JcPsUPgMuMGpvAa3TOQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "3L2fS5vNMdKj6LqS47x4oHrNhIIxLe9WYJ8inef4jgT/WXKUwlXH3SH1x9WDtYYD2A==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x202223f2c589349b4674f6a6e32bf2ade3d3134b59a5c7c663117ecc87fa1455" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": 
"3L2fS5vNMdKj6LqS47x4oHrNhIIxLe9WYJ8inef4jgT/WXKUwlXH3SH1x9WDtYYD2AEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "+1FoFm7ydb3xXIdbfSHG7FduytLO9fpgdX43FT+x7FtCbosJEV75Q81gxdLJ8JMMS3/Cxyk0dnEE7f/kI8tiaCdpoFMbXVSPETE+VhpYh3tqUsPx3XvxPp3UnsTT6QwXSWshkw==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x6c7492b6bf953a71885ecd61168a95fc2e6d73ee46df0bf5ef3a366a34c364d3" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "+1FoFm7ydb3xXIdbfSHG7FduytLO9fpgdX43FT+x7FtCbosJEV75Q81gxdLJ8JMMS3/Cxyk0dnEE7f/kI8tiaCdpoFMbXVSPETE+VhpYh3tqUsPx3XvxPp3UnsTT6QwXSWshkwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "3NaoqHhyoYC9jYZUqLHxpGgV4QbDipeCZ8KMnYabEON2SusALPKdC6RsHpTor+9WiZGPVCzoIVL7uDeHQJqY1VQEYtpFjOhrrFzKQbTdgk1cVDGBR0lq6ePoElwQk8dbnXLuKtFoxm1dwXWXP1GTPup292cFh1z48m/uVunD53XXUmegojcj/z0XyOdUaPMQ/bK69bFNcwtoD3ZlfGukiNllk7jy59i3dK2KhabV2AaQjGJV6GZZmUku3reZ7E8nw536tqEUb0FcOCtXTjCyhFT5KyBzuDUR4w5MbNWj04L5TnRgZheADM3xb1XdBUrHYJNNCpDUKCUnYrHZrc4KEgCi+q/KppcvbvcBOAE8hoceshGtczZDXEdeycCZ", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x01cbae12168f2f42f20bc0e26d21447f6e272d77ebea52b645f594b57fb2b6f7", + "0x0392329f9ca01abab9d2b1cade00e7f99d0fcae8cee9f22c77b7adf1b4681432", + "0x16938f33e76b533c425af0f345683dbc3f3d06abcef46ed196da3012aba4ff7e" + ], + "prestateLeaf": "PRfI51Ro8xD9srr1sU1zC2gPdmV8a6SI2WWTuPLn2Ld0rYqFptXYBpCMYlXoZlmZSS7et5nsTyfDnfq2oRRvQVw4K1dOMLKEVPkrIHO4NRHjDkxs1aPTgvlOdGBmF4AMzfFvVd0FSsdgk00KkNQoJSdisdmtzgoSAKL6r8qmly9u9wE4ATyGhw==", + "poststateLeaf": 
"HrIRrXM2Q1xHXsnAmQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "bos9QTnn1rzgaPL0vtnPW9Jr1bj31IF1UGb7alBiDWiyNMWfXo+vlXuCdCKkVIn1kpB0ETLS05ZThUpi0rwMB85JRI9jYIiaZtqHECqmODDmi8HgMxNH8Tf3M9WEc2E4sN47vB6LJa7sZOgIfviCHla7LIUI6PCFNXMz6yg/eUNtQbA11qplkdP1LmRTOrw3Ry309S9eWmqkaVfDjzG94kpvM5yHl73JMdXVp/v0vCxFuuIM2EIVnaHbGbK2ggqwaHFIHXdSN2VQ/2O5TbG7gxEdxy/LWjV8RuttAbF1SAxEE5bXGiz6p1bkpLwcIzMTcdLSMjwwBgd2/1auF8Vm3NnAO+Rqnw==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x91d6e1afd0a075888cc2d5b5e693bdda3803325e39fbd6336ce9bc005c2d96e8", + "0x948020ab27061f5f45c46affe3be5203860745491d71540e2b860330c0dd57ec" + ], + "prestateLeaf": "bos9QTnn1rzgaPL0vtnPW9Jr1bj31IF1UGb7alBiDWiyNMWfXo+vlXuCdCKkVIn1kpB0ETLS05ZThUpi0rwMB85JRI9jYIiaZtqHECqmODDmi8HgMxNH8Tf3M9WEc2E4sN47vB6LJa7sZOgIfviCHla7LIUI6PCFNXMz6yg/eUNtQbA11qplkQ==", + "poststateLeaf": "0/UuZFM6vDdHLfT1L15aaqRpV8OPMb3iSm8znIeXvckx1dWn+/S8LEW64gzYQhWdodsZsraCCrBocUgdd1I3ZVD/Y7lNsbuDER3HL8taNXxG620BsXVIDEQTltcaLPqnVuSkvBwjMxNx0tIyPDAGB3b/Vq4XxWbc2cA75GqfAQAAAAAAAAAAgA==" + }, + { + "input": "6SaxxD+YGI1vnZPnjyTUukGwlnv+6mi+kr0tPJNb+byqdsnp3EmNiZotRPcjKv0mSrN2QlaSgVvtpd1QeM61kh/HHzPqeyY+yl5q8oNTlh75uxskT6MdhHH0348dNg+jR2PkDEXArgnB+l036KgkH42p7RC8eX9fSvXEWnA/9y/khnGRJcfxPqXhFOSlGtFPbI/b4Ia1wmPRwUN80iNxypnlEWgyfClqvaftdOiWDlKOnzzBR3G0ZB1j7/AzKS0Z70K2nVOKtO7oYjMb17Puu3zVIpka2uh6iwLwa58wZlX4Wt2BG+r98k263zW90XyrYt00m6E8diC3B+t/7j0rt9gV6rlBbY6F/c8cIRENxBqWXcc3iemFBr/LYLI=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xec846a6c4315bc6e764fb641a2afe9f15d5eb1861714c76af033da30c15b04b2", + "0x6fb5d7ce201d34f801ad591867e189294147d20f911077e1952d0b071bd3fa31", + "0x0128bf2377b1952f0146ff7ae00ece8c7d6688cbc6c7cf2418a43bedc1fe5b38" + ], + "prestateLeaf": 
"peEU5KUa0U9sj9vghrXCY9HBQ3zSI3HKmeURaDJ8KWq9p+106JYOUo6fPMFHcbRkHWPv8DMpLRnvQradU4q07uhiMxvXs+67fNUimRra6HqLAvBrnzBmVfha3YEb6v3yTbrfNb3RfKti3TSboTx2ILcH63/uPSu32BXquUFtjoX9zxwhEQ3EGg==", + "poststateLeaf": "ll3HN4nphQa/y2CyAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "7HvyePr04PVkmE4UxQQoYO5qwGyrIYsGY0wmtlMaQ7p6ahwsCXxYL+1Rc7a9lz/oRaRmmhHt5DW6RFtBukhXe2ISy1ftItb82fmikh+Byu1fvJlVfkLfl5dzhoeXv8UHEroxoDNZnhOnt9/saECxkKX9Hp3y9wERFy3uXJzu68I+6tLcK7yRb6ZPAFlJgc8NFfYwu0SaXbV6HUPt50c0EqXs2B4vrxNdiJ5JPDm8a/bw6TB03dEZ7En2cbAYpTyrFjOTws2NdjWqkW1WH+RrSCSNiNSsbEEC6RIEfEss45JL28Ny9YjAnjwl31jpIkH1eY7SO5zuvKsxloZ8NeBTJeaiPu6MfO9WjO1HMi9usfe5IbjNT/Gsc+mcBFszP/6NHcyQYhR4qPNfOf5xp9wNjVYGOTSu5iQUPDfDgEEbTA6BiWIGnLIVT3xkWRcUGVoKM4cct7RHAie38r4/RMhQzfyEQ+Hjz1bZi/+DrXfUFyS6Cvn8G1eHcPXRUHZ+dLy2GzICgTLkTPbbTk8SgvepgdFIJPwGJgkSCYEvpWStEon41UiOP4ojQX4OFGwFrZ/wCdCGAKMB0Wa24Mww9n3ulrdRa95weEg=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xa26cbc2a66287c2836fa49ff8aa3f3aa1e0e5287d772f3b68304ae8b9b204a35", + "0x0cec7fa9e32761ef269f706014f5ca47b83388f58abf405cb3d4b58616b561a0", + "0x34751c222f5447e5ce41d6f871bf218a1e7e270ddf360d0a200f1bf523d36e5f", + "0xa6cada596a1452764f92fc089941d8205a08c8ce544d084eef7f65ce9c208b3e" + ], + "prestateLeaf": "uSG4zU/xrHPpnARbMz/+jR3MkGIUeKjzXzn+cafcDY1WBjk0ruYkFDw3w4BBG0wOgYliBpyyFU98ZFkXFBlaCjOHHLe0RwInt/K+P0TIUM38hEPh489W2Yv/g6131Bckugr5/BtXh3D10VB2fnS8thsyAoEy5Ez2205PEoL3qYHRSCT8BiYJEg==", + "poststateLeaf": "CYEvpWStEon41UiOP4ojQX4OFGwFrZ/wCdCGAKMB0Wa24Mww9n3ulrdRa95weEgBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": 
"ul0dLBs/YmNE/uyOROGboezkmoislGftlZIN22FEHY+3nJzijZvf5hUnIeUjR34oGMXa8SIfAmPVhcM5MT3XIR6ZsZSs1Xg2Q4gsIz+lIdlke9uLi01KCa5+2ut8Um+3u9co+h9X/vM4Iwwmz43xaOVUlb6zn+4rBq3gAbFrxq3OrpYJJpfnxMnXp5qdYqQjH1vUYdYlrQKYYZKQHw8TVKHZZ2z/EV0XyqrUTzO6TRFRGNxcYtV8/OX5ErC3PF0FCoOat2iX3wgsptUNB3DhK9LuFu9G6/RJDOhfw/Ehps79yLjFYAwSyKi+OL874MkGgUsTiUTElF/EFAAfuB53aJHYPdEQi2Z6Zw74Ezzvh3oKQtp/Q/dShwyJ1/uZj2xdnfqwTWdI66ZcNNVT+PWTtLANTpFiJtgiy7f4f7xqWh0tcDqDFxwbQrzrdh+U3Mm7EZjQjACGLkCB1VNUn14JFuDnxfOnpqRQgdrzdFWmzsX/JFl4i8uCN4aYNCe1f38wMVb2d+1DqIzL20fIuMc7CjX/u+hojwRQZSTwDCLo9xMjB5sEFYu/NI8QyUNvx18pXkkLWIvvL8BYFkhB/lpzc9NMY6HCM5kV3Odk0u8FIHRm9B+Slj3gf//H5DbbU30pu3tJ9FAv9HkDs1U=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x258c3147665aaf5c5fc7c739f6c01e58759a0516ea1e3d084a03275ae29732a6", + "0x2879dc29eb40af28eb88ed17c047548bedbf6dcab331ba8b6c050fe6803892a1", + "0x85ad14cf814e5cf867611f6222ceb3f2c82dba3f6b1af34ecd236a1d8d9959ef", + "0x60e4344aa77bf92735092f446229eb16a0e91ccf7351c9a9e8b207632861a936" + ], + "prestateLeaf": "CkLaf0P3UocMidf7mY9sXZ36sE1nSOumXDTVU/j1k7SwDU6RYibYIsu3+H+8alodLXA6gxccG0K863YflNzJuxGY0IwAhi5AgdVTVJ9eCRbg58Xzp6akUIHa83RVps7F/yRZeIvLgjeGmDQntX9/MDFW9nftQ6iMy9tHyLjHOwo1/7voaI8EUA==", + "poststateLeaf": "ZSTwDCLo9xMjB5sEFYu/NI8QyUNvx18pXkkLWIvvL8BYFkhB/lpzc9NMY6HCM5kV3Odk0u8FIHRm9B+Slj3gf//H5DbbU30pu3tJ9FAv9HkDs1UBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "5AOpWwV0q9zthtbxpvpnf/utrQl1riNtoHTJPRm2uWgO6XaJlJQNKMXa", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x5f19fac317da21422bb1f587b1dff8f3df060092fdbfdd227271e3bb3fc182fb" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": 
"5AOpWwV0q9zthtbxpvpnf/utrQl1riNtoHTJPRm2uWgO6XaJlJQNKMXaAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "svQ4eiNJWQvSsDd6UeZ9Vtc4B7u8MwMnxWdNmlP6LtqRbUIflXt3qcub8ceF9a37f10jXztRn/ey1pexOjfxxtCbyXKLjNGVLw0VdpgBK6D0cnPJsLuJEqZz7dgObU2Q4lY9rATN9ENjIu+QRW5s6kNiaaZwO7FC4xp6u9s0fnQBb9J46ORA5vBIclmqArka0tHUlGCVh4wm888oSMyKCpfGbyjRJotodRi+2JC6UVcAemTUqmO9ckw6pGZzupdgpD6HCsAHhATZIufyS4Uu+gzEzbCJjUz4aS0DvxItfw3LrNJnwv2pwqvstnxcx4N1loLN6N10yZ2qftCdhiv/Ci/DMf3wqn8ZhNOt9fXwMWF0EmhJznNMG+Hyej8g4/nuTMzwx/WcwATTsx0JJicXPDXphVOIYEw9lJlWnQ==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0x52311ccdc69e2b3309a674bf5475cd132d2c32ed815028115cafd0f497ea6eb8", + "0x5108714c345510560c7e8ad860626f648555dde85c5ff3c1bb9dac1b4b440481", + "0x09c7f822bffa5ef1ac75cad13f6b2741bbe5e5ce34a347d16bf7e1b48b8c7687" + ], + "prestateLeaf": "8EhyWaoCuRrS0dSUYJWHjCbzzyhIzIoKl8ZvKNEmi2h1GL7YkLpRVwB6ZNSqY71yTDqkZnO6l2CkPocKwAeEBNki5/JLhS76DMTNsImNTPhpLQO/Ei1/Dcus0mfC/anCq+y2fFzHg3WWgs3o3XTJnap+0J2GK/8KL8Mx/fCqfxmE06319fAxYQ==", + "poststateLeaf": "dBJoSc5zTBvh8no/IOP57kzM8Mf1nMAE07MdCSYnFzw16YVTiGBMPZSZVp0BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "XTleBrkuE5hWTQJFJRFhD12lyVTeV9AXaG/EaSuKhyAjK1EWJGArTmr/+vXyPgm7Wnc3TVRD32GpQIl3o/6bg9nHQ119M2lXyw6mAyypwl5IhmzIns1D3nhgrZJ+9Nz68SRWJ84oLv0Mm33ZHLAoCYmpGqFL4IIWPg96Ds5UHzF8bbEF2SAiYV3LLmGs1sblREH8+yYCCRSwgW9m65lYsDDvM7+QqauLXaOxWEQJcW8zA9G+U402rkB4q26uXxUkUcEsXGWW6d6qyE2uG8C54BWb2eYaaYVC+rWFdIUwJaXJvD7jJhFIj36NsjxZdX2Wckt6j3s2EmNiQwQNJgPqyHd/1cLANrekOoGKWOVm7P/LNpDEoxHntBvh7MVnEorJ", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xc34e8c6768cdd7c7cce28f8547e33f113191da57e3691db60e1ee55800fe96b0", + 
"0x0ac6cedb22f964162bf1a543d006b8d232af6a3f44076cbe30772f9f28c27a95", + "0x0809ac2786ade5f79fa427a0377b89ec99ac109ff230ec81bb4d609bcf7fd954" + ], + "prestateLeaf": "XcsuYazWxuVEQfz7JgIJFLCBb2brmViwMO8zv5Cpq4tdo7FYRAlxbzMD0b5TjTauQHirbq5fFSRRwSxcZZbp3qrITa4bwLngFZvZ5hpphUL6tYV0hTAlpcm8PuMmEUiPfo2yPFl1fZZyS3qPezYSY2JDBA0mA+rId3/VwsA2t6Q6gYpY5Wbs/w==", + "poststateLeaf": "yzaQxKMR57Qb4ezFZxKKyQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "eCmovNYrO+YV73aWboXDY/ToIZIp6UfGwIarwJunhPk8EVjTQb3x5K8BPPMG5KRLbTh12+Pu31HC2xXyg7F+SmHbXmt+Uog9FXvnrI9emRAk", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xd517103b16c5e12cca833ae2cc160e3d11c59c20aa380cfa5284aa10bdf4709a" + ], + "prestateLeaf": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "poststateLeaf": "eCmovNYrO+YV73aWboXDY/ToIZIp6UfGwIarwJunhPk8EVjTQb3x5K8BPPMG5KRLbTh12+Pu31HC2xXyg7F+SmHbXmt+Uog9FXvnrI9emRAkAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "uY3e1GKA8n27JPlxrOmwzav0R8hB2C1oQbUTsZVf17ImDpTqC2e0aeBUOvA+xfyOEUiVoAhF8zd2EfZdt1PG9U+3kOYhLyNKzmjvzosHvIHRw6W8NXqrd0RsmW4i1OLAzJyxbwHvH5G55VVdE1WTmWh/uWC/ww1JDMuUd9n+QddNQng8Q9lxAzhZ8PQMefc9a7fe/XBinzIBtnd2A6kkGa0zd7uoEbElMcrwqBEkDhteq1ykSOE/WDk4xcazJZqt4eMIj6asb5YTLr5EZQXtS1StsS6KiMZmiowo6rBtHCkpwGHN5Vx3whCG1BPw1l3ZRsYq09YM++DF7krB/rf/VxZGvS1T1UUX5v64dSj9PfR2Hm46qnB3eiA+5vwYAA0m7n+Rb6tSKVgpG/FnMXR29PH86EDxNWSzVIa9yFFhRReIW/BHlV7saXrb3A6Ff8UxQxcpcgMMdYDttUeQ7g==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xcdd63146e8f5f65947495f8051b4696f73b6dcf5965519736f223e4cafb50b4e", + "0x7633254742d3f1fb964adde4a6b281aacbe57b38ccfd3032a0fb8d8ccf1352ed", + 
"0x8a08eb0d9beec9bd41f5244566b9d8c524e89a9b22dc0f33504fbbe3dd5eed20" + ], + "prestateLeaf": "OFnw9Ax59z1rt979cGKfMgG2d3YDqSQZrTN3u6gRsSUxyvCoESQOG16rXKRI4T9YOTjFxrMlmq3h4wiPpqxvlhMuvkRlBe1LVK2xLoqIxmaKjCjqsG0cKSnAYc3lXHfCEIbUE/DWXdlGxirT1gz74MXuSsH+t/9XFka9LVPVRRfm/rh1KP099A==", + "poststateLeaf": "dh5uOqpwd3ogPub8GAANJu5/kW+rUilYKRvxZzF0dvTx/OhA8TVks1SGvchRYUUXiFvwR5Ve7Gl629wOhX/FMUMXKXIDDHWA7bVHkO4BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "l55l8M4SnsMkcXMGROaphUDY4YUoIyj1d4c/9n3Oym78zfdrkHlgmId6gEeOwUsh0VJ3ja8OH42qiHP10gmKlkXLaWdmJBXQTrT/ygJt7FLJzpqmsSC082JcGT8EHbktfLmRtyKa7mNZjgVsZvxEOcR/fCnF23QnW3qB9gy7Z9odXlqrOka01yBXgPIvcWutRK1M6pq/BRZ9WrXg6uatCZcgE7VhSvAqVq7IWWoA9bKzPlDcWqrwKmIgmGY6gA8J83LbX9Pw4lyvj7AO9rJg4hvuRbbWDeMva2DjzBZsw/66T+CQ5htuevS35DWEVBsEcnPjBdEtWS5dqOiNl72levHJIbfM3RQ/pmhhPd87CmEX9AUeRZ1RFmHB6k7I+ZPZF5IO0+y9Vug67oXUSBmo1onZLKRdbiEuClHf9mbh4Ms=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xf9df249199350922bdb3d0d66ad8348746df540c87f092b006ae68637b8af765", + "0x5d6cbc50af5573059040f81bb9855d2685660e5caa57b88126a9ef7beccb8e26", + "0x40d0dc4e0214f314b8114d49e6b3ba842433c341390d7a63ad9be12ad76697ce" + ], + "prestateLeaf": "IFeA8i9xa61ErUzqmr8FFn1ateDq5q0JlyATtWFK8CpWrshZagD1srM+UNxaqvAqYiCYZjqADwnzcttf0/DiXK+PsA72smDiG+5FttYN4y9rYOPMFmzD/rpP4JDmG2569LfkNYRUGwRyc+MF0S1ZLl2o6I2XvaV68ckht8zdFD+maGE93zsKYQ==", + "poststateLeaf": "F/QFHkWdURZhwepOyPmT2ReSDtPsvVboOu6F1EgZqNaJ2SykXW4hLgpR3/Zm4eDLAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "pisVHfcf8JErfu8ZBb1hDtca67kfmzBHQU4ls3/oeZ7EN0mGnmHN/wUvrHqNyS+32HnH5duO/MiyN5kGH1IqY8IVLEjXF84WfbZsupoXyuDvvkYjPEwVlVAer0vCRkugw9Z8X2DctMcRq9Tc3xLTEPel2+jLCceyzbJvvUNjFqh4CWT1zqpqQCgRYfzo1+hDV554ifwMhbrWCrAam+CvGzmvNZ8+AJ6VIdbTPiSCOywS+/UIzz0=", + "commitments": [ + 
"0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xcd4659f1f12afa5949b88198029fc204f63a18d6d4b00c4f8c216019cda7653c", + "0x55b83209b05d78020837a302f30f74151e1c871be2ae0f43d6e3d239a05a6416" + ], + "prestateLeaf": "pisVHfcf8JErfu8ZBb1hDtca67kfmzBHQU4ls3/oeZ7EN0mGnmHN/wUvrHqNyS+32HnH5duO/MiyN5kGH1IqY8IVLEjXF84WfbZsupoXyuDvvkYjPEwVlVAer0vCRkugw9Z8X2DctMcRq9Tc3xLTEPel2+jLCceyzbJvvUNjFqh4CWT1zqpqQA==", + "poststateLeaf": "KBFh/OjX6ENXnniJ/AyFutYKsBqb4K8bOa81nz4AnpUh1tM+JII7LBL79QjPPQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "O8NUg0KaDo18ybTKajXM/sgqEYS37+lewPhGV/2sMAX6bu6pD+hu97cVqUkoazqhtXPFvnwPFy/jQjR+LFOXFZvvjCRgFRQvzzKOb8DJ0fqCRLgr9kiHN9LpqHXVhyHhhlRkem0+AIzvEmyqbgj5pbDQsr0gtBBZYN/IwUYeggMVEhM/rDKEiLf1Z5E/xs1a5ukkKJwXBVAioEmG81qwOBn9FqHWIrclhCUpqZRfRIZPp8ZXl64RTSIXhGibMujQipVQuXUsAOd4QE4HjirdI9SfzsLMesYimfC/eedEkhjL0z9+sjhgp5wJY84LBiNI6ZwrzNGke/zdySctbfKbgj2q6ECX3jy6lbeiNThEdLfO1t+IHM7rijOlNw/r8Up3YgH5ExUuuAOUJ5B8OuPXyMfnl484FISrdt0XGpNJpS66b1XHmf7p3TmFLlnO7/23uN57KiNdpe0RaB8R+Y5IzmcWrzR67wcbjbBTbjbAD6w1QIWVx6iUVSfciQIgw7WdYK1C6PXxUMEflpAnV0POZSMu++sbYrxJhlGArYgVbSH4i93eENLOJytOfnKXYzirqsHyCjHa55FqYBqC4ogkXXHBIzE90x9n2bNpc2IHYXIUfDcrkZSQFBnyze2uRsYx8dBXi+Pd2pLSFc3l0WnUCRXc4LCMFKEKWy/6fPHAUTy569sNCQd1E19Fx96UAQOsLRAtiHKp9XwCQYFhnPY2j61MYnsnV1Eh+cBc4J8ofY4mK4r7OBY9ze9+qrP95CCxyDJUIHDzDySR05MlXTJwwmA1a2X4r45shLLgZEF6EWsLwSvibme0tjvzczsfIwNGAmvg2gx4Uajr6kBelBRAobURalVBBx0SLghEjf1pXftmDkmRJipoV7tUVNK4tYEm5VVAZ95AA98=", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xbedfbe751c8d3913a76bb030bdcad312ce8c70aeabfaefcf491dafe740946ca5", + "0x74393cbd5f8764be54385a8c7f9442358ae2580f33aebc2597ffa42949172a34", + "0xc881ce64585de627f538376cae9513ca5f4efaa2ab255ce3fa5b15e890223982", + "0xa6ad654807a4f8de3b78d7fa389dc3bd31bbbb5bef24e8e8174f75c6f9ac5c6d", + 
"0xf6785ff09114b843cccff1e119100ca5ec235725ef2763573e1f3063bf48b886", + "0x20642552f0e7ef28ff28252b3839f0fa323f46792bd103baa9778684f737cda3" + ], + "prestateLeaf": "rUxieydXUSH5wFzgnyh9jiYrivs4Fj3N736qs/3kILHIMlQgcPMPJJHTkyVdMnDCYDVrZfivjmyEsuBkQXoRawvBK+JuZ7S2O/NzOx8jA0YCa+DaDHhRqOvqQF6UFEChtRFqVUEHHRIuCESN/Wld+2YOSZEmKmhXu1RU0ri1gSblVUBn3kAD3w==", + "poststateLeaf": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgA==" + }, + { + "input": "O8NUg0KaDo18ybTKajXM/sgqEYS37+lewPhGV/2sMAX6bu6pD+hu97cVqUkoazqhtXPFvnwPFy/jQjR+LFOXFZvvjCRgFRQvzzKOb8DJ0fqCRLgr9kiHN9LpqHXVhyHhhlRkem0+AIzvEmyqbgj5pbDQsr0gtBBZYN/IwUYeggMVEhM/rDKEiLf1Z5E/xs1a5ukkKJwXBVAioEmG81qwOBn9FqHWIrclhCUpqZRfRIZPp8ZXl64RTSIXhGibMujQipVQuXUsAOd4QE4HjirdI9SfzsLMesYimfC/eedEkhjL0z9+sjhgp5wJY84LBiNI6ZwrzNGke/zdySctbfKbgj2q6ECX3jy6lbeiNThEdLfO1t+IHM7rijOlNw/r8Up3YgH5ExUuuAOUJ5B8OuPXyMfnl484FISrdt0XGpNJpS66b1XHmf7p3TmFLlnO7/23uN57KiNdpe0RaB8R+Y5IzmcWrzR67wcbjbBTbjbAD6w1QIWVx6iUVSfciQIgw7WdYK1C6PXxUMEflpAnV0POZSMu++sbYrxJhlGArYgVbSH4i93eENLOJytOfnKXYzirqsHyCjHa55FqYBqC4ogkXXHBIzE90x9n2bNpc2IHYXIUfDcrkZSQFBnyze2uRsYx8dBXi+Pd2pLSFc3l0WnUCRXc4LCMFKEKWy/6fPHAUTy569sNCQd1E19Fx96UAQOsLRAtiHKp9XwCQYFhnPY2j61MYnsnV1Eh+cBc4J8ofY4mK4r7OBY9ze9+qrP95CCxyDJUIHDzDySR05MlXTJwwmA1a2X4r45shLLgZEF6EWsLwSvibme0tjvzczsfIwNGAmvg2gx4Uajr6kBelBRAobURalVBBx0SLghEjf1pXftmDkmRJipoV7tUVNK4tYEm5VVAZ95AAw==", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xbedfbe751c8d3913a76bb030bdcad312ce8c70aeabfaefcf491dafe740946ca5", + "0x74393cbd5f8764be54385a8c7f9442358ae2580f33aebc2597ffa42949172a34", + "0xc881ce64585de627f538376cae9513ca5f4efaa2ab255ce3fa5b15e890223982", + "0xa6ad654807a4f8de3b78d7fa389dc3bd31bbbb5bef24e8e8174f75c6f9ac5c6d", + "0x71ca4f0903770f2035f3650a40ac8b23d00aaffd7b155a8be5bd02ff1c944181" + ], + "prestateLeaf": 
"hlGArYgVbSH4i93eENLOJytOfnKXYzirqsHyCjHa55FqYBqC4ogkXXHBIzE90x9n2bNpc2IHYXIUfDcrkZSQFBnyze2uRsYx8dBXi+Pd2pLSFc3l0WnUCRXc4LCMFKEKWy/6fPHAUTy569sNCQd1E19Fx96UAQOsLRAtiHKp9XwCQYFhnPY2jw==", + "poststateLeaf": "rUxieydXUSH5wFzgnyh9jiYrivs4Fj3N736qs/3kILHIMlQgcPMPJJHTkyVdMnDCYDVrZfivjmyEsuBkQXoRawvBK+JuZ7S2O/NzOx8jA0YCa+DaDHhRqOvqQF6UFEChtRFqVUEHHRIuCESN/Wld+2YOSZEmKmhXu1RU0ri1gSblVUBn3kADgQ==" + }, + { + "input": "O8NUg0KaDo18ybTKajXM/sgqEYS37+lewPhGV/2sMAX6bu6pD+hu97cVqUkoazqhtXPFvnwPFy/jQjR+LFOXFZvvjCRgFRQvzzKOb8DJ0fqCRLgr9kiHN9LpqHXVhyHhhlRkem0+AIzvEmyqbgj5pbDQsr0gtBBZYN/IwUYeggMVEhM/rDKEiLf1Z5E/xs1a5ukkKJwXBVAioEmG81qwOBn9FqHWIrclhCUpqZRfRIZPp8ZXl64RTSIXhGibMujQipVQuXUsAOd4QE4HjirdI9SfzsLMesYimfC/eedEkhjL0z9+sjhgp5wJY84LBiNI6ZwrzNGke/zdySctbfKbgj2q6ECX3jy6lbeiNThEdLfO1t+IHM7rijOlNw/r8Up3YgH5ExUuuAOUJ5B8OuPXyMfnl484FISrdt0XGpNJpS66b1XHmf7p3TmFLlnO7/23uN57KiNdpe0RaB8R+Y5IzmcWrzR67wcbjbBTbjbAD6w1QIWVx6iUVSfciQIgw7WdYK1C6PXxUMEflpAnV0POZSMu++sbYrxJhlGArYgVbSH4i93eENLOJytOfnKXYzirqsHyCjHa55FqYBqC4ogkXXHBIzE90x9n2bNpc2IHYXIUfDcrkZSQFBnyze2uRsYx8dBXi+Pd2pLSFc3l0WnUCRXc4LCMFKEKWy/6fPHAUTy569sNCQd1E19Fx96UAQOsLRAtiHKp9XwCQYFhnPY2j61MYnsnV1Eh+cBc4J8ofY4mK4r7OBY9ze9+qrP95CCxyDJUIHDzDySR05MlXTJwwmA1a2X4r45shLLgZEF6EWsLwSvibme0tjvzczsfIwNGAmvg2gx4Uajr6kBelBRAobURalVBBx0SLghEjf1pXftmDkmRJipoV7tUVNK4tYEm5VVAZ95A", + "commitments": [ + "0xee0a1a26c607ab52c6308165995365f7951a185fccca4b76c847b8860d9fea7a", + "0xbedfbe751c8d3913a76bb030bdcad312ce8c70aeabfaefcf491dafe740946ca5", + "0x74393cbd5f8764be54385a8c7f9442358ae2580f33aebc2597ffa42949172a34", + "0xc881ce64585de627f538376cae9513ca5f4efaa2ab255ce3fa5b15e890223982", + "0xa6ad654807a4f8de3b78d7fa389dc3bd31bbbb5bef24e8e8174f75c6f9ac5c6d", + "0x8c3c5310bc148f9c71d9c0a8771479fb87c6fd9d9595ba60ded7b8eba1d6a483" + ], + "prestateLeaf": "hlGArYgVbSH4i93eENLOJytOfnKXYzirqsHyCjHa55FqYBqC4ogkXXHBIzE90x9n2bNpc2IHYXIUfDcrkZSQFBnyze2uRsYx8dBXi+Pd2pLSFc3l0WnUCRXc4LCMFKEKWy/6fPHAUTy569sNCQd1E19Fx96UAQOsLRAtiHKp9XwCQYFhnPY2jw==", + 
"poststateLeaf": "rUxieydXUSH5wFzgnyh9jiYrivs4Fj3N736qs/3kILHIMlQgcPMPJJHTkyVdMnDCYDVrZfivjmyEsuBkQXoRawvBK+JuZ7S2O/NzOx8jA0YCa+DaDHhRqOvqQF6UFEChtRFqVUEHHRIuCESN/Wld+2YOSZEmKmhXu1RU0ri1gSblVUBn3kABgA==" + } +] diff --git a/op-challenger2/game/keccak/merkle/testdata/proofs.json b/op-challenger2/game/keccak/merkle/testdata/proofs.json new file mode 100644 index 000000000000..cf319a4aab25 --- /dev/null +++ b/op-challenger2/game/keccak/merkle/testdata/proofs.json @@ -0,0 +1,177 @@ +[ + { + "name": "NoLeaves", + "rootHash": "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "leafCount": 0, + "index": 0, + "proofs": [] + }, + { + "name": "SingleLeaf", + "rootHash": "0x8285ae2d9ccfc8021051c1ea3dfa5ba8219605d0e28c941f1bc174822eda1154", + "leafCount": 1, + "index": 0, + "proofs": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2" + ] + }, + { + "name": "TwoLeaves", + 
"rootHash": "0x2f27513ae8b07634c3ac408e55e7b4b6d0dc400e3fa9b2165607d61a84625608", + "leafCount": 2, + "index": 1, + "proofs": [ + "0xff00000000000000000000000000000000000000000000000000000000000000", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2" + ] + }, + { + "name": "SingleLeaf", + "rootHash": "0x8285ae2d9ccfc8021051c1ea3dfa5ba8219605d0e28c941f1bc174822eda1154", + "leafCount": 1, + "index": 1, + "proofs": [ + "0xff00000000000000000000000000000000000000000000000000000000000000", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + 
"0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2" + ] + }, + { + "name": "PartialTree", + "rootHash": "0x72cdab4557ab96d1246b76e49b1d1a582662ebfceafe2f9c2922304d4fb9db33", + "leafCount": 20, + "index": 10, + "proofs": [ + "0xff0b000000000000000000000000000000000000000000000000000000000000", + "0xa68dfd9074d20ce110c771102e62f17a85ae562a56149e112dd4fecb6cbd8f52", + "0x892339d3cfe686a15b728211f6a3e4f21cf4ee128b152664c90f231f5a9f8333", + "0xd8ca500dee0f1f7334075ebef1e979652b70355c56e09b74d50c87e31e675ddb", + "0x0c9a9c89e79e068154f6c2b9b827cd4f3972e035542ebb68d68b9120e757626c", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2" + ] + }, + { + "name": 
"PartialTree", + "rootHash": "0x72cdab4557ab96d1246b76e49b1d1a582662ebfceafe2f9c2922304d4fb9db33", + "leafCount": 20, + "index": 11, + "proofs": [ + "0xff0a000000000000000000000000000000000000000000000000000000000000", + "0xa68dfd9074d20ce110c771102e62f17a85ae562a56149e112dd4fecb6cbd8f52", + "0x892339d3cfe686a15b728211f6a3e4f21cf4ee128b152664c90f231f5a9f8333", + "0xd8ca500dee0f1f7334075ebef1e979652b70355c56e09b74d50c87e31e675ddb", + "0x0c9a9c89e79e068154f6c2b9b827cd4f3972e035542ebb68d68b9120e757626c", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2" + ] + }, + { + "name": "PartialTree", + "rootHash": "0x9c089c06e4f93c68d74fdfef9255cb75f78b053dabb43749d20e21aa8330092c", + "leafCount": 65535, + "index": 65533, + "proofs": [ + "0xfffc000000000000000000000000000000000000000000000000000000000000", + "0xf52ca96368ae7189a9a33f0efefd1b0a2a2c208c8dca11a2b1066fe99a6582c5", + "0x6a9b869f709b2d34940a0bbe9eea3d502f1e918a4490eb890f1727d6843c055d", + "0x02f7a2dbcc9b3d6780f7f72aafd200af423ba438d45c535d2d4e333e43ce98fc", + "0x84952e81b8cba49f78e4ee1ef684936b09c1b851ecbe03382afecc04fb98b305", + "0x2f9e97e47493214948c70a6b818145a7e9cd84f7ceab384b179559730d7efbac", + "0x3414ae9dd358c60fae6b4010fc90af715bcc5d0fdd55cefd5370277de456c4b8", + 
"0x04c3b7e470a22c856003970d95b7015f284a0a09c8bc18e1da02078ded445d58", + "0x417e698fd035c879670c381d7bb0815ce67fb854625e502984ae8e2436ede0a9", + "0x042abe5eb9668a197d5389e4b225c474a7646950279cbd6d326db073ea120266", + "0x34f6ea610e918f37226300a42b22f702f3dd7bd7ae17e9dcb6e7374906b83aef", + "0x1c050824157972417b2931f6018ed24e3fe7fedf7c5a502eb26710501d5379d2", + "0x529067b7c2a065d285b931d19fd85ad35eb0abf89c1ed40ca836ace2c7ddad9e", + "0xfd4d4328beb216713b978c561d4acde2ffe400c60d4b6abe21940c0cff2bf0bc", + "0x9b4a92fefca8b04570c15e329cbc6c953dbe30e06607d985882f35fb8cfd986a", + "0x6ec52bd9024705f2c1bd913c43550f5c2b3fbba19dee077b4da6aa6cd90d1c3f" + ] + }, + { + "name": "FullTree", + "rootHash": "0x9c089c06e4f93c68d74fdfef9255cb75f78b053dabb43749d20e21aa8330092c", + "leafCount": 65535, + "index": 65534, + "proofs": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x65cf5d60111a84456933bd43218230c6235995be88b274e196e8ffcb88927db6", + "0x6a9b869f709b2d34940a0bbe9eea3d502f1e918a4490eb890f1727d6843c055d", + "0x02f7a2dbcc9b3d6780f7f72aafd200af423ba438d45c535d2d4e333e43ce98fc", + "0x84952e81b8cba49f78e4ee1ef684936b09c1b851ecbe03382afecc04fb98b305", + "0x2f9e97e47493214948c70a6b818145a7e9cd84f7ceab384b179559730d7efbac", + "0x3414ae9dd358c60fae6b4010fc90af715bcc5d0fdd55cefd5370277de456c4b8", + "0x04c3b7e470a22c856003970d95b7015f284a0a09c8bc18e1da02078ded445d58", + "0x417e698fd035c879670c381d7bb0815ce67fb854625e502984ae8e2436ede0a9", + "0x042abe5eb9668a197d5389e4b225c474a7646950279cbd6d326db073ea120266", + "0x34f6ea610e918f37226300a42b22f702f3dd7bd7ae17e9dcb6e7374906b83aef", + "0x1c050824157972417b2931f6018ed24e3fe7fedf7c5a502eb26710501d5379d2", + "0x529067b7c2a065d285b931d19fd85ad35eb0abf89c1ed40ca836ace2c7ddad9e", + "0xfd4d4328beb216713b978c561d4acde2ffe400c60d4b6abe21940c0cff2bf0bc", + "0x9b4a92fefca8b04570c15e329cbc6c953dbe30e06607d985882f35fb8cfd986a", + "0x6ec52bd9024705f2c1bd913c43550f5c2b3fbba19dee077b4da6aa6cd90d1c3f" + ] + } +] diff --git 
a/op-challenger2/game/keccak/merkle/tree.go b/op-challenger2/game/keccak/merkle/tree.go new file mode 100644 index 000000000000..16a6c02218b0 --- /dev/null +++ b/op-challenger2/game/keccak/merkle/tree.go @@ -0,0 +1,154 @@ +package merkle + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// BinaryMerkleTreeDepth is the depth of the merkle tree. +const BinaryMerkleTreeDepth = 16 + +// Proof is a list of [common.Hash]s that prove the merkle inclusion of a leaf. +// These are the sibling hashes of the leaf's path from the root to the leaf. +type Proof [BinaryMerkleTreeDepth]common.Hash + +var ( + // MaxLeafCount is the maximum number of leaves in the merkle tree. + MaxLeafCount = 1<= uint64(MaxLeafCount) { + panic("proof index out of bounds") + } + + levelNode := m.walkDownToLeafCount(index + 1) + for height := 0; height < BinaryMerkleTreeDepth; height++ { + if levelNode.Parent.IsLeftChild(levelNode) { + if levelNode.Parent.Right == nil { + proof[height] = common.Hash{} + } else { + proof[height] = levelNode.Parent.Right.Label + } + } else { + if levelNode.Parent.Left == nil { + proof[height] = common.Hash{} + } else { + proof[height] = levelNode.Parent.Left.Label + } + } + levelNode = levelNode.Parent + } + + return proof +} diff --git a/op-challenger2/game/keccak/merkle/tree_test.go b/op-challenger2/game/keccak/merkle/tree_test.go new file mode 100644 index 000000000000..3fa5f83ddce2 --- /dev/null +++ b/op-challenger2/game/keccak/merkle/tree_test.go @@ -0,0 +1,78 @@ +package merkle + +import ( + _ "embed" + "encoding/json" + "fmt" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +//go:embed testdata/proofs.json +var refTests []byte + +type testData struct { + Name string `json:"name"` + LeafCount uint64 `json:"leafCount"` + RootHash common.Hash `json:"rootHash"` + Index uint64 `json:"index"` + Proofs Proof `json:"proofs"` +} + +func 
TestBinaryMerkleTree_AddLeaf(t *testing.T) { + var tests []testData + require.NoError(t, json.Unmarshal(refTests, &tests)) + + for i, test := range tests { + test := test + t.Run(fmt.Sprintf("%s-LeafCount-%v-Ref-%v", test.Name, test.LeafCount, i), func(t *testing.T) { + tree := NewBinaryMerkleTree() + expectedLeafHash := zeroHashes[BinaryMerkleTreeDepth-1] + for i := 0; i < int(test.LeafCount); i++ { + expectedLeafHash = leafHash(i) + tree.AddLeaf(expectedLeafHash) + } + leaf := tree.walkDownToLeafCount(tree.LeafCount) + require.Equal(t, expectedLeafHash, leaf.Label) + }) + } +} + +func TestBinaryMerkleTree_RootHash(t *testing.T) { + var tests []testData + require.NoError(t, json.Unmarshal(refTests, &tests)) + + for i, test := range tests { + test := test + t.Run(fmt.Sprintf("%s-LeafCount-%v-Ref-%v", test.Name, test.LeafCount, i), func(t *testing.T) { + tree := NewBinaryMerkleTree() + for i := 0; i < int(test.LeafCount); i++ { + tree.AddLeaf(leafHash(i)) + } + require.Equal(t, test.RootHash, tree.RootHash()) + }) + } +} + +func TestBinaryMerkleTree_ProofAtIndex(t *testing.T) { + var tests []testData + require.NoError(t, json.Unmarshal(refTests, &tests)) + + for i, test := range tests { + test := test + t.Run(fmt.Sprintf("%s-Index-%v-Ref-%v", test.Name, test.LeafCount, i), func(t *testing.T) { + tree := NewBinaryMerkleTree() + for i := 0; i < int(test.LeafCount); i++ { + tree.AddLeaf(leafHash(i)) + } + proof := tree.ProofAtIndex(test.Index) + require.Equal(t, test.Proofs, proof) + }) + } +} + +func leafHash(idx int) common.Hash { + return common.Hash{0xff, byte(idx)} +} diff --git a/op-challenger2/game/keccak/scheduler.go b/op-challenger2/game/keccak/scheduler.go new file mode 100644 index 000000000000..a73a39ec8695 --- /dev/null +++ b/op-challenger2/game/keccak/scheduler.go @@ -0,0 +1,108 @@ +package keccak + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + faultTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/types" + 
keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +type Challenger interface { + Challenge(ctx context.Context, blockHash common.Hash, oracle Oracle, preimages []keccakTypes.LargePreimageMetaData) error +} + +type OracleSource interface { + Oracles() []keccakTypes.LargePreimageOracle +} + +type LargePreimageScheduler struct { + log log.Logger + cl faultTypes.ClockReader + ch chan common.Hash + oracles OracleSource + challenger Challenger + cancel func() + wg sync.WaitGroup +} + +func NewLargePreimageScheduler( + logger log.Logger, + cl faultTypes.ClockReader, + oracleSource OracleSource, + challenger Challenger) *LargePreimageScheduler { + return &LargePreimageScheduler{ + log: logger, + cl: cl, + ch: make(chan common.Hash, 1), + oracles: oracleSource, + challenger: challenger, + } +} + +func (s *LargePreimageScheduler) Start(ctx context.Context) { + ctx, cancel := context.WithCancel(ctx) + s.cancel = cancel + s.wg.Add(1) + go s.run(ctx) +} + +func (s *LargePreimageScheduler) Close() error { + s.cancel() + s.wg.Wait() + return nil +} + +func (s *LargePreimageScheduler) run(ctx context.Context) { + defer s.wg.Done() + for { + select { + case <-ctx.Done(): + return + case blockHash := <-s.ch: + if err := s.verifyPreimages(ctx, blockHash); err != nil { + s.log.Error("Failed to verify large preimages", "blockHash", blockHash, "err", err) + } + } + } +} + +func (s *LargePreimageScheduler) Schedule(blockHash common.Hash, _ uint64) error { + select { + case s.ch <- blockHash: + default: + s.log.Trace("Skipping preimage check while already processing") + } + return nil +} + +func (s *LargePreimageScheduler) verifyPreimages(ctx context.Context, blockHash common.Hash) error { + var err error + for _, oracle := range s.oracles.Oracles() { + err = errors.Join(err, s.verifyOraclePreimages(ctx, oracle, blockHash)) + } + return err +} + +func (s 
*LargePreimageScheduler) verifyOraclePreimages(ctx context.Context, oracle keccakTypes.LargePreimageOracle, blockHash common.Hash) error { + preimages, err := oracle.GetActivePreimages(ctx, blockHash) + if err != nil { + return err + } + period, err := oracle.ChallengePeriod(ctx) + if err != nil { + return fmt.Errorf("failed to load challenge period: %w", err) + } + toVerify := make([]keccakTypes.LargePreimageMetaData, 0, len(preimages)) + for _, preimage := range preimages { + if preimage.ShouldVerify(s.cl.Now(), time.Duration(period)*time.Second) { + toVerify = append(toVerify, preimage) + } + } + return s.challenger.Challenge(ctx, blockHash, oracle, toVerify) +} diff --git a/op-challenger2/game/keccak/scheduler_test.go b/op-challenger2/game/keccak/scheduler_test.go new file mode 100644 index 000000000000..bc4e59e46ef6 --- /dev/null +++ b/op-challenger2/game/keccak/scheduler_test.go @@ -0,0 +1,141 @@ +package keccak + +import ( + "context" + "errors" + "math/big" + "sync" + "testing" + "time" + + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +var stubChallengePeriod = uint64(3600) + +func TestScheduleNextCheck(t *testing.T) { + ctx := context.Background() + currentTimestamp := uint64(1240) + logger := testlog.Logger(t, log.LevelInfo) + preimage1 := keccakTypes.LargePreimageMetaData{ // Incomplete so won't be verified + LargePreimageIdent: keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xab}, + UUID: big.NewInt(111), + }, + } + preimage2 := keccakTypes.LargePreimageMetaData{ // Already countered so won't be verified + 
LargePreimageIdent: keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xab}, + UUID: big.NewInt(222), + }, + Timestamp: currentTimestamp - 10, + Countered: true, + } + preimage3 := keccakTypes.LargePreimageMetaData{ + LargePreimageIdent: keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0xdd}, + UUID: big.NewInt(333), + }, + Timestamp: currentTimestamp - 10, + } + oracle := &stubOracle{ + images: []keccakTypes.LargePreimageMetaData{preimage1, preimage2, preimage3}, + } + cl := clock.NewDeterministicClock(time.Unix(int64(currentTimestamp), 0)) + challenger := &stubChallenger{} + scheduler := NewLargePreimageScheduler(logger, cl, OracleSourceArray{oracle}, challenger) + scheduler.Start(ctx) + defer scheduler.Close() + err := scheduler.Schedule(common.Hash{0xaa}, 3) + require.NoError(t, err) + require.Eventually(t, func() bool { + return oracle.GetPreimagesCount() == 1 + }, 10*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { + verified := challenger.Checked() + t.Logf("Checked preimages: %v", verified) + return len(verified) == 1 && verified[0] == preimage3 + }, 10*time.Second, 10*time.Millisecond, "Did not verify preimage") +} + +type stubOracle struct { + m sync.Mutex + addr common.Address + getPreimagesCount int + images []keccakTypes.LargePreimageMetaData + treeRoots map[keccakTypes.LargePreimageIdent]common.Hash +} + +func (s *stubOracle) ChallengePeriod(_ context.Context) (uint64, error) { + return stubChallengePeriod, nil +} + +func (s *stubOracle) GetInputDataBlocks(_ context.Context, _ rpcblock.Block, _ keccakTypes.LargePreimageIdent) ([]uint64, error) { + panic("not supported") +} + +func (s *stubOracle) DecodeInputData(_ []byte) (*big.Int, keccakTypes.InputData, error) { + panic("not supported") +} + +func (s *stubOracle) Addr() common.Address { + return s.addr +} + +func (s *stubOracle) GetActivePreimages(_ context.Context, _ common.Hash) ([]keccakTypes.LargePreimageMetaData, error) { + s.m.Lock() + defer 
s.m.Unlock() + s.getPreimagesCount++ + return s.images, nil +} + +func (s *stubOracle) GetPreimagesCount() int { + s.m.Lock() + defer s.m.Unlock() + return s.getPreimagesCount +} + +func (s *stubOracle) ChallengeTx(_ keccakTypes.LargePreimageIdent, _ keccakTypes.Challenge) (txmgr.TxCandidate, error) { + panic("not supported") +} + +func (s *stubOracle) GetProposalTreeRoot(_ context.Context, _ rpcblock.Block, ident keccakTypes.LargePreimageIdent) (common.Hash, error) { + root, ok := s.treeRoots[ident] + if ok { + return root, nil + } + return common.Hash{}, errors.New("unknown tree root") +} + +type stubChallenger struct { + m sync.Mutex + checked []keccakTypes.LargePreimageMetaData +} + +func (s *stubChallenger) Challenge(_ context.Context, _ common.Hash, _ Oracle, preimages []keccakTypes.LargePreimageMetaData) error { + s.m.Lock() + defer s.m.Unlock() + s.checked = append(s.checked, preimages...) + return nil +} + +func (s *stubChallenger) Checked() []keccakTypes.LargePreimageMetaData { + s.m.Lock() + defer s.m.Unlock() + v := make([]keccakTypes.LargePreimageMetaData, len(s.checked)) + copy(v, s.checked) + return v +} + +type OracleSourceArray []keccakTypes.LargePreimageOracle + +func (o OracleSourceArray) Oracles() []keccakTypes.LargePreimageOracle { + return o +} diff --git a/op-challenger2/game/keccak/types/types.go b/op-challenger2/game/keccak/types/types.go new file mode 100644 index 000000000000..f16a865aa12c --- /dev/null +++ b/op-challenger2/game/keccak/types/types.go @@ -0,0 +1,107 @@ +package types + +import ( + "context" + "math/big" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/merkle" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" +) + +// BlockSize is the size in bytes required for leaf data. 
+const BlockSize = 136 + +// Leaf is the keccak state matrix added to the large preimage merkle tree. +type Leaf struct { + // Input is the data absorbed for the block, exactly 136 bytes + Input [BlockSize]byte + // Index of the block in the absorption process + Index uint64 + // StateCommitment is the hash of the internal state after absorbing the input. + StateCommitment common.Hash +} + +// Hash returns the hash of the leaf data. That is the +// bytewise concatenation of the input, index, and state commitment. +func (l Leaf) Hash() common.Hash { + concatted := make([]byte, 0, 136+32+32) + concatted = append(concatted, l.Input[:]...) + concatted = append(concatted, math.U256Bytes(new(big.Int).SetUint64(l.Index))...) + concatted = append(concatted, l.StateCommitment.Bytes()...) + return crypto.Keccak256Hash(concatted) +} + +// InputData is a contiguous segment of preimage data. +type InputData struct { + // Input is the preimage data. + // When Finalize is false, len(Input) must equal len(Commitments)*BlockSize + // When Finalize is true, len(Input) must be between len(Commitments - 1)*BlockSize and len(Commitments)*BlockSize + Input []byte + // Commitments are the keccak commitments for each leaf in the chunk. + Commitments []common.Hash + // Finalize indicates whether the chunk is the final chunk. + Finalize bool +} + +type LargePreimageIdent struct { + Claimant common.Address + UUID *big.Int +} + +type LargePreimageMetaData struct { + LargePreimageIdent + + // Timestamp is the time at which the proposal first became fully available. + // 0 when not all data is available yet + Timestamp uint64 + PartOffset uint32 + ClaimedSize uint32 + BlocksProcessed uint32 + BytesProcessed uint32 + Countered bool +} + +// ShouldVerify returns true if the preimage upload is complete, has not yet been countered, and the +// challenge period has not yet elapsed. 
+func (m LargePreimageMetaData) ShouldVerify(now time.Time, ignoreAfter time.Duration) bool { + return m.Timestamp > 0 && !m.Countered && m.Timestamp+uint64(ignoreAfter.Seconds()) > uint64(now.Unix()) +} + +type StateSnapshot [25]uint64 + +// Pack packs the state in to the solidity ABI encoding required for the state matrix +func (s StateSnapshot) Pack() []byte { + buf := make([]byte, 0, len(s)*32) + for _, v := range s { + buf = append(buf, math.U256Bytes(new(big.Int).SetUint64(v))...) + } + return buf +} + +type Challenge struct { + // StateMatrix is the packed state matrix preimage of the StateCommitment in Prestate + StateMatrix StateSnapshot + + // Prestate is the valid leaf immediately prior to the first invalid leaf + Prestate Leaf + PrestateProof merkle.Proof + + // Poststate is the first invalid leaf in the preimage. The challenge claims that this leaf is invalid. + Poststate Leaf + PoststateProof merkle.Proof +} + +type LargePreimageOracle interface { + Addr() common.Address + GetActivePreimages(ctx context.Context, blockHash common.Hash) ([]LargePreimageMetaData, error) + GetInputDataBlocks(ctx context.Context, block rpcblock.Block, ident LargePreimageIdent) ([]uint64, error) + GetProposalTreeRoot(ctx context.Context, block rpcblock.Block, ident LargePreimageIdent) (common.Hash, error) + DecodeInputData(data []byte) (*big.Int, InputData, error) + ChallengeTx(ident LargePreimageIdent, challenge Challenge) (txmgr.TxCandidate, error) + ChallengePeriod(ctx context.Context) (uint64, error) +} diff --git a/op-challenger2/game/keccak/types/types_test.go b/op-challenger2/game/keccak/types/types_test.go new file mode 100644 index 000000000000..b8a34021c2f7 --- /dev/null +++ b/op-challenger2/game/keccak/types/types_test.go @@ -0,0 +1,64 @@ +package types + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestShouldVerify(t *testing.T) { + tests := []struct { + name string + timestamp uint64 + countered bool + now int64 + expected 
bool + }{ + { + name: "IgnoreNotFinalizedAndNotCountered", + timestamp: 0, + countered: false, + now: 100, + expected: false, + }, + { + name: "VerifyFinalizedAndNotCountered", + timestamp: 50, + countered: false, + now: 100, + expected: true, + }, + { + name: "IgnoreFinalizedAndCountered", + timestamp: 50, + countered: true, + now: 100, + expected: false, + }, + { + name: "IgnoreNotFinalizedAndCountered", + timestamp: 0, + countered: true, + now: 100, + expected: false, + }, + { + name: "IgnoreFinalizedBeforeTimeWindowAndNotCountered", + timestamp: 50, + countered: false, + now: 50 + int64((2 * time.Hour).Seconds()), + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + metadata := LargePreimageMetaData{ + Timestamp: test.timestamp, + Countered: test.countered, + } + require.Equal(t, test.expected, metadata.ShouldVerify(time.Unix(test.now, 0), 1*time.Hour)) + }) + } +} diff --git a/op-challenger2/game/keccak/verifier.go b/op-challenger2/game/keccak/verifier.go new file mode 100644 index 000000000000..143381e4f5ad --- /dev/null +++ b/op-challenger2/game/keccak/verifier.go @@ -0,0 +1,81 @@ +package keccak + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/fetcher" + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/matrix" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + lru "github.com/hashicorp/golang-lru/v2" +) + +const validPreimageCacheSize = 500 + +type VerifierPreimageOracle interface { + fetcher.Oracle + GetProposalTreeRoot(ctx context.Context, block rpcblock.Block, ident keccakTypes.LargePreimageIdent) (common.Hash, error) +} + +type Fetcher interface { + FetchInputs(ctx context.Context, blockHash common.Hash, oracle 
fetcher.Oracle, ident keccakTypes.LargePreimageIdent) ([]keccakTypes.InputData, error) +} + +type PreimageVerifier struct { + log log.Logger + fetcher Fetcher + + // knownValid caches the merkle tree roots that have been confirmed as valid. + // Invalid roots are not cached as those preimages will be ignored once the challenge is processed. + knownValid *lru.Cache[common.Hash, bool] +} + +func NewPreimageVerifier(logger log.Logger, fetcher Fetcher) *PreimageVerifier { + // Can't error because size is hard coded + cache, _ := lru.New[common.Hash, bool](validPreimageCacheSize) + return &PreimageVerifier{ + log: logger, + fetcher: fetcher, + knownValid: cache, + } +} + +func (v *PreimageVerifier) CreateChallenge(ctx context.Context, blockHash common.Hash, oracle VerifierPreimageOracle, preimage keccakTypes.LargePreimageMetaData) (keccakTypes.Challenge, error) { + root, err := oracle.GetProposalTreeRoot(ctx, rpcblock.ByHash(blockHash), preimage.LargePreimageIdent) + if err != nil { + return keccakTypes.Challenge{}, fmt.Errorf("failed to get proposal merkle root: %w", err) + } + if valid, ok := v.knownValid.Get(root); ok && valid { + // We've already determined that the keccak transition is valid. + // Note that the merkle tree may have been validated by a different proposal but since the tree root + // commits to all the input data and the resulting keccak state matrix, any other proposal with the same + // root must also have the same inputs and correctly applied keccak. + // It is possible that this proposal can't be squeezed because the claimed data length doesn't match the + // actual length but the contracts enforce that and it can't be challenged on that basis. 
+ return keccakTypes.Challenge{}, matrix.ErrValid + } + inputs, err := v.fetcher.FetchInputs(ctx, blockHash, oracle, preimage.LargePreimageIdent) + if err != nil { + return keccakTypes.Challenge{}, fmt.Errorf("failed to fetch leaves: %w", err) + } + readers := make([]io.Reader, 0, len(inputs)) + var commitments []common.Hash + for _, input := range inputs { + readers = append(readers, bytes.NewReader(input.Input)) + commitments = append(commitments, input.Commitments...) + } + challenge, err := matrix.Challenge(io.MultiReader(readers...), commitments) + if errors.Is(err, matrix.ErrValid) { + v.knownValid.Add(root, true) + return keccakTypes.Challenge{}, err + } else if err != nil { + return keccakTypes.Challenge{}, fmt.Errorf("failed to create challenge: %w", err) + } + return challenge, nil +} diff --git a/op-challenger2/game/keccak/verifier_test.go b/op-challenger2/game/keccak/verifier_test.go new file mode 100644 index 000000000000..c53a78894585 --- /dev/null +++ b/op-challenger2/game/keccak/verifier_test.go @@ -0,0 +1,164 @@ +package keccak + +import ( + "bytes" + "context" + "errors" + "io" + "math/big" + "math/rand" + "sync/atomic" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/fetcher" + "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/matrix" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestVerify(t *testing.T) { + logger := testlog.Logger(t, log.LevelInfo) + tests := []struct { + name string + inputs func() []keccakTypes.InputData + expectedErr error + }{ + { + name: "Valid-SingleInput", + inputs: func() []keccakTypes.InputData { return validInputs(t, 1) }, + expectedErr: matrix.ErrValid, + }, + { + name: 
"Valid-MultipleInputs", + inputs: func() []keccakTypes.InputData { return validInputs(t, 3) }, + expectedErr: matrix.ErrValid, + }, + { + name: "Invalid-FirstCommitment", + inputs: func() []keccakTypes.InputData { + inputs := validInputs(t, 1) + inputs[0].Commitments[0] = common.Hash{0xaa} + return inputs + }, + expectedErr: nil, + }, + { + name: "Invalid-MiddleCommitment", + inputs: func() []keccakTypes.InputData { + inputs := validInputs(t, 1) + inputs[0].Commitments[1] = common.Hash{0xaa} + return inputs + }, + expectedErr: nil, + }, + { + name: "Invalid-LastCommitment", + inputs: func() []keccakTypes.InputData { + inputs := validInputs(t, 3) + inputs[2].Commitments[len(inputs[2].Commitments)-1] = common.Hash{0xaa} + return inputs + }, + expectedErr: nil, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + fetcher := &stubFetcher{ + inputs: test.inputs(), + } + verifier := NewPreimageVerifier(logger, fetcher) + preimage := keccakTypes.LargePreimageMetaData{} + oracle := &stubOracle{ + treeRoots: map[keccakTypes.LargePreimageIdent]common.Hash{ + preimage.LargePreimageIdent: {0xde}, + }, + } + challenge, err := verifier.CreateChallenge(context.Background(), common.Hash{0xff}, oracle, preimage) + require.ErrorIs(t, err, test.expectedErr) + if err == nil { + // Leave checking the validity of the challenge to the StateMatrix tests + // Just confirm that we got a non-zero challenge + require.NotEqual(t, keccakTypes.Challenge{}, challenge) + } else { + require.Equal(t, keccakTypes.Challenge{}, challenge) + } + }) + } +} + +func TestCacheValidRoots(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + fetcher := &stubFetcher{ + inputs: validInputs(t, 1), + } + verifier := NewPreimageVerifier(logger, fetcher) + preimage1 := keccakTypes.LargePreimageMetaData{ + LargePreimageIdent: keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0x12}, + UUID: big.NewInt(1), + }, + } + preimage2 := 
keccakTypes.LargePreimageMetaData{ + LargePreimageIdent: keccakTypes.LargePreimageIdent{ + Claimant: common.Address{0x23}, + UUID: big.NewInt(2), + }, + } + oracle := &stubOracle{ + treeRoots: map[keccakTypes.LargePreimageIdent]common.Hash{ + preimage1.LargePreimageIdent: {0xde}, + preimage2.LargePreimageIdent: {0xde}, + }, + } + challenge, err := verifier.CreateChallenge(context.Background(), common.Hash{0xff}, oracle, preimage1) + require.ErrorIs(t, err, matrix.ErrValid) + require.Equal(t, keccakTypes.Challenge{}, challenge, "Should be valid") + require.EqualValues(t, 1, fetcher.fetchCount.Load(), "Should fetch data and validate") + + // Should cache the validity + challenge, err = verifier.CreateChallenge(context.Background(), common.Hash{0xee}, oracle, preimage1) + require.ErrorIs(t, err, matrix.ErrValid) + require.Equal(t, keccakTypes.Challenge{}, challenge, "Should be valid") + require.EqualValues(t, 1, fetcher.fetchCount.Load(), "Should use cached validity") + + // Should cache the validity across different challenges + challenge, err = verifier.CreateChallenge(context.Background(), common.Hash{0xee}, oracle, preimage2) + require.ErrorIs(t, err, matrix.ErrValid) + require.Equal(t, keccakTypes.Challenge{}, challenge, "Should be valid") + require.EqualValues(t, 1, fetcher.fetchCount.Load(), "Should use cached validity") +} + +func validInputs(t *testing.T, inputCount int) []keccakTypes.InputData { + chunkSize := 2 * keccakTypes.BlockSize + data := testutils.RandomData(rand.New(rand.NewSource(4444)), inputCount*chunkSize) + var calls []keccakTypes.InputData + in := bytes.NewReader(data) + s := matrix.NewStateMatrix() + for { + call, err := s.AbsorbUpTo(in, chunkSize) + if !errors.Is(err, io.EOF) { + require.NoError(t, err) + } + calls = append(calls, call) + if errors.Is(err, io.EOF) { + break + } + } + return calls +} + +type stubFetcher struct { + inputs []keccakTypes.InputData + fetchCount atomic.Int64 +} + +func (s *stubFetcher) FetchInputs(_ 
context.Context, _ common.Hash, _ fetcher.Oracle, _ keccakTypes.LargePreimageIdent) ([]keccakTypes.InputData, error) { + s.fetchCount.Add(1) + return s.inputs, nil +} diff --git a/op-challenger2/game/monitor.go b/op-challenger2/game/monitor.go new file mode 100644 index 000000000000..d1a35029e1b4 --- /dev/null +++ b/op-challenger2/game/monitor.go @@ -0,0 +1,174 @@ +package game + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/scheduler" + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/eth" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" +) + +type blockNumberFetcher func(ctx context.Context) (uint64, error) + +// gameSource loads information about the games available to play +type gameSource interface { + GetGamesAtOrAfter(ctx context.Context, blockHash common.Hash, earliestTimestamp uint64) ([]types.GameMetadata, error) +} + +type RWClock interface { + SetTime(uint64) + Now() time.Time +} + +type gameScheduler interface { + Schedule([]types.GameMetadata, uint64) error +} + +type preimageScheduler interface { + Schedule(blockHash common.Hash, blockNumber uint64) error +} + +type claimer interface { + Schedule(blockNumber uint64, games []types.GameMetadata) error +} + +type gameMonitor struct { + logger log.Logger + clock RWClock + source gameSource + scheduler gameScheduler + preimages preimageScheduler + gameWindow time.Duration + claimer claimer + fetchBlockNumber blockNumberFetcher + allowedGames []common.Address + l1HeadsSub ethereum.Subscription + l1Source *headSource + runState sync.Mutex +} + +type MinimalSubscriber interface { + EthSubscribe(ctx context.Context, channel interface{}, args 
...interface{}) (ethereum.Subscription, error) +} + +type headSource struct { + inner MinimalSubscriber +} + +func (s *headSource) SubscribeNewHead(ctx context.Context, ch chan<- *ethTypes.Header) (ethereum.Subscription, error) { + return s.inner.EthSubscribe(ctx, ch, "newHeads") +} + +func newGameMonitor( + logger log.Logger, + cl RWClock, + source gameSource, + scheduler gameScheduler, + preimages preimageScheduler, + gameWindow time.Duration, + claimer claimer, + fetchBlockNumber blockNumberFetcher, + allowedGames []common.Address, + l1Source MinimalSubscriber, +) *gameMonitor { + return &gameMonitor{ + logger: logger, + clock: cl, + scheduler: scheduler, + preimages: preimages, + source: source, + gameWindow: gameWindow, + claimer: claimer, + fetchBlockNumber: fetchBlockNumber, + allowedGames: allowedGames, + l1Source: &headSource{inner: l1Source}, + } +} + +func (m *gameMonitor) allowedGame(game common.Address) bool { + if len(m.allowedGames) == 0 { + return true + } + for _, allowed := range m.allowedGames { + if allowed == game { + return true + } + } + return false +} + +func (m *gameMonitor) progressGames(ctx context.Context, blockHash common.Hash, blockNumber uint64) error { + minGameTimestamp := clock.MinCheckedTimestamp(m.clock, m.gameWindow) + games, err := m.source.GetGamesAtOrAfter(ctx, blockHash, minGameTimestamp) + if err != nil { + return fmt.Errorf("failed to load games: %w", err) + } + var gamesToPlay []types.GameMetadata + for _, game := range games { + if !m.allowedGame(game.Proxy) { + m.logger.Debug("Skipping game not on allow list", "game", game.Proxy) + continue + } + gamesToPlay = append(gamesToPlay, game) + } + if err := m.claimer.Schedule(blockNumber, gamesToPlay); err != nil { + return fmt.Errorf("failed to schedule bond claims: %w", err) + } + if err := m.scheduler.Schedule(gamesToPlay, blockNumber); errors.Is(err, scheduler.ErrBusy) { + m.logger.Info("Scheduler still busy with previous update") + } else if err != nil { + return 
fmt.Errorf("failed to schedule games: %w", err) + } + return nil +} + +func (m *gameMonitor) onNewL1Head(ctx context.Context, sig eth.L1BlockRef) { + m.clock.SetTime(sig.Time) + if err := m.progressGames(ctx, sig.Hash, sig.Number); err != nil { + m.logger.Error("Failed to progress games", "err", err) + } + if err := m.preimages.Schedule(sig.Hash, sig.Number); err != nil { + m.logger.Error("Failed to validate large preimages", "err", err) + } +} + +func (m *gameMonitor) resubscribeFunction() event.ResubscribeErrFunc { + // The ctx is cancelled as soon as the subscription is returned, + // but is only used to create the subscription, and does not affect the returned subscription. + return func(ctx context.Context, err error) (event.Subscription, error) { + if err != nil { + m.logger.Warn("resubscribing after failed L1 subscription", "err", err) + } + return eth.WatchHeadChanges(ctx, m.l1Source, m.onNewL1Head) + } +} + +func (m *gameMonitor) StartMonitoring() { + m.runState.Lock() + defer m.runState.Unlock() + if m.l1HeadsSub != nil { + return // already started + } + m.l1HeadsSub = event.ResubscribeErr(time.Second*10, m.resubscribeFunction()) +} + +func (m *gameMonitor) StopMonitoring() { + m.runState.Lock() + defer m.runState.Unlock() + if m.l1HeadsSub == nil { + return // already stopped + } + m.l1HeadsSub.Unsubscribe() + m.l1HeadsSub = nil +} diff --git a/op-challenger2/game/monitor_test.go b/op-challenger2/game/monitor_test.go new file mode 100644 index 000000000000..a69c69b962ab --- /dev/null +++ b/op-challenger2/game/monitor_test.go @@ -0,0 +1,296 @@ +package game + +import ( + "context" + "fmt" + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + 
"github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + "github.com/ethereum-optimism/optimism/op-service/clock" +) + +// TestMonitorGames tests that the monitor can handle a new head event +// and resubscribe to new heads if the subscription errors. +func TestMonitorGames(t *testing.T) { + t.Run("Schedules games", func(t *testing.T) { + addr1 := common.Address{0xaa} + addr2 := common.Address{0xbb} + monitor, source, sched, mockHeadSource, preimages, _ := setupMonitorTest(t, []common.Address{}) + source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + headerNotSent := true + for { + if len(sched.Scheduled()) >= 1 { + break + } + sub := mockHeadSource.Sub() + if sub == nil { + continue + } + if headerNotSent { + select { + case sub.headers <- ðtypes.Header{ + Number: big.NewInt(1), + }: + headerNotSent = false + case <-ctx.Done(): + return + default: + } + } + // Just to avoid a tight loop + time.Sleep(100 * time.Millisecond) + } + mockHeadSource.SetErr(fmt.Errorf("eth subscribe test error")) + cancel() + }() + + monitor.StartMonitoring() + <-ctx.Done() + monitor.StopMonitoring() + require.Len(t, sched.Scheduled(), 1) + require.Equal(t, []common.Address{addr1, addr2}, sched.Scheduled()[0]) + require.GreaterOrEqual(t, preimages.ScheduleCount(), 1, "Should schedule preimage checks") + }) + + t.Run("Resubscribes on error", func(t *testing.T) { + addr1 := common.Address{0xaa} + addr2 := common.Address{0xbb} + monitor, source, sched, mockHeadSource, preimages, _ := setupMonitorTest(t, []common.Address{}) + source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + // Wait for the subscription to be created + waitErr := wait.For(context.Background(), 5*time.Second, func() (bool, error) { + return 
mockHeadSource.Sub() != nil, nil + }) + require.NoError(t, waitErr) + mockHeadSource.Sub().errChan <- fmt.Errorf("test error") + for { + if len(sched.Scheduled()) >= 1 { + break + } + sub := mockHeadSource.Sub() + if sub == nil { + continue + } + select { + case sub.headers <- ðtypes.Header{ + Number: big.NewInt(1), + }: + case <-ctx.Done(): + return + default: + } + // Just to avoid a tight loop + time.Sleep(100 * time.Millisecond) + } + mockHeadSource.SetErr(fmt.Errorf("eth subscribe test error")) + cancel() + }() + + monitor.StartMonitoring() + <-ctx.Done() + monitor.StopMonitoring() + require.NotEmpty(t, sched.Scheduled()) // We might get more than one update scheduled. + require.Equal(t, []common.Address{addr1, addr2}, sched.Scheduled()[0]) + require.GreaterOrEqual(t, preimages.ScheduleCount(), 1, "Should schedule preimage checks") + }) +} + +func TestMonitorCreateAndProgressGameAgents(t *testing.T) { + monitor, source, sched, _, _, _ := setupMonitorTest(t, []common.Address{}) + + addr1 := common.Address{0xaa} + addr2 := common.Address{0xbb} + source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)} + + require.NoError(t, monitor.progressGames(context.Background(), common.Hash{0x01}, 0)) + + require.Len(t, sched.Scheduled(), 1) + require.Equal(t, []common.Address{addr1, addr2}, sched.Scheduled()[0]) +} + +func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) { + addr1 := common.Address{0xaa} + addr2 := common.Address{0xbb} + monitor, source, sched, _, _, stubClaimer := setupMonitorTest(t, []common.Address{addr2}) + source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)} + + require.NoError(t, monitor.progressGames(context.Background(), common.Hash{0x01}, 0)) + + require.Len(t, sched.Scheduled(), 1) + require.Equal(t, []common.Address{addr2}, sched.Scheduled()[0]) + require.Equal(t, 1, stubClaimer.scheduledGames) +} + +func newFDG(proxy common.Address, timestamp uint64) types.GameMetadata { + return 
types.GameMetadata{ + Proxy: proxy, + Timestamp: timestamp, + } +} + +func setupMonitorTest( + t *testing.T, + allowedGames []common.Address, +) (*gameMonitor, *stubGameSource, *stubScheduler, *mockNewHeadSource, *stubPreimageScheduler, *mockScheduler) { + logger := testlog.Logger(t, log.LevelDebug) + source := &stubGameSource{} + i := uint64(1) + fetchBlockNum := func(ctx context.Context) (uint64, error) { + i++ + return i, nil + } + sched := &stubScheduler{} + preimages := &stubPreimageScheduler{} + mockHeadSource := &mockNewHeadSource{} + stubClaimer := &mockScheduler{} + monitor := newGameMonitor( + logger, + clock.NewSimpleClock(), + source, + sched, + preimages, + time.Duration(0), + stubClaimer, + fetchBlockNum, + allowedGames, + mockHeadSource, + ) + return monitor, source, sched, mockHeadSource, preimages, stubClaimer +} + +type mockNewHeadSource struct { + sync.Mutex + sub *mockSubscription + err error +} + +func (m *mockNewHeadSource) Sub() *mockSubscription { + m.Lock() + defer m.Unlock() + return m.sub +} + +func (m *mockNewHeadSource) SetSub(sub *mockSubscription) { + m.Lock() + defer m.Unlock() + m.sub = sub +} + +func (m *mockNewHeadSource) SetErr(err error) { + m.Lock() + defer m.Unlock() + m.err = err +} + +func (m *mockNewHeadSource) EthSubscribe( + _ context.Context, + ch any, + _ ...any, +) (ethereum.Subscription, error) { + m.Lock() + defer m.Unlock() + errChan := make(chan error) + m.sub = &mockSubscription{errChan, (ch).(chan<- *ethtypes.Header)} + if m.err != nil { + return nil, m.err + } + return m.sub, nil +} + +type mockScheduler struct { + scheduleErr error + scheduledGames int +} + +func (m *mockScheduler) Schedule(_ uint64, games []types.GameMetadata) error { + m.scheduledGames += len(games) + return m.scheduleErr +} + +type mockSubscription struct { + errChan chan error + headers chan<- *ethtypes.Header +} + +func (m *mockSubscription) Unsubscribe() {} + +func (m *mockSubscription) Err() <-chan error { + return m.errChan +} + +type 
stubGameSource struct { + fetchErr error + games []types.GameMetadata +} + +func (s *stubGameSource) GetGamesAtOrAfter( + _ context.Context, + _ common.Hash, + _ uint64, +) ([]types.GameMetadata, error) { + if s.fetchErr != nil { + return nil, s.fetchErr + } + return s.games, nil +} + +type stubScheduler struct { + sync.Mutex + scheduled [][]common.Address +} + +func (s *stubScheduler) Scheduled() [][]common.Address { + s.Lock() + defer s.Unlock() + return s.scheduled +} + +func (s *stubScheduler) Schedule(games []types.GameMetadata, blockNumber uint64) error { + s.Lock() + defer s.Unlock() + var addrs []common.Address + for _, game := range games { + addrs = append(addrs, game.Proxy) + } + s.scheduled = append(s.scheduled, addrs) + return nil +} + +type stubPreimageScheduler struct { + sync.Mutex + scheduleCount int +} + +func (s *stubPreimageScheduler) Schedule(_ common.Hash, _ uint64) error { + s.Lock() + defer s.Unlock() + s.scheduleCount++ + return nil +} + +func (s *stubPreimageScheduler) ScheduleCount() int { + s.Lock() + defer s.Unlock() + return s.scheduleCount +} diff --git a/op-challenger2/game/registry/oracles.go b/op-challenger2/game/registry/oracles.go new file mode 100644 index 000000000000..7f64c64568ec --- /dev/null +++ b/op-challenger2/game/registry/oracles.go @@ -0,0 +1,32 @@ +package registry + +import ( + "sync" + + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum/go-ethereum/common" + "golang.org/x/exp/maps" +) + +type OracleRegistry struct { + l sync.Mutex + oracles map[common.Address]keccakTypes.LargePreimageOracle +} + +func NewOracleRegistry() *OracleRegistry { + return &OracleRegistry{ + oracles: make(map[common.Address]keccakTypes.LargePreimageOracle), + } +} + +func (r *OracleRegistry) RegisterOracle(oracle keccakTypes.LargePreimageOracle) { + r.l.Lock() + defer r.l.Unlock() + r.oracles[oracle.Addr()] = oracle +} + +func (r *OracleRegistry) Oracles() 
[]keccakTypes.LargePreimageOracle { + r.l.Lock() + defer r.l.Unlock() + return maps.Values(r.oracles) +} diff --git a/op-challenger2/game/registry/oracles_test.go b/op-challenger2/game/registry/oracles_test.go new file mode 100644 index 000000000000..43259d7b518d --- /dev/null +++ b/op-challenger2/game/registry/oracles_test.go @@ -0,0 +1,56 @@ +package registry + +import ( + "context" + "math/big" + "testing" + + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger2/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestDeduplicateOracles(t *testing.T) { + registry := NewOracleRegistry() + oracleA := stubPreimageOracle{0xaa} + oracleB := stubPreimageOracle{0xbb} + registry.RegisterOracle(oracleA) + registry.RegisterOracle(oracleB) + registry.RegisterOracle(oracleB) + oracles := registry.Oracles() + require.Len(t, oracles, 2) + require.Contains(t, oracles, oracleA) + require.Contains(t, oracles, oracleB) +} + +type stubPreimageOracle common.Address + +func (s stubPreimageOracle) ChallengePeriod(_ context.Context) (uint64, error) { + panic("not supported") +} + +func (s stubPreimageOracle) GetProposalTreeRoot(_ context.Context, _ rpcblock.Block, _ keccakTypes.LargePreimageIdent) (common.Hash, error) { + panic("not supported") +} + +func (s stubPreimageOracle) ChallengeTx(_ keccakTypes.LargePreimageIdent, _ keccakTypes.Challenge) (txmgr.TxCandidate, error) { + panic("not supported") +} + +func (s stubPreimageOracle) GetInputDataBlocks(_ context.Context, _ rpcblock.Block, _ keccakTypes.LargePreimageIdent) ([]uint64, error) { + panic("not supported") +} + +func (s stubPreimageOracle) DecodeInputData(_ []byte) (*big.Int, keccakTypes.InputData, error) { + panic("not supported") +} + +func (s stubPreimageOracle) Addr() common.Address { + return common.Address(s) +} 
+ +func (s stubPreimageOracle) GetActivePreimages(_ context.Context, _ common.Hash) ([]keccakTypes.LargePreimageMetaData, error) { + return nil, nil +} diff --git a/op-challenger2/game/registry/registry.go b/op-challenger2/game/registry/registry.go new file mode 100644 index 000000000000..fe05404fa166 --- /dev/null +++ b/op-challenger2/game/registry/registry.go @@ -0,0 +1,57 @@ +package registry + +import ( + "errors" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/claims" + "github.com/ethereum-optimism/optimism/op-challenger2/game/scheduler" + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" +) + +var ErrUnsupportedGameType = errors.New("unsupported game type") + +type GameTypeRegistry struct { + types map[uint32]scheduler.PlayerCreator + bondCreators map[uint32]claims.BondContractCreator +} + +func NewGameTypeRegistry() *GameTypeRegistry { + return &GameTypeRegistry{ + types: make(map[uint32]scheduler.PlayerCreator), + bondCreators: make(map[uint32]claims.BondContractCreator), + } +} + +// RegisterGameType registers a scheduler.PlayerCreator to use for a specific game type. +// Panics if the same game type is registered multiple times, since this indicates a significant programmer error. +func (r *GameTypeRegistry) RegisterGameType(gameType uint32, creator scheduler.PlayerCreator) { + if _, ok := r.types[gameType]; ok { + panic(fmt.Errorf("duplicate creator registered for game type: %v", gameType)) + } + r.types[gameType] = creator +} + +func (r *GameTypeRegistry) RegisterBondContract(gameType uint32, creator claims.BondContractCreator) { + if _, ok := r.bondCreators[gameType]; ok { + panic(fmt.Errorf("duplicate bond contract registered for game type: %v", gameType)) + } + r.bondCreators[gameType] = creator +} + +// CreatePlayer creates a new game player for the given game, using the specified directory for persisting data. 
+func (r *GameTypeRegistry) CreatePlayer(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { + creator, ok := r.types[game.GameType] + if !ok { + return nil, fmt.Errorf("%w: %v", ErrUnsupportedGameType, game.GameType) + } + return creator(game, dir) +} + +func (r *GameTypeRegistry) CreateBondContract(game types.GameMetadata) (claims.BondContract, error) { + creator, ok := r.bondCreators[game.GameType] + if !ok { + return nil, fmt.Errorf("%w: %v", ErrUnsupportedGameType, game.GameType) + } + return creator(game) +} diff --git a/op-challenger2/game/registry/registry_test.go b/op-challenger2/game/registry/registry_test.go new file mode 100644 index 000000000000..9b2844f3aa59 --- /dev/null +++ b/op-challenger2/game/registry/registry_test.go @@ -0,0 +1,84 @@ +package registry + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/claims" + "github.com/ethereum-optimism/optimism/op-challenger2/game/scheduler" + "github.com/ethereum-optimism/optimism/op-challenger2/game/scheduler/test" + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestUnknownGameType(t *testing.T) { + registry := NewGameTypeRegistry() + player, err := registry.CreatePlayer(types.GameMetadata{GameType: 0}, "") + require.ErrorIs(t, err, ErrUnsupportedGameType) + require.Nil(t, player) +} + +func TestKnownGameType(t *testing.T) { + registry := NewGameTypeRegistry() + expectedPlayer := &test.StubGamePlayer{} + creator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { + return expectedPlayer, nil + } + registry.RegisterGameType(0, creator) + player, err := registry.CreatePlayer(types.GameMetadata{GameType: 0}, "") + require.NoError(t, err) + require.Same(t, expectedPlayer, player) +} + +func TestPanicsOnDuplicateGameType(t 
*testing.T) { + registry := NewGameTypeRegistry() + creator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { + return nil, nil + } + registry.RegisterGameType(0, creator) + require.Panics(t, func() { + registry.RegisterGameType(0, creator) + }) +} + +func TestBondContracts(t *testing.T) { + t.Run("UnknownGameType", func(t *testing.T) { + registry := NewGameTypeRegistry() + contract, err := registry.CreateBondContract(types.GameMetadata{GameType: 0}) + require.ErrorIs(t, err, ErrUnsupportedGameType) + require.Nil(t, contract) + }) + t.Run("KnownGameType", func(t *testing.T) { + registry := NewGameTypeRegistry() + expected := &stubBondContract{} + registry.RegisterBondContract(0, func(game types.GameMetadata) (claims.BondContract, error) { + return expected, nil + }) + creator, err := registry.CreateBondContract(types.GameMetadata{GameType: 0}) + require.NoError(t, err) + require.Same(t, expected, creator) + }) + t.Run("PanicsOnDuplicate", func(t *testing.T) { + registry := NewGameTypeRegistry() + creator := func(game types.GameMetadata) (claims.BondContract, error) { + return nil, nil + } + registry.RegisterBondContract(0, creator) + require.Panics(t, func() { + registry.RegisterBondContract(0, creator) + }) + }) +} + +type stubBondContract struct{} + +func (s *stubBondContract) GetCredit(_ context.Context, _ common.Address) (*big.Int, types.GameStatus, error) { + panic("not supported") +} + +func (s *stubBondContract) ClaimCreditTx(_ context.Context, _ common.Address) (txmgr.TxCandidate, error) { + panic("not supported") +} diff --git a/op-challenger2/game/scheduler/coordinator.go b/op-challenger2/game/scheduler/coordinator.go new file mode 100644 index 000000000000..39e0b1d0c740 --- /dev/null +++ b/op-challenger2/game/scheduler/coordinator.go @@ -0,0 +1,206 @@ +package scheduler + +import ( + "context" + "errors" + "fmt" + "slices" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +var errUnknownGame = errors.New("unknown game") + +type PlayerCreator func(game types.GameMetadata, dir string) (GamePlayer, error) + +type CoordinatorMetricer interface { + RecordActedL1Block(n uint64) + RecordGamesStatus(inProgress, defenderWon, challengerWon int) + RecordGameUpdateScheduled() + RecordGameUpdateCompleted() +} + +type gameState struct { + player GamePlayer + inflight bool + lastProcessedBlockNum uint64 + status types.GameStatus +} + +// coordinator manages the set of current games, queues games to be played (on separate worker threads) and +// cleans up data files once a game is resolved. +// All function calls must be made on the same thread. +type coordinator struct { + // jobQueue is the outgoing queue for jobs being sent to workers for progression + jobQueue chan<- job + + // resultQueue is the incoming queue of jobs that have been completed by workers + resultQueue <-chan job + + logger log.Logger + m CoordinatorMetricer + createPlayer PlayerCreator + states map[common.Address]*gameState + disk DiskManager + + allowInvalidPrestate bool + + // lastScheduledBlockNum is the highest block number that the coordinator has seen and scheduled jobs. + lastScheduledBlockNum uint64 +} + +// schedule takes the current list of games to attempt to progress, filters out games that have previous +// progressions already in-flight and schedules jobs to progress on the outbound jobQueue. +// To avoid deadlock, it may process results from the inbound resultQueue while adding jobs to the outbound jobQueue. +// Returns an error if a game couldn't be scheduled because of an error. It will continue attempting to progress +// all games even if an error occurs with one game. 
+func (c *coordinator) schedule(ctx context.Context, games []types.GameMetadata, blockNumber uint64) error { + // First remove any game states we no longer require + for addr, state := range c.states { + if !state.inflight && !slices.ContainsFunc(games, func(candidate types.GameMetadata) bool { + return candidate.Proxy == addr + }) { + delete(c.states, addr) + } + } + + var gamesInProgress int + var gamesChallengerWon int + var gamesDefenderWon int + var errs []error + var jobs []job + // Next collect all the jobs to schedule and ensure all games are recorded in the states map. + // Otherwise, results may start being processed before all games are recorded, resulting in existing + // data directories potentially being deleted for games that are required. + for _, game := range games { + if j, err := c.createJob(ctx, game, blockNumber); err != nil { + errs = append(errs, fmt.Errorf("failed to create job for game %v: %w", game.Proxy, err)) + } else if j != nil { + jobs = append(jobs, *j) + c.m.RecordGameUpdateScheduled() + } + state, ok := c.states[game.Proxy] + if ok { + switch state.status { + case types.GameStatusInProgress: + gamesInProgress++ + case types.GameStatusDefenderWon: + gamesDefenderWon++ + case types.GameStatusChallengerWon: + gamesChallengerWon++ + } + } else { + c.logger.Warn("Game not found in states map", "game", game.Proxy) + } + } + c.m.RecordGamesStatus(gamesInProgress, gamesDefenderWon, gamesChallengerWon) + + lowestProcessedBlockNum := blockNumber + for _, state := range c.states { + lowestProcessedBlockNum = min(lowestProcessedBlockNum, state.lastProcessedBlockNum) + } + c.lastScheduledBlockNum = blockNumber + c.m.RecordActedL1Block(lowestProcessedBlockNum) + + // Finally, enqueue the jobs + for _, j := range jobs { + if err := c.enqueueJob(ctx, j); err != nil { + errs = append(errs, fmt.Errorf("failed to enqueue job for game %v: %w", j.addr, err)) + } + } + return errors.Join(errs...) 
+} + +// createJob updates the state for the specified game and returns the job to enqueue for it, if any +// Returns (nil, nil) when there is no error and no job to enqueue +func (c *coordinator) createJob(ctx context.Context, game types.GameMetadata, blockNumber uint64) (*job, error) { + state, ok := c.states[game.Proxy] + if !ok { + // This is the first time we're seeing this game, so its last processed block + // is the last block the coordinator processed (it didn't exist yet). + state = &gameState{lastProcessedBlockNum: c.lastScheduledBlockNum} + c.states[game.Proxy] = state + } + if state.inflight { + c.logger.Debug("Not rescheduling already in-flight game", "game", game.Proxy) + return nil, nil + } + // Create the player separately to the state so we retry creating it if it fails on the first attempt. + if state.player == nil { + player, err := c.createPlayer(game, c.disk.DirForGame(game.Proxy)) + if err != nil { + return nil, fmt.Errorf("failed to create game player: %w", err) + } + if err := player.ValidatePrestate(ctx); err != nil { + if !c.allowInvalidPrestate || !errors.Is(err, types.ErrInvalidPrestate) { + return nil, fmt.Errorf("failed to validate prestate: %w", err) + } + c.logger.Error("Invalid prestate", "game", game.Proxy, "err", err) + } + state.player = player + state.status = player.Status() + } + if state.status != types.GameStatusInProgress { + c.logger.Debug("Not rescheduling resolved game", "game", game.Proxy, "status", state.status) + state.lastProcessedBlockNum = blockNumber + return nil, nil + } + state.inflight = true + return newJob(blockNumber, game.Proxy, state.player, state.status), nil +} + +func (c *coordinator) enqueueJob(ctx context.Context, j job) error { + for { + select { + case c.jobQueue <- j: + return nil + case result := <-c.resultQueue: + if err := c.processResult(result); err != nil { + c.logger.Error("Failed to process result", "err", err) + } + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (c *coordinator) 
processResult(j job) error { + state, ok := c.states[j.addr] + if !ok { + return fmt.Errorf("game %v received unexpected result: %w", j.addr, errUnknownGame) + } + state.inflight = false + state.status = j.status + state.lastProcessedBlockNum = j.block + c.deleteResolvedGameFiles() + c.m.RecordGameUpdateCompleted() + return nil +} + +func (c *coordinator) deleteResolvedGameFiles() { + var keepGames []common.Address + for addr, state := range c.states { + if state.status == types.GameStatusInProgress || state.inflight { + keepGames = append(keepGames, addr) + } + } + if err := c.disk.RemoveAllExcept(keepGames); err != nil { + c.logger.Error("Unable to cleanup game data", "err", err) + } +} + +func newCoordinator(logger log.Logger, m CoordinatorMetricer, jobQueue chan<- job, resultQueue <-chan job, createPlayer PlayerCreator, disk DiskManager, allowInvalidPrestate bool) *coordinator { + return &coordinator{ + logger: logger, + m: m, + jobQueue: jobQueue, + resultQueue: resultQueue, + createPlayer: createPlayer, + disk: disk, + states: make(map[common.Address]*gameState), + allowInvalidPrestate: allowInvalidPrestate, + } +} diff --git a/op-challenger2/game/scheduler/coordinator_test.go b/op-challenger2/game/scheduler/coordinator_test.go new file mode 100644 index 000000000000..75bc08d44c7f --- /dev/null +++ b/op-challenger2/game/scheduler/coordinator_test.go @@ -0,0 +1,481 @@ +package scheduler + +import ( + "context" + "fmt" + "slices" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/scheduler/test" + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestScheduleNewGames(t *testing.T) { + c, workQueue, _, games, disk, _ := setupCoordinatorTest(t, 10) + gameAddr1 := common.Address{0xaa} + gameAddr2 := common.Address{0xbb} + gameAddr3 
:= common.Address{0xcc} + ctx := context.Background() + require.NoError(t, c.schedule(ctx, asGames(gameAddr1, gameAddr2, gameAddr3), 0)) + + require.Len(t, workQueue, 3, "should schedule job for each game") + require.Len(t, games.created, 3, "should have created players") + var players []GamePlayer + for i := 0; i < len(games.created); i++ { + j := <-workQueue + players = append(players, j.player) + } + for addr, player := range games.created { + require.Equal(t, disk.DirForGame(addr), player.Dir, "should use allocated directory") + require.Containsf(t, players, player, "should have created a job for player %v", addr) + } +} + +func TestSkipSchedulingInflightGames(t *testing.T) { + c, workQueue, _, _, _, _ := setupCoordinatorTest(t, 10) + gameAddr1 := common.Address{0xaa} + ctx := context.Background() + + // Schedule the game once + require.NoError(t, c.schedule(ctx, asGames(gameAddr1), 0)) + require.Len(t, workQueue, 1, "should schedule game") + + // And then attempt to schedule again + require.NoError(t, c.schedule(ctx, asGames(gameAddr1), 0)) + require.Len(t, workQueue, 1, "should not reschedule in-flight game") +} + +func TestExitWhenContextDoneWhileSchedulingJob(t *testing.T) { + // No space in buffer to schedule a job + c, workQueue, _, _, _, _ := setupCoordinatorTest(t, 0) + gameAddr1 := common.Address{0xaa} + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Context is cancelled + + // Should not block because the context is done. 
+ err := c.schedule(ctx, asGames(gameAddr1), 0) + require.ErrorIs(t, err, context.Canceled) + require.Empty(t, workQueue, "should not have been able to schedule game") +} + +func TestSchedule_PrestateValidationErrors(t *testing.T) { + c, _, _, games, _, _ := setupCoordinatorTest(t, 10) + games.PrestateErr = types.ErrInvalidPrestate + gameAddr1 := common.Address{0xaa} + ctx := context.Background() + + err := c.schedule(ctx, asGames(gameAddr1), 0) + require.Error(t, err) +} + +func TestSchedule_SkipPrestateValidationErrors(t *testing.T) { + c, _, _, games, _, logs := setupCoordinatorTest(t, 10) + c.allowInvalidPrestate = true + games.PrestateErr = types.ErrInvalidPrestate + gameAddr1 := common.Address{0xaa} + ctx := context.Background() + + err := c.schedule(ctx, asGames(gameAddr1), 0) + require.NoError(t, err) + errLog := logs.FindLog(testlog.NewLevelFilter(log.LevelError), testlog.NewMessageFilter("Invalid prestate")) + require.NotNil(t, errLog) + require.Equal(t, errLog.AttrValue("game"), gameAddr1) + require.Equal(t, errLog.AttrValue("err"), games.PrestateErr) +} + +func TestSchedule_PrestateValidationFailure(t *testing.T) { + c, _, _, games, _, _ := setupCoordinatorTest(t, 10) + c.allowInvalidPrestate = true + games.PrestateErr = fmt.Errorf("failed to fetch prestate") + gameAddr1 := common.Address{0xaa} + ctx := context.Background() + + err := c.schedule(ctx, asGames(gameAddr1), 0) + require.ErrorIs(t, err, games.PrestateErr) +} + +func TestScheduleGameAgainAfterCompletion(t *testing.T) { + c, workQueue, _, _, _, _ := setupCoordinatorTest(t, 10) + gameAddr1 := common.Address{0xaa} + ctx := context.Background() + + // Schedule the game once + require.NoError(t, c.schedule(ctx, asGames(gameAddr1), 0)) + require.Len(t, workQueue, 1, "should schedule game") + + // Read the job + j := <-workQueue + require.Len(t, workQueue, 0) + + // Process the result + require.NoError(t, c.processResult(j)) + + // And then attempt to schedule again + require.NoError(t, 
c.schedule(ctx, asGames(gameAddr1), 0)) + require.Len(t, workQueue, 1, "should reschedule completed game") +} + +func TestResultForUnknownGame(t *testing.T) { + c, _, _, _, _, _ := setupCoordinatorTest(t, 10) + err := c.processResult(job{addr: common.Address{0xaa}}) + require.ErrorIs(t, err, errUnknownGame) +} + +func TestProcessResultsWhileJobQueueFull(t *testing.T) { + c, workQueue, resultQueue, games, disk, _ := setupCoordinatorTest(t, 0) + gameAddr1 := common.Address{0xaa} + gameAddr2 := common.Address{0xbb} + gameAddr3 := common.Address{0xcc} + ctx := context.Background() + + // Create pre-existing data for all three games + disk.DirForGame(gameAddr1) + disk.DirForGame(gameAddr2) + disk.DirForGame(gameAddr3) + + resultsSent := make(chan any) + go func() { + defer close(resultsSent) + // Process three jobs then exit + for i := 0; i < 3; i++ { + j := <-workQueue + resultQueue <- j + } + }() + + // Even though work queue length is only 1, should be able to schedule all three games + // by reading and processing results + require.NoError(t, c.schedule(ctx, asGames(gameAddr1, gameAddr2, gameAddr3), 0)) + require.Len(t, games.created, 3, "should have created 3 games") + +loop: + for { + select { + case <-resultQueue: + // Drain any remaining results + case <-resultsSent: + break loop + } + } + + // Check that pre-existing directories weren't deleted. 
+ // This would fail if we start processing results before we've added all the required games to the state + require.Empty(t, disk.deletedDirs, "should not have deleted any directories") +} + +func TestDeleteDataForResolvedGames(t *testing.T) { + c, workQueue, _, _, disk, _ := setupCoordinatorTest(t, 10) + gameAddr1 := common.Address{0xaa} + gameAddr2 := common.Address{0xbb} + gameAddr3 := common.Address{0xcc} + ctx := context.Background() + + // First get game 3 marked as resolved + require.NoError(t, c.schedule(ctx, asGames(gameAddr3), 0)) + require.Len(t, workQueue, 1) + j := <-workQueue + j.status = types.GameStatusDefenderWon + require.NoError(t, c.processResult(j)) + // But ensure its data directory is marked as existing + disk.DirForGame(gameAddr3) + + games := asGames(gameAddr1, gameAddr2, gameAddr3) + require.NoError(t, c.schedule(ctx, games, 0)) + + // The work queue should only contain jobs for games 1 and 2 + // A resolved game should not be scheduled for an update. + // This makes the inflight game metric more robust. + require.Len(t, workQueue, 2, "should schedule all games") + + // Game 1 progresses and is still in progress + // Game 2 progresses and is now resolved + // Game 3 hasn't yet progressed (update is still in flight) + for i := 0; i < len(games)-1; i++ { + j := <-workQueue + if j.addr == gameAddr2 { + j.status = types.GameStatusDefenderWon + } + require.NoError(t, c.processResult(j)) + } + + require.True(t, disk.gameDirExists[gameAddr1], "game 1 data should be preserved (not resolved)") + require.False(t, disk.gameDirExists[gameAddr2], "game 2 data should be deleted") + // Game 3 never got marked as in-flight because it was already resolved so got skipped. + // We shouldn't be able to have a known-resolved game that is also in-flight because we always skip processing it. 
+ require.False(t, disk.gameDirExists[gameAddr3], "game 3 data should be deleted") +} + +func TestSchedule_RecordActedL1Block(t *testing.T) { + c, workQueue, _, _, _, _ := setupCoordinatorTest(t, 10) + gameAddr1 := common.Address{0xaa} + gameAddr2 := common.Address{0xcc} + ctx := context.Background() + + // The first game should be tracked + require.NoError(t, c.schedule(ctx, asGames(gameAddr1, gameAddr2), 1)) + + // Process the result + require.Len(t, workQueue, 2) + j := <-workQueue + require.Equal(t, gameAddr1, j.addr) + j.status = types.GameStatusDefenderWon + require.NoError(t, c.processResult(j)) + j = <-workQueue + require.Equal(t, gameAddr2, j.addr) + j.status = types.GameStatusInProgress + require.NoError(t, c.processResult(j)) + + // Schedule another block + require.NoError(t, c.schedule(ctx, asGames(gameAddr1, gameAddr2), 2)) + + // Process the result (only the in-progress game gets rescheduled) + require.Len(t, workQueue, 1) + j = <-workQueue + require.Equal(t, gameAddr2, j.addr) + require.Equal(t, uint64(2), j.block) + j.status = types.GameStatusInProgress + require.NoError(t, c.processResult(j)) + + // Schedule a third block + require.NoError(t, c.schedule(ctx, asGames(gameAddr1, gameAddr2), 3)) + + // Process the result (only the in-progress game gets rescheduled) + // This is deliberately done a third time, because there was actually a bug where it worked for the first two + // cycles and failed on the third. This was because the first cycle the game status was unknown so it was processed + // the second cycle was the first time the game was known to be complete so was skipped but crucially it left it + // marked as in-flight. On the third update the was incorrectly skipped as in-flight and the l1 block number + // wasn't updated. From then on the block number would never be updated. 
+ require.Len(t, workQueue, 1) + j = <-workQueue + require.Equal(t, gameAddr2, j.addr) + require.Equal(t, uint64(3), j.block) + j.status = types.GameStatusInProgress + require.NoError(t, c.processResult(j)) + + // Schedule so that the metric is updated + require.NoError(t, c.schedule(ctx, asGames(gameAddr1, gameAddr2), 4)) + + // Verify that the block number is recorded by the metricer as acted upon + require.Equal(t, uint64(3), c.m.(*stubSchedulerMetrics).actedL1Blocks) +} + +func TestSchedule_RecordActedL1BlockMultipleGames(t *testing.T) { + c, workQueue, _, _, _, _ := setupCoordinatorTest(t, 10) + gameAddr1 := common.Address{0xaa} + gameAddr2 := common.Address{0xbb} + gameAddr3 := common.Address{0xcc} + ctx := context.Background() + + games := asGames(gameAddr1, gameAddr2, gameAddr3) + require.NoError(t, c.schedule(ctx, games, 1)) + require.Len(t, workQueue, 3) + + // Game 1 progresses and is still in progress + // Game 2 progresses and is now resolved + // Game 3 hasn't yet progressed (update is still in flight) + var game3Job job + for i := 0; i < len(games); i++ { + require.Equal(t, uint64(0), c.m.(*stubSchedulerMetrics).actedL1Blocks) + j := <-workQueue + if j.addr == gameAddr2 { + j.status = types.GameStatusDefenderWon + } + if j.addr != gameAddr3 { + require.NoError(t, c.processResult(j)) + } else { + game3Job = j + } + } + + // Schedule so that the metric is updated + require.NoError(t, c.schedule(ctx, games, 2)) + + // Verify that block 1 isn't yet complete + require.Equal(t, uint64(0), c.m.(*stubSchedulerMetrics).actedL1Blocks) + + // Complete processing game 3 + require.NoError(t, c.processResult(game3Job)) + + // Schedule so that the metric is updated + require.NoError(t, c.schedule(ctx, games, 3)) + + // Verify that block 1 is now complete + require.Equal(t, uint64(1), c.m.(*stubSchedulerMetrics).actedL1Blocks) +} + +func TestSchedule_RecordActedL1BlockNewGame(t *testing.T) { + c, workQueue, _, _, _, _ := setupCoordinatorTest(t, 10) + gameAddr1 := 
common.Address{0xaa} + gameAddr2 := common.Address{0xbb} + gameAddr3 := common.Address{0xcc} + ctx := context.Background() + + require.NoError(t, c.schedule(ctx, asGames(gameAddr1, gameAddr2), 1)) + require.Len(t, workQueue, 2) + + // Game 1 progresses and is still in progress + // Game 2 progresses and is now resolved + // Game 3 doesn't exist yet + for i := 0; i < 2; i++ { + require.Equal(t, uint64(0), c.m.(*stubSchedulerMetrics).actedL1Blocks) + j := <-workQueue + if j.addr == gameAddr2 { + j.status = types.GameStatusDefenderWon + } + require.NoError(t, c.processResult(j)) + } + + // Schedule next block with game 3 now created + require.NoError(t, c.schedule(ctx, asGames(gameAddr1, gameAddr2, gameAddr3), 2)) + + // Verify that block 1 is now complete + require.Equal(t, uint64(1), c.m.(*stubSchedulerMetrics).actedL1Blocks) +} + +func TestDoNotDeleteDataForGameThatFailedToCreatePlayer(t *testing.T) { + c, workQueue, _, games, disk, _ := setupCoordinatorTest(t, 10) + gameAddr1 := common.Address{0xaa} + gameAddr2 := common.Address{0xbb} + ctx := context.Background() + + games.creationFails = gameAddr1 + + gameList := asGames(gameAddr1, gameAddr2) + err := c.schedule(ctx, gameList, 0) + require.Error(t, err) + + // Game 1 won't be scheduled because the player failed to be created + require.Len(t, workQueue, 1, "should schedule game 2") + + // Process game 2 result + require.NoError(t, c.processResult(<-workQueue)) + + require.True(t, disk.gameDirExists[gameAddr1], "game 1 data should be preserved") + require.True(t, disk.gameDirExists[gameAddr2], "game 2 data should be preserved") + + // Should create player for game 1 next time its scheduled + games.creationFails = common.Address{} + require.NoError(t, c.schedule(ctx, gameList, 0)) + require.Len(t, workQueue, len(gameList), "should schedule all games") + + j := <-workQueue + require.Equal(t, gameAddr1, j.addr, "first job should be for first game") + require.NotNil(t, j.player, "should have created player for game 
1") +} + +func TestDropOldGameStates(t *testing.T) { + c, workQueue, _, _, _, _ := setupCoordinatorTest(t, 10) + gameAddr1 := common.Address{0xaa} + gameAddr2 := common.Address{0xbb} + gameAddr3 := common.Address{0xcc} + gameAddr4 := common.Address{0xdd} + ctx := context.Background() + + // Start tracking game 1, 2 and 3 + require.NoError(t, c.schedule(ctx, asGames(gameAddr1, gameAddr2, gameAddr3), 0)) + require.Len(t, workQueue, 3, "should schedule games") + + // Complete processing of games 1 and 2, leaving 3 in flight + require.NoError(t, c.processResult(<-workQueue)) + require.NoError(t, c.processResult(<-workQueue)) + + // Next update only has games 2 and 4 + require.NoError(t, c.schedule(ctx, asGames(gameAddr2, gameAddr4), 0)) + + require.NotContains(t, c.states, gameAddr1, "should drop state for game 1") + require.Contains(t, c.states, gameAddr2, "should keep state for game 2 (still active)") + require.Contains(t, c.states, gameAddr3, "should keep state for game 3 (inflight)") + require.Contains(t, c.states, gameAddr4, "should create state for game 4") +} + +func setupCoordinatorTest(t *testing.T, bufferSize int) (*coordinator, <-chan job, chan job, *createdGames, *stubDiskManager, *testlog.CapturingHandler) { + logger, logs := testlog.CaptureLogger(t, log.LevelInfo) + workQueue := make(chan job, bufferSize) + resultQueue := make(chan job, bufferSize) + games := &createdGames{ + t: t, + created: make(map[common.Address]*test.StubGamePlayer), + } + disk := &stubDiskManager{gameDirExists: make(map[common.Address]bool)} + c := newCoordinator(logger, &stubSchedulerMetrics{}, workQueue, resultQueue, games.CreateGame, disk, false) + return c, workQueue, resultQueue, games, disk, logs +} + +type createdGames struct { + t *testing.T + createCompleted common.Address + creationFails common.Address + created map[common.Address]*test.StubGamePlayer + PrestateErr error +} + +func (c *createdGames) CreateGame(fdg types.GameMetadata, dir string) (GamePlayer, error) { + 
addr := fdg.Proxy + if c.creationFails == addr { + return nil, fmt.Errorf("refusing to create player for game: %v", addr) + } + if _, exists := c.created[addr]; exists { + c.t.Fatalf("game %v already exists", addr) + } + status := types.GameStatusInProgress + if addr == c.createCompleted { + status = types.GameStatusDefenderWon + } + game := &test.StubGamePlayer{ + Addr: addr, + StatusValue: status, + Dir: dir, + } + if c.PrestateErr != nil { + game.PrestateErr = c.PrestateErr + } + c.created[addr] = game + return game, nil +} + +type stubSchedulerMetrics struct { + actedL1Blocks uint64 +} + +func (s *stubSchedulerMetrics) RecordActedL1Block(n uint64) { + s.actedL1Blocks = n +} + +func (s *stubSchedulerMetrics) RecordGamesStatus(_, _, _ int) {} +func (s *stubSchedulerMetrics) RecordGameUpdateScheduled() {} +func (s *stubSchedulerMetrics) RecordGameUpdateCompleted() {} + +type stubDiskManager struct { + gameDirExists map[common.Address]bool + deletedDirs []common.Address +} + +func (s *stubDiskManager) DirForGame(addr common.Address) string { + s.gameDirExists[addr] = true + return addr.Hex() +} + +func (s *stubDiskManager) RemoveAllExcept(addrs []common.Address) error { + for address := range s.gameDirExists { + keep := slices.Contains(addrs, address) + s.gameDirExists[address] = keep + if !keep { + s.deletedDirs = append(s.deletedDirs, address) + } + } + return nil +} + +func asGames(addrs ...common.Address) []types.GameMetadata { + var games []types.GameMetadata + for _, addr := range addrs { + games = append(games, types.GameMetadata{ + Proxy: addr, + }) + } + return games +} diff --git a/op-challenger2/game/scheduler/scheduler.go b/op-challenger2/game/scheduler/scheduler.go new file mode 100644 index 000000000000..f64bea9688f5 --- /dev/null +++ b/op-challenger2/game/scheduler/scheduler.go @@ -0,0 +1,118 @@ +package scheduler + +import ( + "context" + "errors" + "sync" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + 
"github.com/ethereum/go-ethereum/log" +) + +var ErrBusy = errors.New("busy scheduling previous update") + +type SchedulerMetricer interface { + RecordActedL1Block(n uint64) + RecordGamesStatus(inProgress, defenderWon, challengerWon int) + RecordGameUpdateScheduled() + RecordGameUpdateCompleted() + IncActiveExecutors() + DecActiveExecutors() + IncIdleExecutors() + DecIdleExecutors() +} + +type blockGames struct { + blockNumber uint64 + games []types.GameMetadata +} + +type Scheduler struct { + logger log.Logger + coordinator *coordinator + m SchedulerMetricer + maxConcurrency uint + scheduleQueue chan blockGames + jobQueue chan job + resultQueue chan job + wg sync.WaitGroup + cancel func() +} + +func NewScheduler(logger log.Logger, m SchedulerMetricer, disk DiskManager, maxConcurrency uint, createPlayer PlayerCreator, allowInvalidPrestate bool) *Scheduler { + // Size job and results queues to be fairly small so backpressure is applied early + // but with enough capacity to keep the workers busy + jobQueue := make(chan job, maxConcurrency*2) + resultQueue := make(chan job, maxConcurrency*2) + + // scheduleQueue has a size of 1 so backpressure quickly propagates to the caller + // allowing them to potentially skip update cycles. 
+ scheduleQueue := make(chan blockGames, 1) + + return &Scheduler{ + logger: logger, + m: m, + coordinator: newCoordinator(logger, m, jobQueue, resultQueue, createPlayer, disk, allowInvalidPrestate), + maxConcurrency: maxConcurrency, + scheduleQueue: scheduleQueue, + jobQueue: jobQueue, + resultQueue: resultQueue, + } +} + +func (s *Scheduler) ThreadActive() { + s.m.IncActiveExecutors() + s.m.DecIdleExecutors() +} + +func (s *Scheduler) ThreadIdle() { + s.m.IncIdleExecutors() + s.m.DecActiveExecutors() +} + +func (s *Scheduler) Start(ctx context.Context) { + ctx, cancel := context.WithCancel(ctx) + s.cancel = cancel + + for i := uint(0); i < s.maxConcurrency; i++ { + s.m.IncIdleExecutors() + s.wg.Add(1) + go progressGames(ctx, s.jobQueue, s.resultQueue, &s.wg, s.ThreadActive, s.ThreadIdle) + } + + s.wg.Add(1) + go s.loop(ctx) +} + +func (s *Scheduler) Close() error { + s.cancel() + s.wg.Wait() + return nil +} + +func (s *Scheduler) Schedule(games []types.GameMetadata, blockNumber uint64) error { + select { + case s.scheduleQueue <- blockGames{blockNumber: blockNumber, games: games}: + return nil + default: + return ErrBusy + } +} + +func (s *Scheduler) loop(ctx context.Context) { + defer s.wg.Done() + for { + select { + case <-ctx.Done(): + return + case blockGames := <-s.scheduleQueue: + if err := s.coordinator.schedule(ctx, blockGames.games, blockGames.blockNumber); err != nil { + s.logger.Error("Failed to schedule game updates", "err", err) + } + case j := <-s.resultQueue: + if err := s.coordinator.processResult(j); err != nil { + s.logger.Error("Error while processing game result", "game", j.addr, "err", err) + } + } + } +} diff --git a/op-challenger2/game/scheduler/scheduler_test.go b/op-challenger2/game/scheduler/scheduler_test.go new file mode 100644 index 000000000000..02e44b3e1a98 --- /dev/null +++ b/op-challenger2/game/scheduler/scheduler_test.go @@ -0,0 +1,73 @@ +package scheduler + +import ( + "context" + "testing" + + 
	"github.com/ethereum-optimism/optimism/op-challenger2/game/scheduler/test"
	"github.com/ethereum-optimism/optimism/op-challenger2/game/types"
	"github.com/ethereum-optimism/optimism/op-challenger2/metrics"
	"github.com/ethereum-optimism/optimism/op-service/testlog"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/stretchr/testify/require"
)

// TestSchedulerProcessesGames verifies end-to-end that scheduled games are executed
// by the workers and finish with a disk cleanup call that keeps every active game.
func TestSchedulerProcessesGames(t *testing.T) {
	logger := testlog.Logger(t, log.LevelInfo)
	ctx := context.Background()
	createPlayer := func(g types.GameMetadata, dir string) (GamePlayer, error) {
		return &test.StubGamePlayer{}, nil
	}
	removeExceptCalls := make(chan []common.Address)
	disk := &trackingDiskManager{removeExceptCalls: removeExceptCalls}
	s := NewScheduler(logger, metrics.NoopMetrics, disk, 2, createPlayer, false)
	s.Start(ctx)

	gameAddr1 := common.Address{0xaa}
	gameAddr2 := common.Address{0xbb}
	gameAddr3 := common.Address{0xcc}
	games := asGames(gameAddr1, gameAddr2, gameAddr3)

	require.NoError(t, s.Schedule(games, 0))

	// All jobs should be executed and completed, the last step being to clean up disk resources
	for i := 0; i < len(games); i++ {
		kept := <-removeExceptCalls
		require.Len(t, kept, len(games), "should keep all games")
		for _, game := range games {
			require.Containsf(t, kept, game.Proxy, "should keep game %v", game.Proxy)
		}
	}
	require.NoError(t, s.Close())
}

// TestReturnBusyWhenScheduleQueueFull verifies Schedule is non-blocking and reports
// ErrBusy once the single-slot schedule queue is occupied.
func TestReturnBusyWhenScheduleQueueFull(t *testing.T) {
	logger := testlog.Logger(t, log.LevelInfo)
	createPlayer := func(game types.GameMetadata, dir string) (GamePlayer, error) {
		return &test.StubGamePlayer{}, nil
	}
	removeExceptCalls := make(chan []common.Address)
	disk := &trackingDiskManager{removeExceptCalls: removeExceptCalls}
	s := NewScheduler(logger, metrics.NoopMetrics, disk, 2, createPlayer, false)

	// Scheduler not started - first call fills the queue
	require.NoError(t, s.Schedule(asGames(common.Address{0xaa}), 0))

	// 
Second call should return busy + err := s.Schedule(asGames(common.Address{0xaa}), 0) + require.ErrorIs(t, err, ErrBusy) +} + +type trackingDiskManager struct { + removeExceptCalls chan []common.Address +} + +func (t *trackingDiskManager) DirForGame(addr common.Address) string { + return addr.Hex() +} + +func (t *trackingDiskManager) RemoveAllExcept(addrs []common.Address) error { + t.removeExceptCalls <- addrs + return nil +} diff --git a/op-challenger2/game/scheduler/test/stub_player.go b/op-challenger2/game/scheduler/test/stub_player.go new file mode 100644 index 000000000000..9af0aa7dacf6 --- /dev/null +++ b/op-challenger2/game/scheduler/test/stub_player.go @@ -0,0 +1,29 @@ +package test + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" + "github.com/ethereum/go-ethereum/common" +) + +type StubGamePlayer struct { + Addr common.Address + ProgressCount int + StatusValue types.GameStatus + Dir string + PrestateErr error +} + +func (g *StubGamePlayer) ValidatePrestate(_ context.Context) error { + return g.PrestateErr +} + +func (g *StubGamePlayer) ProgressGame(_ context.Context) types.GameStatus { + g.ProgressCount++ + return g.StatusValue +} + +func (g *StubGamePlayer) Status() types.GameStatus { + return g.StatusValue +} diff --git a/op-challenger2/game/scheduler/types.go b/op-challenger2/game/scheduler/types.go new file mode 100644 index 000000000000..1eccff5ca117 --- /dev/null +++ b/op-challenger2/game/scheduler/types.go @@ -0,0 +1,36 @@ +package scheduler + +import ( + "context" + + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/types" +) + +type GamePlayer interface { + ValidatePrestate(ctx context.Context) error + ProgressGame(ctx context.Context) types.GameStatus + Status() types.GameStatus +} + +type DiskManager interface { + DirForGame(addr common.Address) string + RemoveAllExcept(addrs []common.Address) error +} + +type job struct { + block uint64 + addr 
common.Address
	player GamePlayer
	status types.GameStatus
}

// newJob bundles everything a worker needs to progress one game: the L1 block the
// update is for, the game's proxy address, the player to drive and the last known
// game status.
func newJob(block uint64, addr common.Address, player GamePlayer, status types.GameStatus) *job {
	return &job{
		block:  block,
		addr:   addr,
		player: player,
		status: status,
	}
}

// progressGames accepts jobs from the in channel, calls ProgressGame on the job.player
// and returns the job with job.status updated to the result via the out channel.
// (The previous comment referred to a nonexistent job.resolved field.)
// threadActive/threadIdle are invoked before and after each job so executor
// utilisation metrics stay accurate.
// The loop exits when ctx is done. wg.Done() is called when the function returns.
func progressGames(ctx context.Context, in <-chan job, out chan<- job, wg *sync.WaitGroup, threadActive, threadIdle func()) {
	defer wg.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case j := <-in:
			threadActive()
			j.status = j.player.ProgressGame(ctx)
			out <- j
			threadIdle()
		}
	}
}

// TestWorkerShouldProcessJobsUntilContextDone verifies a worker processes queued jobs,
// reports active/idle transitions and exits when its context is cancelled.
func TestWorkerShouldProcessJobsUntilContextDone(t *testing.T) {
	in := make(chan job, 2)
	out := make(chan job, 2)

	ms := &metricSink{}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var wg sync.WaitGroup
	wg.Add(1)
	go progressGames(ctx, in, out, &wg, ms.ThreadActive, ms.ThreadIdle)

	in <- job{
		player: &test.StubGamePlayer{StatusValue: types.GameStatusInProgress},
	}
	// Wait for ThreadIdle rather than ThreadActive: it is the last callback invoked
	// for a job, so once it fires both counters are guaranteed to be incremented.
	// Waiting only on activeCalls raced with the idleCalls assertion below.
	waitErr := wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
		return ms.idleCalls.Load() >= 1, nil
	})
	require.NoError(t, waitErr)
	require.EqualValues(t, 1, ms.activeCalls.Load())
	require.EqualValues(t, 1, ms.idleCalls.Load())

	in <- job{
		player: &test.StubGamePlayer{StatusValue: types.GameStatusDefenderWon},
	}
	waitErr = wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
		return ms.idleCalls.Load() >= 2, nil
	})
	require.NoError(t, waitErr)
	require.EqualValues(t, 2, ms.activeCalls.Load())
	require.EqualValues(t, 2, ms.idleCalls.Load())

	result1 := readWithTimeout(t, out)
	result2 := readWithTimeout(t, out)

	require.Equal(t, result1.status, types.GameStatusInProgress)
	require.Equal(t, result2.status, types.GameStatusDefenderWon)

	// Cancel the context which should exit the worker
	cancel()
	wg.Wait()
}

// metricSink counts active/idle callbacks; atomics because the worker goroutine and
// the test goroutine touch them concurrently.
type metricSink struct {
	activeCalls atomic.Int32
	idleCalls   atomic.Int32
}

func (m *metricSink) ThreadActive() {
	m.activeCalls.Add(1)
}

func (m *metricSink) ThreadIdle() {
	m.idleCalls.Add(1)
}

// readWithTimeout reads one value from ch, failing the test if nothing arrives
// within 10 seconds.
func readWithTimeout[T any](t *testing.T, ch <-chan T) T {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	select {
	case <-ctx.Done():
		var val T
		t.Fatal("Did not receive event from channel")
		return val // Won't be reached but makes the compiler happy
	case val := <-ch:
		return val
	}
}
"github.com/ethereum-optimism/optimism/op-challenger2/sender" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-challenger2/config" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/claims" + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger2/game/registry" + "github.com/ethereum-optimism/optimism/op-challenger2/game/scheduler" + "github.com/ethereum-optimism/optimism/op-challenger2/metrics" + "github.com/ethereum-optimism/optimism/op-challenger2/version" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/dial" + "github.com/ethereum-optimism/optimism/op-service/httputil" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/txmgr" +) + +type Service struct { + logger log.Logger + metrics metrics.Metricer + monitor *gameMonitor + sched *scheduler.Scheduler + + faultGamesCloser fault.CloseFunc + + preimages *keccak.LargePreimageScheduler + + txMgr *txmgr.SimpleTxManager + txSender *sender.TxSender + + systemClock clock.Clock + l1Clock *clock.SimpleClock + + claimants []common.Address + claimer *claims.BondClaimScheduler + + factoryContract *contracts.DisputeGameFactoryContract + registry *registry.GameTypeRegistry + oracles *registry.OracleRegistry + rollupClient *sources.RollupClient + + l1Client *ethclient.Client + pollClient client.RPC + + pprofService *oppprof.Service + metricsSrv *httputil.HTTPServer + 
+ balanceMetricer io.Closer + + stopped atomic.Bool +} + +// NewService creates a new Service. +func NewService(ctx context.Context, logger log.Logger, cfg *config.Config, m metrics.Metricer) (*Service, error) { + s := &Service{ + systemClock: clock.SystemClock, + l1Clock: clock.NewSimpleClock(), + logger: logger, + metrics: m, + } + + if err := s.initFromConfig(ctx, cfg); err != nil { + // upon initialization error we can try to close any of the service components that may have started already. + return nil, errors.Join(fmt.Errorf("failed to init challenger game service: %w", err), s.Stop(ctx)) + } + + return s, nil +} + +func (s *Service) initFromConfig(ctx context.Context, cfg *config.Config) error { + if err := s.initTxManager(ctx, cfg); err != nil { + return fmt.Errorf("failed to init tx manager: %w", err) + } + s.initClaimants(cfg) + if err := s.initL1Client(ctx, cfg); err != nil { + return fmt.Errorf("failed to init l1 client: %w", err) + } + if err := s.initRollupClient(ctx, cfg); err != nil { + return fmt.Errorf("failed to init rollup client: %w", err) + } + if err := s.initPollClient(ctx, cfg); err != nil { + return fmt.Errorf("failed to init poll client: %w", err) + } + if err := s.initPProf(&cfg.PprofConfig); err != nil { + return fmt.Errorf("failed to init profiling: %w", err) + } + if err := s.initMetricsServer(&cfg.MetricsConfig); err != nil { + return fmt.Errorf("failed to init metrics server: %w", err) + } + if err := s.initFactoryContract(cfg); err != nil { + return fmt.Errorf("failed to create factory contract bindings: %w", err) + } + if err := s.registerGameTypes(ctx, cfg); err != nil { + return fmt.Errorf("failed to register game types: %w", err) + } + if err := s.initBondClaims(); err != nil { + return fmt.Errorf("failed to init bond claiming: %w", err) + } + if err := s.initScheduler(cfg); err != nil { + return fmt.Errorf("failed to init scheduler: %w", err) + } + if err := s.initLargePreimages(); err != nil { + return fmt.Errorf("failed to 
init large preimage scheduler: %w", err) + } + + s.initMonitor(cfg) + + s.metrics.RecordInfo(version.SimpleWithMeta) + s.metrics.RecordUp() + return nil +} + +func (s *Service) initClaimants(cfg *config.Config) { + claimants := []common.Address{s.txSender.From()} + s.claimants = append(claimants, cfg.AdditionalBondClaimants...) +} + +func (s *Service) initTxManager(ctx context.Context, cfg *config.Config) error { + txMgr, err := txmgr.NewSimpleTxManager("challenger", s.logger, s.metrics, cfg.TxMgrConfig) + if err != nil { + return fmt.Errorf("failed to create the transaction manager: %w", err) + } + s.txMgr = txMgr + s.txSender = sender.NewTxSender(ctx, s.logger, txMgr, cfg.MaxPendingTx) + return nil +} + +func (s *Service) initL1Client(ctx context.Context, cfg *config.Config) error { + l1Client, err := dial.DialEthClientWithTimeout(ctx, dial.DefaultDialTimeout, s.logger, cfg.L1EthRpc) + if err != nil { + return fmt.Errorf("failed to dial L1: %w", err) + } + s.l1Client = l1Client + return nil +} + +func (s *Service) initPollClient(ctx context.Context, cfg *config.Config) error { + pollClient, err := client.NewRPCWithClient(ctx, s.logger, cfg.L1EthRpc, client.NewBaseRPCClient(s.l1Client.Client()), cfg.PollInterval) + if err != nil { + return fmt.Errorf("failed to create RPC client: %w", err) + } + s.pollClient = pollClient + return nil +} + +func (s *Service) initPProf(cfg *oppprof.CLIConfig) error { + s.pprofService = oppprof.New( + cfg.ListenEnabled, + cfg.ListenAddr, + cfg.ListenPort, + cfg.ProfileType, + cfg.ProfileDir, + cfg.ProfileFilename, + ) + + if err := s.pprofService.Start(); err != nil { + return fmt.Errorf("failed to start pprof service: %w", err) + } + + return nil +} + +func (s *Service) initMetricsServer(cfg *opmetrics.CLIConfig) error { + if !cfg.Enabled { + return nil + } + s.logger.Debug("starting metrics server", "addr", cfg.ListenAddr, "port", cfg.ListenPort) + m, ok := s.metrics.(opmetrics.RegistryMetricer) + if !ok { + return 
fmt.Errorf("metrics were enabled, but metricer %T does not expose registry for metrics-server", s.metrics) + } + metricsSrv, err := opmetrics.StartServer(m.Registry(), cfg.ListenAddr, cfg.ListenPort) + if err != nil { + return fmt.Errorf("failed to start metrics server: %w", err) + } + s.logger.Info("started metrics server", "addr", metricsSrv.Addr()) + s.metricsSrv = metricsSrv + s.balanceMetricer = s.metrics.StartBalanceMetrics(s.logger, s.l1Client, s.txSender.From()) + return nil +} + +func (s *Service) initFactoryContract(cfg *config.Config) error { + factoryContract := contracts.NewDisputeGameFactoryContract(s.metrics, cfg.GameFactoryAddress, + batching.NewMultiCaller(s.l1Client.Client(), batching.DefaultBatchSize)) + s.factoryContract = factoryContract + return nil +} + +func (s *Service) initBondClaims() error { + claimer := claims.NewBondClaimer(s.logger, s.metrics, s.registry.CreateBondContract, s.txSender, s.claimants...) + s.claimer = claims.NewBondClaimScheduler(s.logger, s.metrics, claimer) + return nil +} + +func (s *Service) initRollupClient(ctx context.Context, cfg *config.Config) error { + if cfg.RollupRpc == "" { + return nil + } + rollupClient, err := dial.DialRollupClientWithTimeout(ctx, dial.DefaultDialTimeout, s.logger, cfg.RollupRpc) + if err != nil { + return err + } + s.rollupClient = rollupClient + return nil +} + +func (s *Service) registerGameTypes(ctx context.Context, cfg *config.Config) error { + gameTypeRegistry := registry.NewGameTypeRegistry() + oracles := registry.NewOracleRegistry() + caller := batching.NewMultiCaller(s.l1Client.Client(), batching.DefaultBatchSize) + closer, err := fault.RegisterGameTypes(ctx, s.systemClock, s.l1Clock, s.logger, s.metrics, cfg, gameTypeRegistry, oracles, s.rollupClient, s.txSender, s.factoryContract, caller, s.l1Client, cfg.SelectiveClaimResolution, s.claimants) + if err != nil { + return err + } + s.faultGamesCloser = closer + s.registry = gameTypeRegistry + s.oracles = oracles + return nil +} + 
+func (s *Service) initScheduler(cfg *config.Config) error { + disk := newDiskManager(cfg.Datadir) + s.sched = scheduler.NewScheduler(s.logger, s.metrics, disk, cfg.MaxConcurrency, s.registry.CreatePlayer, cfg.AllowInvalidPrestate) + return nil +} + +func (s *Service) initLargePreimages() error { + fetcher := fetcher.NewPreimageFetcher(s.logger, s.l1Client) + verifier := keccak.NewPreimageVerifier(s.logger, fetcher) + challenger := keccak.NewPreimageChallenger(s.logger, s.metrics, verifier, s.txSender) + s.preimages = keccak.NewLargePreimageScheduler(s.logger, s.l1Clock, s.oracles, challenger) + return nil +} + +func (s *Service) initMonitor(cfg *config.Config) { + s.monitor = newGameMonitor(s.logger, s.l1Clock, s.factoryContract, s.sched, s.preimages, cfg.GameWindow, s.claimer, s.l1Client.BlockNumber, cfg.GameAllowlist, s.pollClient) +} + +func (s *Service) Start(ctx context.Context) error { + s.logger.Info("starting scheduler") + s.sched.Start(ctx) + s.claimer.Start(ctx) + s.preimages.Start(ctx) + s.logger.Info("starting monitoring") + s.monitor.StartMonitoring() + s.logger.Info("challenger game service start completed") + return nil +} + +func (s *Service) Stopped() bool { + return s.stopped.Load() +} + +func (s *Service) Stop(ctx context.Context) error { + s.logger.Info("stopping challenger game service") + + var result error + if s.sched != nil { + if err := s.sched.Close(); err != nil { + result = errors.Join(result, fmt.Errorf("failed to close scheduler: %w", err)) + } + } + if s.monitor != nil { + s.monitor.StopMonitoring() + } + if s.faultGamesCloser != nil { + s.faultGamesCloser() + } + if s.pprofService != nil { + if err := s.pprofService.Stop(ctx); err != nil { + result = errors.Join(result, fmt.Errorf("failed to close pprof server: %w", err)) + } + } + if s.balanceMetricer != nil { + if err := s.balanceMetricer.Close(); err != nil { + result = errors.Join(result, fmt.Errorf("failed to close balance metricer: %w", err)) + } + } + + if s.txMgr != nil { + 
s.txMgr.Close() + } + + if s.rollupClient != nil { + s.rollupClient.Close() + } + if s.pollClient != nil { + s.pollClient.Close() + } + if s.l1Client != nil { + s.l1Client.Close() + } + if s.metricsSrv != nil { + if err := s.metricsSrv.Stop(ctx); err != nil { + result = errors.Join(result, fmt.Errorf("failed to close metrics server: %w", err)) + } + } + s.stopped.Store(true) + s.logger.Info("stopped challenger game service", "err", result) + return result +} diff --git a/op-challenger2/game/types/types.go b/op-challenger2/game/types/types.go new file mode 100644 index 000000000000..09ec3e7b1e03 --- /dev/null +++ b/op-challenger2/game/types/types.go @@ -0,0 +1,56 @@ +package types + +import ( + "encoding/json" + "errors" + "fmt" + + gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum/go-ethereum/common" +) + +var ErrInvalidPrestate = errors.New("absolute prestate does not match") + +type GameStatus uint8 + +const ( + GameStatusInProgress GameStatus = iota + GameStatusChallengerWon + GameStatusDefenderWon +) + +// String returns the string representation of the game status. +func (s GameStatus) String() string { + switch s { + case GameStatusInProgress: + return "In Progress" + case GameStatusChallengerWon: + return "Challenger Won" + case GameStatusDefenderWon: + return "Defender Won" + default: + return "Unknown" + } +} + +// GameStatusFromUint8 returns a game status from the uint8 representation. 
+func GameStatusFromUint8(i uint8) (GameStatus, error) {
+	if i > 2 {
+		return GameStatus(i), fmt.Errorf("invalid game status: %d", i)
+	}
+	return GameStatus(i), nil
+}
+
+// GameMetadata is the identifying information for a dispute game as
+// reported by the dispute game factory.
+type GameMetadata struct {
+	Index     uint64
+	GameType  uint32
+	Timestamp uint64
+	Proxy     common.Address
+}
+
+// GameStatusToOPChallenger2GameStatus converts an op-challenger game status
+// into the op-challenger2 representation. The two enums are both uint8 and
+// declare the same values in the same order, so a direct cast is used as a
+// fallback; previously both (un)marshalling errors were silently discarded,
+// which returned the zero value (GameStatusInProgress) on any failure.
+func GameStatusToOPChallenger2GameStatus(s gameTypes.GameStatus) GameStatus {
+	jsonBytes, err := json.Marshal(s)
+	if err != nil {
+		return GameStatus(s)
+	}
+	var castedStatus GameStatus
+	if err := json.Unmarshal(jsonBytes, &castedStatus); err != nil {
+		return GameStatus(s)
+	}
+	return castedStatus
+}
diff --git a/op-challenger2/game/types/types_test.go b/op-challenger2/game/types/types_test.go
new file mode 100644
index 000000000000..b22425f4e69e
--- /dev/null
+++ b/op-challenger2/game/types/types_test.go
+package types
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+var validGameStatuses = []GameStatus{
+	GameStatusInProgress,
+	GameStatusChallengerWon,
+	GameStatusDefenderWon,
+}
+
+func TestGameStatusFromUint8(t *testing.T) {
+	for _, status := range validGameStatuses {
+		t.Run(fmt.Sprintf("Valid Game Status %v", status), func(t *testing.T) {
+			parsed, err := GameStatusFromUint8(uint8(status))
+			require.NoError(t, err)
+			require.Equal(t, status, parsed)
+		})
+	}
+
+	t.Run("Invalid", func(t *testing.T) {
+		status, err := GameStatusFromUint8(3)
+		require.Error(t, err)
+		require.Equal(t, GameStatus(3), status)
+	})
+}
diff --git a/op-challenger2/metrics/metrics.go b/op-challenger2/metrics/metrics.go
new file mode 100644
index 000000000000..adb223c0eac7
--- /dev/null
+++ b/op-challenger2/metrics/metrics.go
+package metrics
+
+import (
+	"io"
+
+	"github.com/ethereum-optimism/optimism/op-service/httputil"
+	"github.com/ethereum-optimism/optimism/op-service/sources/caching"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethclient"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/prometheus/client_golang/prometheus"
+
+	contractMetrics 
"github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics" +) + +const Namespace = "op_challenger" + +type Metricer interface { + RecordInfo(version string) + RecordUp() + + StartBalanceMetrics(l log.Logger, client *ethclient.Client, account common.Address) io.Closer + + // Record Tx metrics + txmetrics.TxMetricer + + // Record cache metrics + caching.Metrics + + // Record contract metrics + contractMetrics.ContractMetricer + + RecordActedL1Block(n uint64) + + RecordGameStep() + RecordGameMove() + RecordGameL2Challenge() + RecordCannonExecutionTime(t float64) + RecordAsteriscExecutionTime(t float64) + RecordClaimResolutionTime(t float64) + RecordGameActTime(t float64) + + RecordPreimageChallenged() + RecordPreimageChallengeFailed() + + RecordBondClaimFailed() + RecordBondClaimed(amount uint64) + + RecordGamesStatus(inProgress, defenderWon, challengerWon int) + + RecordGameUpdateScheduled() + RecordGameUpdateCompleted() + + IncActiveExecutors() + DecActiveExecutors() + IncIdleExecutors() + DecIdleExecutors() +} + +// Metrics implementation must implement RegistryMetricer to allow the metrics server to work. 
+var _ opmetrics.RegistryMetricer = (*Metrics)(nil) + +type Metrics struct { + ns string + registry *prometheus.Registry + factory opmetrics.Factory + + txmetrics.TxMetrics + *opmetrics.CacheMetrics + *contractMetrics.ContractMetrics + + info prometheus.GaugeVec + up prometheus.Gauge + + executors prometheus.GaugeVec + + bondClaimFailures prometheus.Counter + bondsClaimed prometheus.Counter + + preimageChallenged prometheus.Counter + preimageChallengeFailed prometheus.Counter + + highestActedL1Block prometheus.Gauge + + moves prometheus.Counter + steps prometheus.Counter + l2Challenges prometheus.Counter + + claimResolutionTime prometheus.Histogram + gameActTime prometheus.Histogram + cannonExecutionTime prometheus.Histogram + asteriscExecutionTime prometheus.Histogram + + trackedGames prometheus.GaugeVec + inflightGames prometheus.Gauge +} + +func (m *Metrics) Registry() *prometheus.Registry { + return m.registry +} + +var _ Metricer = (*Metrics)(nil) + +func NewMetrics() *Metrics { + registry := opmetrics.NewRegistry() + factory := opmetrics.With(registry) + + return &Metrics{ + ns: Namespace, + registry: registry, + factory: factory, + + TxMetrics: txmetrics.MakeTxMetrics(Namespace, factory), + + CacheMetrics: opmetrics.NewCacheMetrics(factory, Namespace, "provider_cache", "Provider cache"), + + ContractMetrics: contractMetrics.MakeContractMetrics(Namespace, factory), + + info: *factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: Namespace, + Name: "info", + Help: "Pseudo-metric tracking version and config info", + }, []string{ + "version", + }), + up: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: Namespace, + Name: "up", + Help: "1 if the op-challenger2 has finished starting up", + }), + executors: *factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: Namespace, + Name: "executors", + Help: "Number of active and idle executors", + }, []string{ + "status", + }), + moves: factory.NewCounter(prometheus.CounterOpts{ + Namespace: Namespace, + Name: 
"moves", + Help: "Number of game moves made by the challenge agent", + }), + steps: factory.NewCounter(prometheus.CounterOpts{ + Namespace: Namespace, + Name: "steps", + Help: "Number of game steps made by the challenge agent", + }), + l2Challenges: factory.NewCounter(prometheus.CounterOpts{ + Namespace: Namespace, + Name: "l2_challenges", + Help: "Number of L2 challenges made by the challenge agent", + }), + cannonExecutionTime: factory.NewHistogram(prometheus.HistogramOpts{ + Namespace: Namespace, + Name: "cannon_execution_time", + Help: "Time (in seconds) to execute cannon", + Buckets: append( + []float64{1.0, 10.0}, + prometheus.ExponentialBuckets(30.0, 2.0, 14)...), + }), + claimResolutionTime: factory.NewHistogram(prometheus.HistogramOpts{ + Namespace: Namespace, + Name: "claim_resolution_time", + Help: "Time (in seconds) spent trying to resolve claims", + Buckets: []float64{.05, .1, .25, .5, 1, 2.5, 5, 7.5, 10}, + }), + gameActTime: factory.NewHistogram(prometheus.HistogramOpts{ + Namespace: Namespace, + Name: "game_act_time", + Help: "Time (in seconds) spent acting on a game", + Buckets: append( + []float64{1.0, 2.0, 5.0, 10.0}, + prometheus.ExponentialBuckets(30.0, 2.0, 14)...), + }), + asteriscExecutionTime: factory.NewHistogram(prometheus.HistogramOpts{ + Namespace: Namespace, + Name: "asterisc_execution_time", + Help: "Time (in seconds) to execute asterisc", + Buckets: append( + []float64{1.0, 10.0}, + prometheus.ExponentialBuckets(30.0, 2.0, 14)...), + }), + bondClaimFailures: factory.NewCounter(prometheus.CounterOpts{ + Namespace: Namespace, + Name: "claim_failures", + Help: "Number of bond claims that failed", + }), + bondsClaimed: factory.NewCounter(prometheus.CounterOpts{ + Namespace: Namespace, + Name: "bonds", + Help: "Number of bonds claimed by the challenge agent", + }), + preimageChallenged: factory.NewCounter(prometheus.CounterOpts{ + Namespace: Namespace, + Name: "preimage_challenged", + Help: "Number of preimages challenged by the 
challenger", + }), + preimageChallengeFailed: factory.NewCounter(prometheus.CounterOpts{ + Namespace: Namespace, + Name: "preimage_challenge_failed", + Help: "Number of preimage challenges that failed", + }), + trackedGames: *factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: Namespace, + Name: "tracked_games", + Help: "Number of games being tracked by the challenger", + }, []string{ + "status", + }), + highestActedL1Block: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: Namespace, + Name: "highest_acted_l1_block", + Help: "Highest L1 block acted on by the challenger", + }), + inflightGames: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: Namespace, + Name: "inflight_games", + Help: "Number of games being tracked by the challenger", + }), + } +} + +func (m *Metrics) Start(host string, port int) (*httputil.HTTPServer, error) { + return opmetrics.StartServer(m.registry, host, port) +} + +func (m *Metrics) StartBalanceMetrics( + l log.Logger, + client *ethclient.Client, + account common.Address, +) io.Closer { + return opmetrics.LaunchBalanceMetrics(l, m.registry, m.ns, client, account) +} + +// RecordInfo sets a pseudo-metric that contains versioning and +// config info for the op-proposer. +func (m *Metrics) RecordInfo(version string) { + m.info.WithLabelValues(version).Set(1) +} + +// RecordUp sets the up metric to 1. 
+func (m *Metrics) RecordUp() {
+	// All collectors are already registered on m.registry via the factory in
+	// NewMetrics; a stray no-op prometheus.MustRegister() call was removed here.
+	m.up.Set(1)
+}
+
+// Document returns the documented metrics created through the factory.
+func (m *Metrics) Document() []opmetrics.DocumentedMetric {
+	return m.factory.Document()
+}
+
+func (m *Metrics) RecordGameMove() {
+	m.moves.Add(1)
+}
+
+func (m *Metrics) RecordGameStep() {
+	m.steps.Add(1)
+}
+
+func (m *Metrics) RecordGameL2Challenge() {
+	m.l2Challenges.Add(1)
+}
+
+func (m *Metrics) RecordPreimageChallenged() {
+	m.preimageChallenged.Add(1)
+}
+
+func (m *Metrics) RecordPreimageChallengeFailed() {
+	m.preimageChallengeFailed.Add(1)
+}
+
+func (m *Metrics) RecordBondClaimFailed() {
+	m.bondClaimFailures.Add(1)
+}
+
+func (m *Metrics) RecordBondClaimed(amount uint64) {
+	m.bondsClaimed.Add(float64(amount))
+}
+
+func (m *Metrics) RecordCannonExecutionTime(t float64) {
+	m.cannonExecutionTime.Observe(t)
+}
+
+func (m *Metrics) RecordAsteriscExecutionTime(t float64) {
+	m.asteriscExecutionTime.Observe(t)
+}
+
+func (m *Metrics) RecordClaimResolutionTime(t float64) {
+	m.claimResolutionTime.Observe(t)
+}
+
+func (m *Metrics) RecordGameActTime(t float64) {
+	m.gameActTime.Observe(t)
+}
+
+func (m *Metrics) IncActiveExecutors() {
+	m.executors.WithLabelValues("active").Inc()
+}
+
+func (m *Metrics) DecActiveExecutors() {
+	m.executors.WithLabelValues("active").Dec()
+}
+
+func (m *Metrics) IncIdleExecutors() {
+	m.executors.WithLabelValues("idle").Inc()
+}
+
+func (m *Metrics) DecIdleExecutors() {
+	m.executors.WithLabelValues("idle").Dec()
+}
+
+// RecordGamesStatus sets the tracked-games gauge for each game outcome.
+func (m *Metrics) RecordGamesStatus(inProgress, defenderWon, challengerWon int) {
+	m.trackedGames.WithLabelValues("in_progress").Set(float64(inProgress))
+	m.trackedGames.WithLabelValues("defender_won").Set(float64(defenderWon))
+	m.trackedGames.WithLabelValues("challenger_won").Set(float64(challengerWon))
+}
+
+func (m *Metrics) RecordActedL1Block(n uint64) {
+	m.highestActedL1Block.Set(float64(n))
+}
+
+func (m *Metrics) RecordGameUpdateScheduled() {
+	m.inflightGames.Add(1)
+}
+
+func (m *Metrics) 
RecordGameUpdateCompleted() { + m.inflightGames.Sub(1) +} diff --git a/op-challenger2/metrics/noop.go b/op-challenger2/metrics/noop.go new file mode 100644 index 000000000000..8519d6bab4b6 --- /dev/null +++ b/op-challenger2/metrics/noop.go @@ -0,0 +1,56 @@ +package metrics + +import ( + "io" + + contractMetrics "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts/metrics" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + + txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics" +) + +type NoopMetricsImpl struct { + txmetrics.NoopTxMetrics + contractMetrics.NoopMetrics +} + +func (i *NoopMetricsImpl) StartBalanceMetrics(l log.Logger, client *ethclient.Client, account common.Address) io.Closer { + return nil +} + +var NoopMetrics Metricer = new(NoopMetricsImpl) + +func (*NoopMetricsImpl) RecordInfo(version string) {} +func (*NoopMetricsImpl) RecordUp() {} + +func (*NoopMetricsImpl) RecordGameMove() {} +func (*NoopMetricsImpl) RecordGameStep() {} +func (*NoopMetricsImpl) RecordGameL2Challenge() {} + +func (*NoopMetricsImpl) RecordActedL1Block(_ uint64) {} + +func (*NoopMetricsImpl) RecordPreimageChallenged() {} +func (*NoopMetricsImpl) RecordPreimageChallengeFailed() {} + +func (*NoopMetricsImpl) RecordBondClaimFailed() {} +func (*NoopMetricsImpl) RecordBondClaimed(uint64) {} + +func (*NoopMetricsImpl) RecordCannonExecutionTime(t float64) {} +func (*NoopMetricsImpl) RecordAsteriscExecutionTime(t float64) {} +func (*NoopMetricsImpl) RecordClaimResolutionTime(t float64) {} +func (*NoopMetricsImpl) RecordGameActTime(t float64) {} + +func (*NoopMetricsImpl) RecordGamesStatus(inProgress, defenderWon, challengerWon int) {} + +func (*NoopMetricsImpl) RecordGameUpdateScheduled() {} +func (*NoopMetricsImpl) RecordGameUpdateCompleted() {} + +func (*NoopMetricsImpl) IncActiveExecutors() {} +func (*NoopMetricsImpl) DecActiveExecutors() {} +func 
(*NoopMetricsImpl) IncIdleExecutors() {} +func (*NoopMetricsImpl) DecIdleExecutors() {} + +func (*NoopMetricsImpl) CacheAdd(_ string, _ int, _ bool) {} +func (*NoopMetricsImpl) CacheGet(_ string, _ bool) {} diff --git a/op-challenger2/sender/sender.go b/op-challenger2/sender/sender.go new file mode 100644 index 000000000000..8774a2beae3c --- /dev/null +++ b/op-challenger2/sender/sender.go @@ -0,0 +1,62 @@ +package sender + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +var ErrTransactionReverted = errors.New("transaction published but reverted") + +type TxSender struct { + log log.Logger + + txMgr txmgr.TxManager + queue *txmgr.Queue[int] +} + +func NewTxSender(ctx context.Context, logger log.Logger, txMgr txmgr.TxManager, maxPending uint64) *TxSender { + queue := txmgr.NewQueue[int](ctx, txMgr, maxPending) + return &TxSender{ + log: logger, + txMgr: txMgr, + queue: queue, + } +} + +func (s *TxSender) From() common.Address { + return s.txMgr.From() +} + +func (s *TxSender) SendAndWaitDetailed(txPurpose string, txs ...txmgr.TxCandidate) []error { + receiptsCh := make(chan txmgr.TxReceipt[int], len(txs)) + for i, tx := range txs { + s.queue.Send(i, tx, receiptsCh) + } + completed := 0 + errs := make([]error, len(txs)) + for completed < len(txs) { + rcpt := <-receiptsCh + completed++ + if rcpt.Err != nil { + errs[rcpt.ID] = rcpt.Err + } else if rcpt.Receipt != nil { + if rcpt.Receipt.Status != types.ReceiptStatusSuccessful { + errs[rcpt.ID] = fmt.Errorf("%w purpose: %v hash: %v", ErrTransactionReverted, txPurpose, rcpt.Receipt.TxHash) + } else { + s.log.Debug("Transaction successfully published", "tx_hash", rcpt.Receipt.TxHash, "purpose", txPurpose) + } + } + } + return errs +} + +func (s *TxSender) SendAndWaitSimple(txPurpose string, txs ...txmgr.TxCandidate) error { + errs := 
s.SendAndWaitDetailed(txPurpose, txs...) + return errors.Join(errs...) +} diff --git a/op-challenger2/sender/sender_test.go b/op-challenger2/sender/sender_test.go new file mode 100644 index 000000000000..e83f2416d93f --- /dev/null +++ b/op-challenger2/sender/sender_test.go @@ -0,0 +1,174 @@ +package sender + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" +) + +func TestSendAndWaitQueueWithMaxPending(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + txMgr := &stubTxMgr{sending: make(map[byte]chan *types.Receipt)} + sender := NewTxSender(ctx, testlog.Logger(t, log.LevelInfo), txMgr, 5) + + tx := func(i byte) txmgr.TxCandidate { + return txmgr.TxCandidate{TxData: []byte{i}} + } + + sendAsync := func(txs ...txmgr.TxCandidate) chan []txmgr.TxCandidate { + ch := make(chan []txmgr.TxCandidate, 1) + go func() { + err := sender.SendAndWaitSimple("testing", txs...) 
+ require.NoError(t, err) + ch <- txs + close(ch) + }() + return ch + } + + wait := func(ch chan []txmgr.TxCandidate) []txmgr.TxCandidate { + select { + case rcpts := <-ch: + return rcpts + case <-ctx.Done(): + require.FailNow(t, "Timeout waiting for receipt") + return nil + } + } + + batch1 := sendAsync(tx(1), tx(2), tx(3)) + batch2 := sendAsync(tx(4), tx(5)) + require.Eventually(t, func() bool { + return txMgr.sentCount() == 5 + }, 10*time.Second, 1*time.Millisecond, "Wait for first transactions to send") + + require.Len(t, batch1, 0, "Should not have completed batch1") + require.Len(t, batch2, 0, "Should not have completed batch2") + + // Send a third batch after the first set have started sending to avoid races + batch3 := sendAsync(tx(6)) + require.Len(t, batch3, 0, "Should not have completed batch3") + + // Sends the 6th tx after one of the previous ones completes + txMgr.txSuccess(tx(5)) + require.Eventually(t, func() bool { + return txMgr.sentCount() == 6 + }, 10*time.Second, 1*time.Millisecond, "Wait for final transaction to send") + require.Len(t, batch1, 0, "Should not have completed batch1") + require.Len(t, batch2, 0, "Should not have completed batch2") + require.Len(t, batch3, 0, "Should not have completed batch3") + + // Batches complete as soon as they are sent + txMgr.txSuccess(tx(6)) + require.Len(t, wait(batch3), 1, "Batch3 should complete") + require.Len(t, batch1, 0, "Should not have completed batch1") + require.Len(t, batch2, 0, "Should not have completed batch2") + + txMgr.txSuccess(tx(2)) + txMgr.txSuccess(tx(3)) + require.Len(t, batch1, 0, "Should not have completed batch1") + require.Len(t, batch2, 0, "Should not have completed batch2") + + txMgr.txSuccess(tx(1)) + require.Len(t, wait(batch1), 3, "Batch1 should complete") + require.Len(t, batch2, 0, "Should not have completed batch2") + + txMgr.txSuccess(tx(4)) + require.Len(t, wait(batch2), 2, "Batch2 should complete") +} + +func TestSendAndWaitReturnIndividualErrors(t *testing.T) { + 
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + txMgr := &stubTxMgr{ + sending: make(map[byte]chan *types.Receipt), + syncStatus: map[byte]uint64{ + 0: types.ReceiptStatusSuccessful, + 1: types.ReceiptStatusFailed, + 2: types.ReceiptStatusSuccessful, + }, + } + sender := NewTxSender(ctx, testlog.Logger(t, log.LevelInfo), txMgr, 500) + + tx := func(i byte) txmgr.TxCandidate { + return txmgr.TxCandidate{TxData: []byte{i}} + } + + errs := sender.SendAndWaitDetailed("testing", tx(0), tx(1), tx(2)) + require.Len(t, errs, 3) + require.NoError(t, errs[0]) + require.ErrorIs(t, errs[1], ErrTransactionReverted) + require.NoError(t, errs[2]) +} + +type stubTxMgr struct { + m sync.Mutex + sending map[byte]chan *types.Receipt + syncStatus map[byte]uint64 +} + +func (s *stubTxMgr) IsClosed() bool { + return false +} + +func (s *stubTxMgr) Send(ctx context.Context, candidate txmgr.TxCandidate) (*types.Receipt, error) { + ch := s.recordTx(candidate) + return <-ch, nil +} + +func (s *stubTxMgr) recordTx(candidate txmgr.TxCandidate) chan *types.Receipt { + s.m.Lock() + defer s.m.Unlock() + id := candidate.TxData[0] + if _, ok := s.sending[id]; ok { + // Shouldn't happen if tests are well written, but double check... + panic("Sending duplicate transaction") + } + ch := make(chan *types.Receipt, 1) + if status, ok := s.syncStatus[id]; ok { + ch <- &types.Receipt{Status: status} + } else { + s.sending[id] = ch + } + return ch +} + +func (s *stubTxMgr) txSuccess(candidate txmgr.TxCandidate) { + s.m.Lock() + defer s.m.Unlock() + ch, ok := s.sending[candidate.TxData[0]] + if !ok { + // Shouldn't happen if tests are well written, but double check... 
+ panic(fmt.Sprintf("Completing unknown transaction: %v Known: %v", candidate.TxData[0], maps.Keys(s.sending))) + } + ch <- &types.Receipt{Status: types.ReceiptStatusSuccessful} + close(ch) +} + +func (s *stubTxMgr) sentCount() int { + s.m.Lock() + defer s.m.Unlock() + return len(s.sending) +} + +func (s *stubTxMgr) From() common.Address { + panic("unsupported") +} + +func (s *stubTxMgr) BlockNumber(_ context.Context) (uint64, error) { + panic("unsupported") +} + +func (s *stubTxMgr) Close() { +} diff --git a/op-challenger2/tools/create_game.go b/op-challenger2/tools/create_game.go new file mode 100644 index 000000000000..db2fd8de9473 --- /dev/null +++ b/op-challenger2/tools/create_game.go @@ -0,0 +1,44 @@ +package tools + +import ( + "context" + "fmt" + + "github.com/ethereum-optimism/optimism/op-challenger2/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +type GameCreator struct { + contract *contracts.DisputeGameFactoryContract + txMgr txmgr.TxManager +} + +func NewGameCreator(contract *contracts.DisputeGameFactoryContract, txMgr txmgr.TxManager) *GameCreator { + return &GameCreator{ + contract: contract, + txMgr: txMgr, + } +} + +func (g *GameCreator) CreateGame(ctx context.Context, outputRoot common.Hash, traceType uint64, l2BlockNum uint64) (common.Address, error) { + txCandidate, err := g.contract.CreateTx(ctx, uint32(traceType), outputRoot, l2BlockNum) + if err != nil { + return common.Address{}, fmt.Errorf("failed to create tx: %w", err) + } + + rct, err := g.txMgr.Send(ctx, txCandidate) + if err != nil { + return common.Address{}, fmt.Errorf("failed to send tx: %w", err) + } + if rct.Status != types.ReceiptStatusSuccessful { + return common.Address{}, fmt.Errorf("game creation transaction (%v) reverted", rct.TxHash.Hex()) + } + + gameAddr, _, _, err := g.contract.DecodeDisputeGameCreatedLog(rct) + if err != nil { + return 
common.Address{}, fmt.Errorf("failed to decode game created: %w", err) + } + return gameAddr, nil +} diff --git a/op-challenger2/version/version.go b/op-challenger2/version/version.go new file mode 100644 index 000000000000..834fc089b19e --- /dev/null +++ b/op-challenger2/version/version.go @@ -0,0 +1,14 @@ +package version + +var ( + Version = "v0.1.0" + Meta = "dev" +) + +var SimpleWithMeta = func() string { + v := Version + if Meta != "" { + v += "-" + Meta + } + return v +}()