diff --git a/coordinator/coordinator_test.go b/coordinator/coordinator_test.go index 9e3a6503..f8ec88cc 100644 --- a/coordinator/coordinator_test.go +++ b/coordinator/coordinator_test.go @@ -18,6 +18,7 @@ import ( "github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/l2db" "github.com/hermeznetwork/hermez-node/db/statedb" + "github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/etherscan" "github.com/hermeznetwork/hermez-node/log" "github.com/hermeznetwork/hermez-node/synchronizer" @@ -508,6 +509,21 @@ func TestCoordHandleMsgSyncBlock(t *testing.T) { closeTestModules(t, modules) } +// ethAddTokens adds the tokens from the blocks to the blockchain +func ethAddTokens(blocks []common.BlockData, client *test.Client) { + for _, block := range blocks { + for _, token := range block.Rollup.AddedTokens { + consts := eth.ERC20Consts{ + Name: fmt.Sprintf("Token %d", token.TokenID), + Symbol: fmt.Sprintf("TK%d", token.TokenID), + Decimals: 18, + } + // tokenConsts[token.TokenID] = consts + client.CtlAddERC20(token.EthAddr, consts) + } + } +} + func TestCoordinatorStress(t *testing.T) { if os.Getenv("TEST_COORD_STRESS") == "" { return @@ -530,7 +546,7 @@ func TestCoordinatorStress(t *testing.T) { wg.Add(1) go func() { for { - blockData, _, err := syn.Sync(ctx) + blockData, _, err := syn.Sync(ctx, nil) if ctx.Err() != nil { wg.Done() return diff --git a/coordinator/pipeline_test.go b/coordinator/pipeline_test.go index c60c0cfd..fd310ffc 100644 --- a/coordinator/pipeline_test.go +++ b/coordinator/pipeline_test.go @@ -2,6 +2,7 @@ package coordinator import ( "context" + "fmt" "io/ioutil" "math/big" "os" @@ -13,15 +14,26 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/coordinator/prover" + "github.com/hermeznetwork/hermez-node/db/historydb" + "github.com/hermeznetwork/hermez-node/db/statedb" "github.com/hermeznetwork/hermez-node/eth" "github.com/hermeznetwork/hermez-node/etherscan" "github.com/hermeznetwork/hermez-node/synchronizer" "github.com/hermeznetwork/hermez-node/test" + "github.com/hermeznetwork/hermez-node/test/til" "github.com/iden3/go-merkletree" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func newBigInt(s string) *big.Int { + v, ok := new(big.Int).SetString(s, 10) + if !ok { + panic(fmt.Errorf("Can't set big.Int from %s", s)) + } + return v +} + func TestPipelineShouldL1L2Batch(t *testing.T) { ethClientSetup := test.NewClientSetupExample() ethClientSetup.ChainID = big.NewInt(int64(chainID)) @@ -93,6 +105,144 @@ func TestPipelineShouldL1L2Batch(t *testing.T) { closeTestModules(t, modules) } +const ( + testTokensLen = 3 + testUsersLen = 4 +) + +func preloadSync(t *testing.T, ethClient *test.Client, sync *synchronizer.Synchronizer, + historyDB *historydb.HistoryDB, stateDB *statedb.StateDB) *til.Context { + // Create a set with `testTokensLen` tokens and for each token + // `testUsersLen` accounts. 
+	var set []til.Instruction
+	// set = append(set, til.Instruction{Typ: "Blockchain"})
+	for tokenID := 1; tokenID < testTokensLen; tokenID++ {
+		set = append(set, til.Instruction{
+			Typ:     til.TypeAddToken,
+			TokenID: common.TokenID(tokenID),
+		})
+	}
+	depositAmount, ok := new(big.Int).SetString("10225000000000000000000000000000000", 10)
+	require.True(t, ok)
+	for tokenID := 0; tokenID < testTokensLen; tokenID++ {
+		for user := 0; user < testUsersLen; user++ {
+			set = append(set, til.Instruction{
+				Typ:           common.TxTypeCreateAccountDeposit,
+				TokenID:       common.TokenID(tokenID),
+				DepositAmount: depositAmount,
+				From:          fmt.Sprintf("User%d", user),
+			})
+		}
+	}
+	set = append(set, til.Instruction{Typ: til.TypeNewBatchL1})
+	set = append(set, til.Instruction{Typ: til.TypeNewBatchL1})
+	set = append(set, til.Instruction{Typ: til.TypeNewBlock})
+
+	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+	blocks, err := tc.GenerateBlocksFromInstructions(set)
+	require.NoError(t, err)
+	require.NotNil(t, blocks)
+	// Set StateRoots for batches manually (til doesn't set it)
+	blocks[0].Rollup.Batches[0].Batch.StateRoot =
+		newBigInt("0")
+	blocks[0].Rollup.Batches[1].Batch.StateRoot =
+		newBigInt("6860514559199319426609623120853503165917774887908204288119245630904770452486")
+
+	ethAddTokens(blocks, ethClient)
+	err = ethClient.CtlAddBlocks(blocks)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	for {
+		syncBlock, discards, err := sync.Sync(ctx, nil)
+		require.NoError(t, err)
+		require.Nil(t, discards)
+		if syncBlock == nil {
+			break
+		}
+	}
+	dbTokens, err := historyDB.GetAllTokens()
+	require.Nil(t, err)
+	require.Equal(t, testTokensLen, len(dbTokens))
+
+	dbAccounts, err := historyDB.GetAllAccounts()
+	require.Nil(t, err)
+	require.Equal(t, testTokensLen*testUsersLen, len(dbAccounts))
+
+	sdbAccounts, err := stateDB.TestGetAccounts()
+	require.Nil(t, err)
+	require.Equal(t, testTokensLen*testUsersLen, len(sdbAccounts))
+
+	return tc
+}
+
+func TestPipelineForgeBatchWithTxs(t *testing.T) {
+	ethClientSetup := test.NewClientSetupExample()
+	ethClientSetup.ChainID = big.NewInt(int64(chainID))
+
+	var timer timer
+	ctx := context.Background()
+	ethClient := test.NewClient(true, &timer, &bidder, ethClientSetup)
+	etherScanService, _ := etherscan.NewEtherscanService("", "")
+	modules := newTestModules(t)
+	coord := newTestCoordinator(t, forger, ethClient, ethClientSetup, modules, etherScanService)
+	sync := newTestSynchronizer(t, ethClient, ethClientSetup, modules)
+
+	// preload the synchronizer (via the test ethClient) with some tokens
+	// and users with positive balances
+	tilCtx := preloadSync(t, ethClient, sync, modules.historyDB, modules.stateDB)
+	syncStats := sync.Stats()
+	batchNum := syncStats.Sync.LastBatch.BatchNum
+	syncSCVars := sync.SCVars()
+
+	pipeline, err := coord.newPipeline(ctx)
+	require.NoError(t, err)
+
+	// Insert some l2txs in the Pool
+	setPool := `
+Type: PoolL2
+
+PoolTransfer(0) User0-User1: 100 (126)
+PoolTransfer(0) User1-User2: 200 (126)
+PoolTransfer(0) User2-User3: 300 (126)
+	`
+	l2txs, err := tilCtx.GeneratePoolL2Txs(setPool)
+	require.NoError(t, err)
+	for _, tx := range l2txs {
+		err := modules.l2DB.AddTxTest(&tx) //nolint:gosec
+		require.NoError(t, err)
+	}
+
+	err = pipeline.reset(batchNum, syncStats, syncSCVars)
+	require.NoError(t, err)
+	// Sanity check
+	sdbAccounts, err := pipeline.txSelector.LocalAccountsDB().TestGetAccounts()
+	require.Nil(t, err)
+	require.Equal(t, testTokensLen*testUsersLen, len(sdbAccounts))
+
+	// Sanity check
+	sdbAccounts, 
err = pipeline.batchBuilder.LocalStateDB().TestGetAccounts() + require.Nil(t, err) + require.Equal(t, testTokensLen*testUsersLen, len(sdbAccounts)) + + // Sanity check + require.Equal(t, modules.stateDB.MT.Root(), + pipeline.batchBuilder.LocalStateDB().MT.Root()) + + batchNum++ + + batchInfo, _, err := pipeline.forgeBatch(batchNum) + require.NoError(t, err) + assert.Equal(t, 3, len(batchInfo.L2Txs)) + + batchNum++ + batchInfo, _, err = pipeline.forgeBatch(batchNum) + require.NoError(t, err) + assert.Equal(t, 0, len(batchInfo.L2Txs)) + + closeTestModules(t, modules) +} + func TestEthRollupForgeBatch(t *testing.T) { if os.Getenv("TEST_ROLLUP_FORGE_BATCH") == "" { return diff --git a/eth/ethereum.go b/eth/ethereum.go index 23237521..6ae169fb 100644 --- a/eth/ethereum.go +++ b/eth/ethereum.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "math/big" - "sort" "time" "github.com/ethereum/go-ethereum" @@ -43,8 +42,6 @@ type EthereumInterface interface { EthSuggestGasPrice(ctx context.Context) (*big.Int, error) EthKeyStore() *ethKeystore.KeyStore EthCall(ctx context.Context, tx *types.Transaction, blockNum *big.Int) ([]byte, error) - - EthNextBlockWithSCEvents(ctx context.Context, fromBlock int64, addresses []ethCommon.Address) (int64, error) } var ( @@ -76,8 +73,6 @@ type EthereumClient struct { ks *ethKeystore.KeyStore config *EthereumConfig opts *bind.CallOpts - - events map[int64][]types.Log } // NewEthereumClient creates a EthereumClient instance. The account is not mandatory (it can @@ -102,7 +97,6 @@ func NewEthereumClient(client *ethclient.Client, account *accounts.Account, return nil, tracerr.Wrap(err) } c.chainID = chainID - c.events = make(map[int64][]types.Log) return c, nil } @@ -355,64 +349,3 @@ func (c *EthereumClient) EthCall(ctx context.Context, tx *types.Transaction, result, err := c.client.CallContract(ctx, msg, blockNum) return result, tracerr.Wrap(err) } - -// EthNextBlockWithSCEvents returns the next block with events in the provided SC addresses -func (c *EthereumClient) EthNextBlockWithSCEvents(ctx context.Context, fromBlock int64, addresses []ethCommon.Address) (int64, error) { - const blocksPerCycle int64 = 10000 - - lastBlock, err := c.EthLastBlock() - if err != nil { - return 0, nil - } - - from := fromBlock - to := from + blocksPerCycle - - for bn := from; bn <= to; bn++ { - if _, ok := c.events[bn]; ok { - delete(c.events, bn) - return bn, nil - } - } - - for { - q := ethereum.FilterQuery{ - FromBlock: big.NewInt(from), - ToBlock: big.NewInt(to), - Addresses: addresses, - } - - // query logs with filter - logs, err := c.client.FilterLogs(ctx, q) - if err != nil { - return 0, err - } - - if len(logs) > 0 { - for _, log := range logs { - c.events[int64(log.BlockNumber)] = append(c.events[int64(log.BlockNumber)], log) - } - - // when we have logs, we sort the logs by block ascending and get the first one - sort.Slice(logs, func(i, j int) bool { - return logs[i].BlockNumber < logs[j].BlockNumber - }) - - return int64(logs[0].BlockNumber), nil - } - - // move to the next range until the end of the chain - // if "to" is equal lastBlock then stop searching - if to == lastBlock { - return lastBlock, nil - } - - from = to - to += blocksPerCycle - // if the "to" is greater than lastBlock, we set "to" as the lastBlock in order - // to be execute the last try to find a block with events - if to > lastBlock { - to = lastBlock - } - } -} diff --git a/node/node.go b/node/node.go index 0eefb987..260e238a 100644 --- a/node/node.go +++ b/node/node.go @@ -834,20 +834,21 @@ func (n *Node) 
handleReorg(ctx context.Context, stats *synchronizer.Stats, return nil } -func (n *Node) syncLoopFn(ctx context.Context) (time.Duration, error) { - blockData, discarded, err := n.sync.Sync(ctx) +func (n *Node) syncLoopFn(ctx context.Context, lastBlock *common.Block) (*common.Block, + time.Duration, error) { + blockData, discarded, err := n.sync.Sync(ctx, lastBlock) stats := n.sync.Stats() if err != nil { // case: error - return n.cfg.Synchronizer.SyncLoopInterval.Duration, tracerr.Wrap(err) + return nil, n.cfg.Synchronizer.SyncLoopInterval.Duration, tracerr.Wrap(err) } else if discarded != nil { // case: reorg log.Infow("Synchronizer.Sync reorg", "discarded", *discarded) vars := n.sync.SCVars() if err := n.handleReorg(ctx, stats, vars); err != nil { - return time.Duration(0), tracerr.Wrap(err) + return nil, time.Duration(0), tracerr.Wrap(err) } - return time.Duration(0), nil + return nil, time.Duration(0), nil } else if blockData != nil { // case: new block vars := common.SCVariablesPtr{ @@ -856,12 +857,12 @@ func (n *Node) syncLoopFn(ctx context.Context) (time.Duration, error) { WDelayer: blockData.WDelayer.Vars, } if err := n.handleNewBlock(ctx, stats, &vars, blockData.Rollup.Batches); err != nil { - return time.Duration(0), tracerr.Wrap(err) + return nil, time.Duration(0), tracerr.Wrap(err) } - return time.Duration(0), nil + return &blockData.Block, time.Duration(0), nil } else { // case: no block - return n.cfg.Synchronizer.SyncLoopInterval.Duration, nil + return lastBlock, n.cfg.Synchronizer.SyncLoopInterval.Duration, nil } } @@ -883,6 +884,7 @@ func (n *Node) StartSynchronizer() { n.wg.Add(1) go func() { var err error + var lastBlock *common.Block waitDuration := time.Duration(0) for { select { @@ -891,7 +893,8 @@ func (n *Node) StartSynchronizer() { n.wg.Done() return case <-time.After(waitDuration): - if waitDuration, err = n.syncLoopFn(n.ctx); err != nil { + if lastBlock, waitDuration, err = n.syncLoopFn(n.ctx, + lastBlock); err != nil { if n.ctx.Err() != nil { continue } diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 9c9bb709..541f77d7 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -40,7 +40,7 @@ import ( "sync" "time" - ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum" "github.com/hermeznetwork/hermez-node/common" "github.com/hermeznetwork/hermez-node/db/historydb" "github.com/hermeznetwork/hermez-node/db/l2db" @@ -502,39 +502,45 @@ func (s *Synchronizer) resetIntermediateState() error { // If a block is synced, it will be returned and also stored in the DB. If a // reorg is detected, the number of discarded blocks will be returned and no // synchronization will be made. 
-func (s *Synchronizer) Sync(ctx context.Context) (blockData *common.BlockData, discarded *int64, err error) { +func (s *Synchronizer) Sync(ctx context.Context, + lastSavedBlock *common.Block) (blockData *common.BlockData, discarded *int64, err error) { if s.resetStateFailed { if err := s.resetIntermediateState(); err != nil { return nil, nil, tracerr.Wrap(err) } } - // Get lastSavedBlock from History DB - lastSavedBlock, err := s.historyDB.GetLastBlock() - if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { - return nil, nil, tracerr.Wrap(err) + var nextBlockNum int64 // next block number to sync + if lastSavedBlock == nil { + // Get lastSavedBlock from History DB + lastSavedBlock, err = s.historyDB.GetLastBlock() + if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { + return nil, nil, tracerr.Wrap(err) + } + // If we don't have any stored block, we must do a full sync + // starting from the startBlockNum + if tracerr.Unwrap(err) == sql.ErrNoRows || lastSavedBlock.Num == 0 { + nextBlockNum = s.startBlockNum + lastSavedBlock = nil + } } - - // search the next block with events in hermez smart contracts until it reaches the - // last eth block, initially we set the from block as the startBlock - fromBlock := s.startBlockNum - // If we have any stored block, we must search the next block after this block if lastSavedBlock != nil { - fromBlock = lastSavedBlock.Num + 1 + nextBlockNum = lastSavedBlock.Num + 1 + if lastSavedBlock.Num < s.startBlockNum { + return nil, nil, tracerr.Wrap( + fmt.Errorf("lastSavedBlock (%v) < startBlockNum (%v)", + lastSavedBlock.Num, s.startBlockNum)) + } } - // next block number to sync - nextBlockNum, err := s.EthClient.EthNextBlockWithSCEvents(ctx, fromBlock, []ethCommon.Address{ - s.consts.Auction.HermezRollup, - s.consts.Rollup.HermezAuctionContract, - s.consts.Rollup.WithdrawDelayerContract, - }) - if err != nil { - return nil, nil, err - } - if nextBlockNum == lastSavedBlock.Num { + ethBlock, err := s.EthClient.EthBlockByNumber(ctx, nextBlockNum) + if tracerr.Unwrap(err) == ethereum.NotFound { return nil, nil, nil + } else if err != nil { + return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err)) } + log.Debugf("ethBlock: num: %v, parent: %v, hash: %v", + ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String()) // While having more blocks to sync than UpdateBlockNumDiffThreshold, UpdateEth will be called once in // UpdateFrequencyDivider blocks @@ -545,28 +551,23 @@ func (s *Synchronizer) Sync(ctx context.Context) (blockData *common.BlockData, d } } - ethBlock, err := s.EthClient.EthBlockByNumber(ctx, nextBlockNum) - if err != nil { - return nil, nil, tracerr.Wrap(fmt.Errorf("EthBlockByNumber: %w", err)) - } - log.Debugf("ethBlock: num: %v, parent: %v, hash: %v", ethBlock.Num, ethBlock.ParentHash.String(), ethBlock.Hash.String()) - log.Debugw("Syncing...", "block", nextBlockNum, "ethLastBlock", s.stats.Eth.LastBlock) + log.Debugw("Syncing...", + "block", nextBlockNum, + "ethLastBlock", s.stats.Eth.LastBlock, + ) - if nextBlockNum > 0 { - parentBlockNumber := nextBlockNum - 1 - parentBlock, err := s.historyDB.GetBlock(parentBlockNumber) - if err != nil && tracerr.Unwrap(err) != sql.ErrNoRows { - return nil, nil, tracerr.Wrap(err) - } - // Check that the obtained ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg! - if tracerr.Unwrap(err) != sql.ErrNoRows && parentBlock != nil && parentBlock.Hash != ethBlock.ParentHash { + // Check that the obtained ethBlock.ParentHash == prevEthBlock.Hash; if not, reorg! 
+ if lastSavedBlock != nil { + if lastSavedBlock.Hash != ethBlock.ParentHash { // Reorg detected - log.Debugw("Reorg Detected", "blockNum", ethBlock.Num, "block.parent(got)", ethBlock.ParentHash, "parent.hash(exp)", parentBlock.Hash) - lastDBBlockNum, err := s.reorg(parentBlock) + log.Debugw("Reorg Detected", + "blockNum", ethBlock.Num, + "block.parent(got)", ethBlock.ParentHash, "parent.hash(exp)", lastSavedBlock.Hash) + lastDBBlockNum, err := s.reorg(lastSavedBlock) if err != nil { return nil, nil, tracerr.Wrap(err) } - discarded := parentBlock.Num - lastDBBlockNum + discarded := lastSavedBlock.Num - lastDBBlockNum metric.Reorgs.Inc() return nil, &discarded, nil } @@ -685,28 +686,24 @@ func (s *Synchronizer) Sync(ctx context.Context) (blockData *common.BlockData, d // corresponding batches in StateBD are discarded. Returns the last valid // blockNum from the HistoryDB. func (s *Synchronizer) reorg(uncleBlock *common.Block) (int64, error) { - var err error - var block *common.Block + blockNum := uncleBlock.Num - for blockNum := uncleBlock.Num; blockNum >= s.startBlockNum; blockNum-- { - block, err = s.historyDB.GetBlock(blockNum) - if tracerr.Unwrap(err) == sql.ErrNoRows { - continue - } + var block *common.Block + for blockNum >= s.startBlockNum { + ethBlock, err := s.EthClient.EthBlockByNumber(context.Background(), blockNum) if err != nil { - return 0, tracerr.Wrap(fmt.Errorf("historyDB.GetBlock: %w", err)) + return 0, tracerr.Wrap(fmt.Errorf("ethClient.EthBlockByNumber: %w", err)) } - var ethBlock *common.Block - ethBlock, err = s.EthClient.EthBlockByNumber(context.Background(), blockNum) + block, err = s.historyDB.GetBlock(blockNum) if err != nil { - return 0, tracerr.Wrap(fmt.Errorf("ethClient.EthBlockByNumber: %w", err)) + return 0, tracerr.Wrap(fmt.Errorf("historyDB.GetBlock: %w", err)) } - if block.Hash == ethBlock.Hash { log.Debugf("Found valid block: %v", blockNum) break } + blockNum-- } total := uncleBlock.Num - block.Num log.Debugw("Discarding blocks", "total", total, "from", uncleBlock.Num, "to", block.Num+1) diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go new file mode 100644 index 00000000..19a1cc9c --- /dev/null +++ b/synchronizer/synchronizer_test.go @@ -0,0 +1,883 @@ +package synchronizer + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "math/big" + "os" + "sort" + "testing" + "time" + + ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/hermeznetwork/hermez-node/common" + dbUtils "github.com/hermeznetwork/hermez-node/db" + "github.com/hermeznetwork/hermez-node/db/historydb" + "github.com/hermeznetwork/hermez-node/db/l2db" + "github.com/hermeznetwork/hermez-node/db/statedb" + "github.com/hermeznetwork/hermez-node/eth" + "github.com/hermeznetwork/hermez-node/test" + "github.com/hermeznetwork/hermez-node/test/til" + "github.com/jinzhu/copier" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var tokenConsts = map[common.TokenID]eth.ERC20Consts{} + +type timer struct { + time int64 +} + +func (t *timer) Time() int64 { + currentTime := t.time + t.time++ + return currentTime +} + +func accountsCmp(accounts []common.Account) func(i, j int) bool { + return func(i, j int) bool { return accounts[i].Idx < accounts[j].Idx } +} + +// Check Sync output and HistoryDB state against expected values generated by +// til +func checkSyncBlock(t *testing.T, s *Synchronizer, blockNum int, block, + syncBlock *common.BlockData) { + // Check Blocks + dbBlocks, err := s.historyDB.GetAllBlocks() + 
require.NoError(t, err) + dbBlocks = dbBlocks[1:] // ignore block 0, added by default in the DB + assert.Equal(t, blockNum, len(dbBlocks)) + assert.Equal(t, int64(blockNum), dbBlocks[blockNum-1].Num) + assert.NotEqual(t, dbBlocks[blockNum-1].Hash, dbBlocks[blockNum-2].Hash) + assert.Greater(t, dbBlocks[blockNum-1].Timestamp.Unix(), dbBlocks[blockNum-2].Timestamp.Unix()) + + // Check Tokens + assert.Equal(t, len(block.Rollup.AddedTokens), len(syncBlock.Rollup.AddedTokens)) + dbTokens, err := s.historyDB.GetAllTokens() + require.NoError(t, err) + dbTokens = dbTokens[1:] // ignore token 0, added by default in the DB + for i, token := range block.Rollup.AddedTokens { + dbToken := dbTokens[i] + syncToken := syncBlock.Rollup.AddedTokens[i] + + assert.Equal(t, block.Block.Num, syncToken.EthBlockNum) + assert.Equal(t, token.TokenID, syncToken.TokenID) + assert.Equal(t, token.EthAddr, syncToken.EthAddr) + tokenConst := tokenConsts[token.TokenID] + assert.Equal(t, tokenConst.Name, syncToken.Name) + assert.Equal(t, tokenConst.Symbol, syncToken.Symbol) + assert.Equal(t, tokenConst.Decimals, syncToken.Decimals) + + var tokenCpy historydb.TokenWithUSD + //nolint:gosec + require.Nil(t, copier.Copy(&tokenCpy, &token)) // copy common.Token to historydb.TokenWithUSD + require.Nil(t, copier.Copy(&tokenCpy, &tokenConst)) // copy common.Token to historydb.TokenWithUSD + tokenCpy.ItemID = dbToken.ItemID // we don't care about ItemID + assert.Equal(t, tokenCpy, dbToken) + } + + // Check submitted L1UserTxs + assert.Equal(t, len(block.Rollup.L1UserTxs), len(syncBlock.Rollup.L1UserTxs)) + dbL1UserTxs, err := s.historyDB.GetAllL1UserTxs() + require.NoError(t, err) + // Ignore BatchNum in syncBlock.L1UserTxs because this value is set by + // the HistoryDB. Also ignore EffectiveAmount & EffectiveDepositAmount + // because this value is set by StateDB.ProcessTxs. 
+ for i := range syncBlock.Rollup.L1UserTxs { + syncBlock.Rollup.L1UserTxs[i].BatchNum = block.Rollup.L1UserTxs[i].BatchNum + assert.Nil(t, syncBlock.Rollup.L1UserTxs[i].EffectiveDepositAmount) + assert.Nil(t, syncBlock.Rollup.L1UserTxs[i].EffectiveAmount) + } + assert.Equal(t, block.Rollup.L1UserTxs, syncBlock.Rollup.L1UserTxs) + for _, tx := range block.Rollup.L1UserTxs { + var dbTx *common.L1Tx + // Find tx in DB output + for _, _dbTx := range dbL1UserTxs { + if *tx.ToForgeL1TxsNum == *_dbTx.ToForgeL1TxsNum && + tx.Position == _dbTx.Position { + dbTx = new(common.L1Tx) + *dbTx = _dbTx + // NOTE: Overwrite EffectiveFromIdx in L1UserTx + // from db because we don't expect + // EffectiveFromIdx to be set yet, as this tx + // is not in yet forged + dbTx.EffectiveFromIdx = 0 + break + } + } + // If the tx has been forged in this block, this will be + // reflected in the DB, and so the Effective values will be + // already set + if dbTx.BatchNum != nil { + tx.EffectiveAmount = tx.Amount + tx.EffectiveDepositAmount = tx.DepositAmount + } + assert.Equal(t, &tx, dbTx) //nolint:gosec + } + + // Check Batches + assert.Equal(t, len(block.Rollup.Batches), len(syncBlock.Rollup.Batches)) + dbBatches, err := s.historyDB.GetAllBatches() + require.NoError(t, err) + + dbL1CoordinatorTxs, err := s.historyDB.GetAllL1CoordinatorTxs() + require.NoError(t, err) + dbL2Txs, err := s.historyDB.GetAllL2Txs() + require.NoError(t, err) + dbExits, err := s.historyDB.GetAllExits() + require.NoError(t, err) + // dbL1CoordinatorTxs := []common.L1Tx{} + for i, batch := range block.Rollup.Batches { + var dbBatch *common.Batch + // Find batch in DB output + for _, _dbBatch := range dbBatches { + if batch.Batch.BatchNum == _dbBatch.BatchNum { + dbBatch = new(common.Batch) + *dbBatch = _dbBatch + dbBatch.GasPrice = batch.Batch.GasPrice + break + } + } + syncBatch := syncBlock.Rollup.Batches[i] + + // We don't care about TotalFeesUSD. Use the syncBatch that + // has a TotalFeesUSD inserted by the HistoryDB + batch.Batch.TotalFeesUSD = syncBatch.Batch.TotalFeesUSD + assert.Equal(t, batch.CreatedAccounts, syncBatch.CreatedAccounts) + batch.Batch.NumAccounts = len(batch.CreatedAccounts) + + // Test field by field to facilitate debugging of errors + assert.Equal(t, len(batch.L1UserTxs), len(syncBatch.L1UserTxs)) + // NOTE: EffectiveFromIdx is set to til L1UserTxs in + // `FillBlocksForgedL1UserTxs` function + for j := range syncBatch.L1UserTxs { + assert.NotEqual(t, 0, syncBatch.L1UserTxs[j].EffectiveFromIdx) + } + assert.Equal(t, batch.L1UserTxs, syncBatch.L1UserTxs) + // NOTE: EffectiveFromIdx is set to til L1CoordinatorTxs in + // `FillBlocksExtra` function + for j := range syncBatch.L1CoordinatorTxs { + assert.NotEqual(t, 0, syncBatch.L1CoordinatorTxs[j].EffectiveFromIdx) + } + for i := range batch.L1CoordinatorTxs { + batch.L1CoordinatorTxs[i].EthTxHash = ethCommon.HexToHash("0xef98421250239de255750811293f167abb9325152520acb62e40de72746d4d5e") + } + assert.Equal(t, batch.L1CoordinatorTxs, syncBatch.L1CoordinatorTxs) + assert.Equal(t, batch.L2Txs, syncBatch.L2Txs) + // In exit tree, we only check AccountIdx and Balance, because + // it's what we have precomputed before. 
+ require.Equal(t, len(batch.ExitTree), len(syncBatch.ExitTree)) + for j := range batch.ExitTree { + exit := &batch.ExitTree[j] + assert.Equal(t, exit.AccountIdx, syncBatch.ExitTree[j].AccountIdx) + assert.Equal(t, exit.Balance, syncBatch.ExitTree[j].Balance) + *exit = syncBatch.ExitTree[j] + } + assert.Equal(t, batch.Batch, syncBatch.Batch) + // Ignore updated accounts + syncBatch.UpdatedAccounts = nil + assert.Equal(t, batch, syncBatch) + assert.Equal(t, &batch.Batch, dbBatch) //nolint:gosec + + // Check forged L1UserTxs from DB, and check effective values + // in sync output + for j, tx := range batch.L1UserTxs { + var dbTx *common.L1Tx + // Find tx in DB output + for _, _dbTx := range dbL1UserTxs { + if *tx.BatchNum == *_dbTx.BatchNum && + tx.Position == _dbTx.Position { + dbTx = new(common.L1Tx) + *dbTx = _dbTx + break + } + } + assert.Equal(t, &tx, dbTx) //nolint:gosec + + syncTx := &syncBlock.Rollup.Batches[i].L1UserTxs[j] + assert.Equal(t, syncTx.DepositAmount, syncTx.EffectiveDepositAmount) + assert.Equal(t, syncTx.Amount, syncTx.EffectiveAmount) + } + + // Check L1CoordinatorTxs from DB + for _, tx := range batch.L1CoordinatorTxs { + var dbTx *common.L1Tx + // Find tx in DB output + for _, _dbTx := range dbL1CoordinatorTxs { + if *tx.BatchNum == *_dbTx.BatchNum && + tx.Position == _dbTx.Position { + dbTx = new(common.L1Tx) + *dbTx = _dbTx + break + } + } + dbTx.EthTxHash = ethCommon.HexToHash("0xef98421250239de255750811293f167abb9325152520acb62e40de72746d4d5e") + assert.Equal(t, &tx, dbTx) //nolint:gosec + } + + // Check L2Txs from DB + for _, tx := range batch.L2Txs { + var dbTx *common.L2Tx + // Find tx in DB output + for _, _dbTx := range dbL2Txs { + if tx.BatchNum == _dbTx.BatchNum && + tx.Position == _dbTx.Position { + dbTx = new(common.L2Tx) + *dbTx = _dbTx + break + } + } + assert.Equal(t, &tx, dbTx) //nolint:gosec + } + + // Check Exits from DB + for _, exit := range batch.ExitTree { + var dbExit *common.ExitInfo + // Find exit in DB output + for _, _dbExit := range dbExits { + if exit.BatchNum == _dbExit.BatchNum && + exit.AccountIdx == _dbExit.AccountIdx { + dbExit = new(common.ExitInfo) + *dbExit = _dbExit + break + } + } + // Compare MerkleProof in JSON because unmarshaled 0 + // big.Int leaves the internal big.Int array at nil, + // and gives trouble when comparing big.Int with + // internal big.Int array != nil but empty. 
+ mtp, err := json.Marshal(exit.MerkleProof) + require.NoError(t, err) + dbMtp, err := json.Marshal(dbExit.MerkleProof) + require.NoError(t, err) + assert.Equal(t, mtp, dbMtp) + dbExit.MerkleProof = exit.MerkleProof + assert.Equal(t, &exit, dbExit) //nolint:gosec + } + } + + // Compare accounts from HistoryDB with StateDB (they should match) + dbAccounts, err := s.historyDB.GetAllAccounts() + require.NoError(t, err) + sdbAccounts, err := s.stateDB.TestGetAccounts() + require.NoError(t, err) + assertEqualAccountsHistoryDBStateDB(t, dbAccounts, sdbAccounts) +} + +func assertEqualAccountsHistoryDBStateDB(t *testing.T, hdbAccs, sdbAccs []common.Account) { + assert.Equal(t, len(hdbAccs), len(sdbAccs)) + sort.SliceStable(hdbAccs, accountsCmp(hdbAccs)) + sort.SliceStable(sdbAccs, accountsCmp(sdbAccs)) + for i := range hdbAccs { + hdbAcc := hdbAccs[i] + sdbAcc := sdbAccs[i] + assert.Equal(t, hdbAcc.Idx, sdbAcc.Idx) + assert.Equal(t, hdbAcc.TokenID, sdbAcc.TokenID) + assert.Equal(t, hdbAcc.EthAddr, sdbAcc.EthAddr) + assert.Equal(t, hdbAcc.BJJ, sdbAcc.BJJ) + } +} + +// ethAddTokens adds the tokens from the blocks to the blockchain +func ethAddTokens(blocks []common.BlockData, client *test.Client) { + for _, block := range blocks { + for _, token := range block.Rollup.AddedTokens { + consts := eth.ERC20Consts{ + Name: fmt.Sprintf("Token %d", token.TokenID), + Symbol: fmt.Sprintf("TK%d", token.TokenID), + Decimals: 18, + } + tokenConsts[token.TokenID] = consts + client.CtlAddERC20(token.EthAddr, consts) + } + } +} + +var chainID uint16 = 0 +var deleteme = []string{} + +func TestMain(m *testing.M) { + exitVal := m.Run() + for _, dir := range deleteme { + if err := os.RemoveAll(dir); err != nil { + panic(err) + } + } + os.Exit(exitVal) +} + +func newTestModules(t *testing.T) (*statedb.StateDB, *historydb.HistoryDB, *l2db.L2DB) { + // Int State DB + dir, err := ioutil.TempDir("", "tmpdb") + require.NoError(t, err) + deleteme = append(deleteme, dir) + + stateDB, err := statedb.NewStateDB(statedb.Config{Path: dir, Keep: 128, + Type: statedb.TypeSynchronizer, NLevels: 32}) + require.NoError(t, err) + + // Init History DB + db, err := dbUtils.InitTestSQLDB() + require.NoError(t, err) + historyDB := historydb.NewHistoryDB(db, db, nil) + // Clear DB + test.WipeDB(historyDB.DB()) + + // Init L2 DB + l2DB := l2db.NewL2DB(db, db, 10, 100, 0.0, 1000.0, 24*time.Hour, nil) + + return stateDB, historyDB, l2DB +} + +func closeTestModules(_ *testing.T, statedb *statedb.StateDB, historydb *historydb.HistoryDB, l2db *l2db.L2DB) { + statedb.Close() + _ = l2db.DB().Close() +} + +func newBigInt(s string) *big.Int { + v, ok := new(big.Int).SetString(s, 10) + if !ok { + panic(fmt.Errorf("Can't set big.Int from %s", s)) + } + return v +} + +func TestSyncGeneral(t *testing.T) { + // + // Setup + // + + stateDB, historyDB, l2DB := newTestModules(t) + + // Init eth client + var timer timer + clientSetup := test.NewClientSetupExample() + clientSetup.ChainID = big.NewInt(int64(chainID)) + bootCoordAddr := clientSetup.AuctionVariables.BootCoordinator + client := test.NewClient(true, &timer, ðCommon.Address{}, clientSetup) + + // Create Synchronizer + s, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{ + StatsUpdateBlockNumDiffThreshold: 100, + StatsUpdateFrequencyDivider: 100, + }) + require.NoError(t, err) + + ctx := context.Background() + + // + // First Sync from an initial state + // + stats := s.Stats() + assert.Equal(t, false, stats.Synced()) + + // Test Sync for rollup genesis block + syncBlock, discards, err 
:= s.Sync(ctx, nil) + require.NoError(t, err) + require.Nil(t, discards) + require.NotNil(t, syncBlock) + require.Nil(t, syncBlock.Rollup.Vars) + require.Nil(t, syncBlock.Auction.Vars) + require.Nil(t, syncBlock.WDelayer.Vars) + assert.Equal(t, int64(1), syncBlock.Block.Num) + stats = s.Stats() + assert.Equal(t, int64(1), stats.Eth.FirstBlockNum) + assert.Equal(t, int64(1), stats.Eth.LastBlock.Num) + assert.Equal(t, int64(1), stats.Sync.LastBlock.Num) + vars := s.SCVars() + assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup) + assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction) + assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer) + + dbBlocks, err := s.historyDB.GetAllBlocks() + require.NoError(t, err) + assert.Equal(t, 2, len(dbBlocks)) + assert.Equal(t, int64(1), dbBlocks[1].Num) + + // Sync again and expect no new blocks + syncBlock, discards, err = s.Sync(ctx, nil) + require.NoError(t, err) + require.Nil(t, discards) + require.Nil(t, syncBlock) + + // + // Generate blockchain and smart contract data, and fill the test smart contracts + // + + // Generate blockchain data with til + set1 := ` + Type: Blockchain + + AddToken(1) + AddToken(2) + AddToken(3) + + CreateAccountDeposit(1) C: 2000 // Idx=256+2=258 + CreateAccountDeposit(2) A: 2000 // Idx=256+3=259 + CreateAccountDeposit(1) D: 500 // Idx=256+4=260 + CreateAccountDeposit(2) B: 500 // Idx=256+5=261 + CreateAccountDeposit(2) C: 500 // Idx=256+6=262 + + CreateAccountCoordinator(1) A // Idx=256+0=256 + CreateAccountCoordinator(1) B // Idx=256+1=257 + + > batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{5} + > batchL1 // forge defined L1UserTxs{5}, freeze L1UserTxs{nil} + > block // blockNum=2 + + CreateAccountDepositTransfer(1) E-A: 1000, 200 // Idx=256+7=263 + ForceTransfer(1) C-B: 80 + ForceExit(1) A: 100 + ForceExit(1) B: 80 + ForceTransfer(1) A-D: 100 + + Transfer(1) C-A: 100 (126) + Exit(1) C: 50 (100) + Exit(1) D: 30 (100) + + > batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{3} + > batchL1 // forge L1UserTxs{3}, freeze defined L1UserTxs{nil} + > block // blockNum=3 + ` + tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx) + tilCfgExtra := til.ConfigExtra{ + BootCoordAddr: bootCoordAddr, + CoordUser: "A", + } + blocks, err := tc.GenerateBlocks(set1) + require.NoError(t, err) + // Sanity check + require.Equal(t, 2, len(blocks)) + // blocks 0 (blockNum=2) + i := 0 + require.Equal(t, 2, int(blocks[i].Block.Num)) + require.Equal(t, 3, len(blocks[i].Rollup.AddedTokens)) + require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) + require.Equal(t, 2, len(blocks[i].Rollup.Batches)) + require.Equal(t, 2, len(blocks[i].Rollup.Batches[0].L1CoordinatorTxs)) + // Set StateRoots for batches manually (til doesn't set it) + blocks[i].Rollup.Batches[0].Batch.StateRoot = + newBigInt("11432094872416618651837327395264042968926668786266585816625577088890451620254") + blocks[i].Rollup.Batches[1].Batch.StateRoot = + newBigInt("16914212635847451457076355431350059348585556180740555407203882688922702410093") + // blocks 1 (blockNum=3) + i = 1 + require.Equal(t, 3, int(blocks[i].Block.Num)) + require.Equal(t, 5, len(blocks[i].Rollup.L1UserTxs)) + require.Equal(t, 2, len(blocks[i].Rollup.Batches)) + require.Equal(t, 3, len(blocks[i].Rollup.Batches[0].L2Txs)) + // Set StateRoots for batches manually (til doesn't set it) + blocks[i].Rollup.Batches[0].Batch.StateRoot = + newBigInt("13535760140937349829640752733057594576151546047374619177689224612061148090678") + blocks[i].Rollup.Batches[1].Batch.StateRoot = 
+ newBigInt("19413739476363469870744893742469056615496274423228302914851564791727474664804") + + // Generate extra required data + ethAddTokens(blocks, client) + + err = tc.FillBlocksExtra(blocks, &tilCfgExtra) + require.NoError(t, err) + tc.FillBlocksL1UserTxsBatchNum(blocks) + err = tc.FillBlocksForgedL1UserTxs(blocks) + require.NoError(t, err) + + // Add block data to the smart contracts + err = client.CtlAddBlocks(blocks) + require.NoError(t, err) + + // + // Sync to synchronize the current state from the test smart contracts, + // and check the outcome + // + + // Block 2 + + syncBlock, discards, err = s.Sync(ctx, nil) + require.NoError(t, err) + require.Nil(t, discards) + require.NotNil(t, syncBlock) + assert.Nil(t, syncBlock.Rollup.Vars) + assert.Nil(t, syncBlock.Auction.Vars) + assert.Nil(t, syncBlock.WDelayer.Vars) + assert.Equal(t, int64(2), syncBlock.Block.Num) + stats = s.Stats() + assert.Equal(t, int64(1), stats.Eth.FirstBlockNum) + assert.Equal(t, int64(3), stats.Eth.LastBlock.Num) + assert.Equal(t, int64(2), stats.Sync.LastBlock.Num) + // Set ethereum transaction hash (til doesn't set it) + blocks[0].Rollup.Batches[0].Batch.EthTxHash = syncBlock.Rollup.Batches[0].Batch.EthTxHash + blocks[0].Rollup.Batches[1].Batch.EthTxHash = syncBlock.Rollup.Batches[1].Batch.EthTxHash + blocks[0].Rollup.Batches[0].Batch.GasPrice = syncBlock.Rollup.Batches[0].Batch.GasPrice + blocks[0].Rollup.Batches[1].Batch.GasPrice = syncBlock.Rollup.Batches[1].Batch.GasPrice + + checkSyncBlock(t, s, 2, &blocks[0], syncBlock) + + // Block 3 + + syncBlock, discards, err = s.Sync(ctx, nil) + assert.NoError(t, err) + require.NoError(t, err) + require.Nil(t, discards) + require.NotNil(t, syncBlock) + assert.Nil(t, syncBlock.Rollup.Vars) + assert.Nil(t, syncBlock.Auction.Vars) + assert.Nil(t, syncBlock.WDelayer.Vars) + assert.Equal(t, int64(3), syncBlock.Block.Num) + stats = s.Stats() + assert.Equal(t, int64(1), stats.Eth.FirstBlockNum) + assert.Equal(t, int64(3), stats.Eth.LastBlock.Num) + assert.Equal(t, int64(3), stats.Sync.LastBlock.Num) + // Set ethereum transaction hash (til doesn't set it) + blocks[1].Rollup.Batches[0].Batch.EthTxHash = syncBlock.Rollup.Batches[0].Batch.EthTxHash + blocks[1].Rollup.Batches[1].Batch.EthTxHash = syncBlock.Rollup.Batches[1].Batch.EthTxHash + blocks[1].Rollup.Batches[0].Batch.GasPrice = syncBlock.Rollup.Batches[0].Batch.GasPrice + blocks[1].Rollup.Batches[1].Batch.GasPrice = syncBlock.Rollup.Batches[1].Batch.GasPrice + + checkSyncBlock(t, s, 3, &blocks[1], syncBlock) + + // Block 4 + // Generate 2 withdraws manually + _, err = client.RollupWithdrawMerkleProof(tc.Users["A"].BJJ.Public().Compress(), 1, 4, 256, + big.NewInt(100), []*big.Int{}, true) + require.NoError(t, err) + _, err = client.RollupWithdrawMerkleProof(tc.Users["C"].BJJ.Public().Compress(), 1, 3, 258, + big.NewInt(50), []*big.Int{}, false) + require.NoError(t, err) + client.CtlMineBlock() + + syncBlock, discards, err = s.Sync(ctx, nil) + require.NoError(t, err) + require.Nil(t, discards) + require.NotNil(t, syncBlock) + assert.Nil(t, syncBlock.Rollup.Vars) + assert.Nil(t, syncBlock.Auction.Vars) + assert.Nil(t, syncBlock.WDelayer.Vars) + assert.Equal(t, int64(4), syncBlock.Block.Num) + stats = s.Stats() + assert.Equal(t, int64(1), stats.Eth.FirstBlockNum) + assert.Equal(t, int64(4), stats.Eth.LastBlock.Num) + assert.Equal(t, int64(4), stats.Sync.LastBlock.Num) + vars = s.SCVars() + assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup) + assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction) + 
assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer) + + dbExits, err := s.historyDB.GetAllExits() + require.NoError(t, err) + foundA1, foundC1 := false, false + for _, exit := range dbExits { + if exit.AccountIdx == 256 && exit.BatchNum == 4 { + foundA1 = true + assert.Equal(t, int64(4), *exit.InstantWithdrawn) + } + if exit.AccountIdx == 258 && exit.BatchNum == 3 { + foundC1 = true + assert.Equal(t, int64(4), *exit.DelayedWithdrawRequest) + } + } + assert.True(t, foundA1) + assert.True(t, foundC1) + + // Block 5 + // Update variables manually + rollupVars, auctionVars, wDelayerVars, err := s.historyDB.GetSCVars() + require.NoError(t, err) + rollupVars.ForgeL1L2BatchTimeout = 42 + _, err = client.RollupUpdateForgeL1L2BatchTimeout(rollupVars.ForgeL1L2BatchTimeout) + require.NoError(t, err) + + auctionVars.OpenAuctionSlots = 17 + _, err = client.AuctionSetOpenAuctionSlots(auctionVars.OpenAuctionSlots) + require.NoError(t, err) + + wDelayerVars.WithdrawalDelay = 99 + _, err = client.WDelayerChangeWithdrawalDelay(wDelayerVars.WithdrawalDelay) + require.NoError(t, err) + + client.CtlMineBlock() + + syncBlock, discards, err = s.Sync(ctx, nil) + require.NoError(t, err) + require.Nil(t, discards) + require.NotNil(t, syncBlock) + assert.NotNil(t, syncBlock.Rollup.Vars) + assert.NotNil(t, syncBlock.Auction.Vars) + assert.NotNil(t, syncBlock.WDelayer.Vars) + assert.Equal(t, int64(5), syncBlock.Block.Num) + stats = s.Stats() + assert.Equal(t, int64(1), stats.Eth.FirstBlockNum) + assert.Equal(t, int64(5), stats.Eth.LastBlock.Num) + assert.Equal(t, int64(5), stats.Sync.LastBlock.Num) + vars = s.SCVars() + assert.NotEqual(t, clientSetup.RollupVariables, vars.Rollup) + assert.NotEqual(t, clientSetup.AuctionVariables, vars.Auction) + assert.NotEqual(t, clientSetup.WDelayerVariables, vars.WDelayer) + + dbRollupVars, dbAuctionVars, dbWDelayerVars, err := s.historyDB.GetSCVars() + require.NoError(t, err) + // Set EthBlockNum for Vars to the blockNum in which they were updated (should be 5) + rollupVars.EthBlockNum = syncBlock.Block.Num + auctionVars.EthBlockNum = syncBlock.Block.Num + wDelayerVars.EthBlockNum = syncBlock.Block.Num + assert.Equal(t, rollupVars, dbRollupVars) + assert.Equal(t, auctionVars, dbAuctionVars) + assert.Equal(t, wDelayerVars, dbWDelayerVars) + + // + // Reorg test + // + + // Redo blocks 2-5 (as a reorg) only leaving: + // - 2 create account transactions + // - 2 add tokens + // We add a 6th block so that the synchronizer can detect the reorg + set2 := ` + Type: Blockchain + + AddToken(1) + AddToken(2) + + CreateAccountDeposit(1) C: 2000 // Idx=256+1=257 + + CreateAccountCoordinator(1) A // Idx=256+0=256 + + > batchL1 // forge L1UserTxs{nil}, freeze defined L1UserTxs{1} + > batchL1 // forge defined L1UserTxs{1}, freeze L1UserTxs{nil} + > block // blockNum=2 + > block // blockNum=3 + > block // blockNum=4 + > block // blockNum=5 + > block // blockNum=6 + ` + tc = til.NewContext(chainID, common.RollupConstMaxL1UserTx) + tilCfgExtra = til.ConfigExtra{ + BootCoordAddr: bootCoordAddr, + CoordUser: "A", + } + blocks, err = tc.GenerateBlocks(set2) + require.NoError(t, err) + + // Set StateRoots for batches manually (til doesn't set it) + blocks[0].Rollup.Batches[0].Batch.StateRoot = + newBigInt("14095767774967159269372103336737817266053275274769794195030162905513860477094") + blocks[0].Rollup.Batches[1].Batch.StateRoot = + newBigInt("2095674348545184674850951945506660952512376416769035169971006930847780339914") + + for i := 0; i < 4; i++ { + client.CtlRollback() + } + block := 
client.CtlLastBlock()
+	require.Equal(t, int64(1), block.Num)
+
+	// Generate extra required data
+	ethAddTokens(blocks, client)
+
+	err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
+	require.NoError(t, err)
+	tc.FillBlocksL1UserTxsBatchNum(blocks)
+
+	// Add block data to the smart contracts
+	err = client.CtlAddBlocks(blocks)
+	require.NoError(t, err)
+
+	// First sync detects the reorg and discards 4 blocks
+	syncBlock, discards, err = s.Sync(ctx, nil)
+	require.NoError(t, err)
+	expectedDiscards := int64(4)
+	require.Equal(t, &expectedDiscards, discards)
+	require.Nil(t, syncBlock)
+	stats = s.Stats()
+	assert.Equal(t, false, stats.Synced())
+	assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
+	vars = s.SCVars()
+	assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
+	assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
+	assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
+
+	// At this point, the DB only has data up to block 1
+	dbBlock, err := s.historyDB.GetLastBlock()
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), dbBlock.Num)
+
+	// Accounts in HistoryDB and StateDB must be empty
+	dbAccounts, err := s.historyDB.GetAllAccounts()
+	require.NoError(t, err)
+	sdbAccounts, err := s.stateDB.TestGetAccounts()
+	require.NoError(t, err)
+	assert.Equal(t, 0, len(dbAccounts))
+	assertEqualAccountsHistoryDBStateDB(t, dbAccounts, sdbAccounts)
+
+	// Sync blocks 2-6
+	for i := 0; i < 5; i++ {
+		syncBlock, discards, err = s.Sync(ctx, nil)
+		require.NoError(t, err)
+		require.Nil(t, discards)
+		require.NotNil(t, syncBlock)
+		assert.Nil(t, syncBlock.Rollup.Vars)
+		assert.Nil(t, syncBlock.Auction.Vars)
+		assert.Nil(t, syncBlock.WDelayer.Vars)
+		assert.Equal(t, int64(2+i), syncBlock.Block.Num)
+
+		stats = s.Stats()
+		assert.Equal(t, int64(1), stats.Eth.FirstBlockNum)
+		assert.Equal(t, int64(6), stats.Eth.LastBlock.Num)
+		assert.Equal(t, int64(2+i), stats.Sync.LastBlock.Num)
+		if i == 4 {
+			assert.Equal(t, true, stats.Synced())
+		} else {
+			assert.Equal(t, false, stats.Synced())
+		}
+
+		vars = s.SCVars()
+		assert.Equal(t, *clientSetup.RollupVariables, vars.Rollup)
+		assert.Equal(t, *clientSetup.AuctionVariables, vars.Auction)
+		assert.Equal(t, *clientSetup.WDelayerVariables, vars.WDelayer)
+	}
+
+	dbBlock, err = s.historyDB.GetLastBlock()
+	require.NoError(t, err)
+	assert.Equal(t, int64(6), dbBlock.Num)
+
+	// Accounts in HistoryDB and StateDB have only 2 entries
+	dbAccounts, err = s.historyDB.GetAllAccounts()
+	require.NoError(t, err)
+	sdbAccounts, err = s.stateDB.TestGetAccounts()
+	require.NoError(t, err)
+	assert.Equal(t, 2, len(dbAccounts))
+	assertEqualAccountsHistoryDBStateDB(t, dbAccounts, sdbAccounts)
+
+	closeTestModules(t, stateDB, historyDB, l2DB)
+}
+
+func TestSyncForgerCommitment(t *testing.T) {
+	stateDB, historyDB, l2DB := newTestModules(t)
+
+	// Init eth client
+	var timer timer
+	clientSetup := test.NewClientSetupExample()
+	clientSetup.ChainID = big.NewInt(int64(chainID))
+	clientSetup.AuctionConstants.GenesisBlockNum = 2
+	clientSetup.AuctionConstants.BlocksPerSlot = 4
+	clientSetup.AuctionVariables.SlotDeadline = 2
+	bootCoordAddr := clientSetup.AuctionVariables.BootCoordinator
+	client := test.NewClient(true, &timer, &ethCommon.Address{}, clientSetup)
+
+	// Create Synchronizer
+	s, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{
+		StatsUpdateBlockNumDiffThreshold: 100,
+		StatsUpdateFrequencyDivider:      100,
+	})
+	require.NoError(t, err)
+
+	ctx := context.Background()
+
+	set := `
+		Type: Blockchain
+
+		// Slot = 0
+
+		> block // 2
+		> block // 3
+		> block // 4
+		> block // 5
+
+		// Slot = 1
+
+		> block // 6
+		> batch
+		> block // 7
+		> block // 8
+		> block // 9
+
+		// Slot = 2
+
+		> block // 10
+		> block // 11
+		> batch
+		> block // 12
+		> block // 13
+
+	`
+	// For each block, true when the slot that belongs to the following
+	// block has forgerCommitment
+	commitment := map[int64]bool{
+		2:  false,
+		3:  false,
+		4:  false,
+		5:  false,
+
+		6:  false,
+		7:  true,
+		8:  true,
+		9:  false,
+
+		10: false,
+		11: false,
+		12: false,
+		13: false,
+	}
+	tc := til.NewContext(chainID, common.RollupConstMaxL1UserTx)
+	blocks, err := tc.GenerateBlocks(set)
+	assert.NoError(t, err)
+
+	tilCfgExtra := til.ConfigExtra{
+		BootCoordAddr: bootCoordAddr,
+		CoordUser:     "A",
+	}
+	err = tc.FillBlocksExtra(blocks, &tilCfgExtra)
+	require.NoError(t, err)
+
+	// for i := range blocks {
+	// 	for j := range blocks[i].Rollup.Batches {
+	// 		blocks[i].Rollup.Batches[j].Batch.SlotNum = int64(i) / 4
+	// 	}
+	// }
+
+	// Sync until being up to date with the current state
+	for {
+		syncBlock, discards, err := s.Sync(ctx, nil)
+		require.NoError(t, err)
+		require.Nil(t, discards)
+		if syncBlock == nil {
+			break
+		}
+	}
+	stats := s.Stats()
+	require.Equal(t, int64(1), stats.Sync.LastBlock.Num)
+
+	// Store ForgerCommitment observed at every block by the live synchronizer
+	syncCommitment := map[int64]bool{}
+	// Store ForgerCommitment observed at every block by a synchronizer that is restarted
+	syncRestartedCommitment := map[int64]bool{}
+	for _, block := range blocks {
+		// Add block data to the smart contracts
+		err = client.CtlAddBlocks([]common.BlockData{block})
+		require.NoError(t, err)
+
+		syncBlock, discards, err := s.Sync(ctx, nil)
+		require.NoError(t, err)
+		require.Nil(t, discards)
+		if syncBlock == nil {
+			break
+		}
+		stats := s.Stats()
+		require.True(t, stats.Synced())
+		syncCommitment[syncBlock.Block.Num] = stats.Sync.Auction.CurrentSlot.ForgerCommitment
+
+		s2, err := NewSynchronizer(client, historyDB, l2DB, stateDB, Config{
+			StatsUpdateBlockNumDiffThreshold: 100,
+			StatsUpdateFrequencyDivider:      100,
+		})
+		require.NoError(t, err)
+		stats = s2.Stats()
+		require.True(t, stats.Synced())
+		syncRestartedCommitment[syncBlock.Block.Num] = stats.Sync.Auction.CurrentSlot.ForgerCommitment
+	}
+	assert.Equal(t, commitment, syncCommitment)
+	assert.Equal(t, commitment, syncRestartedCommitment)
+
+	closeTestModules(t, stateDB, historyDB, l2DB)
+}
diff --git a/test/ethclient.go b/test/ethclient.go
index 742affb5..d94dd41d 100644
--- a/test/ethclient.go
+++ b/test/ethclient.go
@@ -8,7 +8,6 @@ import (
 	"fmt"
 	"math/big"
 	"reflect"
-	"sort"
 	"sync"
 	"time"
 
@@ -680,61 +679,6 @@ func (c *Client) EthTransactionReceipt(ctx context.Context,
 	return nil, nil
 }
 
-// EthNextBlockWithSCEvents returns the next block with events in the provided SC addresses
-func (c *Client) EthNextBlockWithSCEvents(ctx context.Context, fromBlock int64, addresses []ethCommon.Address) (int64, error) {
-	bb := make([]*Block, 0, len(c.blocks))
-
-	for _, block := range c.blocks {
-		if block.Eth.BlockNum >= fromBlock {
-			bb = append(bb, block)
-		}
-	}
-
-	sort.Slice(bb, func(i, j int) bool {
-		return bb[i].Eth.BlockNum < bb[j].Eth.BlockNum
-	})
-
-	for _, block := range bb {
-		if len(block.Rollup.Events.L1UserTx) > 0 ||
-			len(block.Rollup.Events.AddToken) > 0 ||
-			len(block.Rollup.Events.ForgeBatch) > 0 ||
-			len(block.Rollup.Events.UpdateForgeL1L2BatchTimeout) > 0 ||
-			len(block.Rollup.Events.UpdateFeeAddToken) > 0 ||
-			len(block.Rollup.Events.Withdraw) > 0 ||
-			len(block.Rollup.Events.UpdateWithdrawalDelay) > 0 ||
-			
len(block.Rollup.Events.UpdateBucketWithdraw) > 0 || - len(block.Rollup.Events.UpdateBucketsParameters) > 0 || - len(block.Rollup.Events.UpdateTokenExchange) > 0 || - len(block.Rollup.Events.SafeMode) > 0 || - - len(block.Auction.Events.NewBid) > 0 || - len(block.Auction.Events.NewSlotDeadline) > 0 || - len(block.Auction.Events.NewClosedAuctionSlots) > 0 || - len(block.Auction.Events.NewOutbidding) > 0 || - len(block.Auction.Events.NewDonationAddress) > 0 || - len(block.Auction.Events.NewBootCoordinator) > 0 || - len(block.Auction.Events.NewOpenAuctionSlots) > 0 || - len(block.Auction.Events.NewAllocationRatio) > 0 || - len(block.Auction.Events.SetCoordinator) > 0 || - len(block.Auction.Events.NewForgeAllocated) > 0 || - len(block.Auction.Events.NewDefaultSlotSetBid) > 0 || - len(block.Auction.Events.NewForge) > 0 || - len(block.Auction.Events.HEZClaimed) > 0 || - - len(block.WDelayer.Events.Deposit) > 0 || - len(block.WDelayer.Events.Withdraw) > 0 || - len(block.WDelayer.Events.EmergencyModeEnabled) > 0 || - len(block.WDelayer.Events.NewWithdrawalDelay) > 0 || - len(block.WDelayer.Events.EscapeHatchWithdrawal) > 0 || - len(block.WDelayer.Events.NewEmergencyCouncil) > 0 || - len(block.WDelayer.Events.NewHermezGovernanceAddress) > 0 { - return block.Eth.BlockNum, nil - } - } - - return 0, nil -} - // CtlAddERC20 adds an ERC20 token to the blockchain. func (c *Client) CtlAddERC20(tokenAddr ethCommon.Address, constants eth.ERC20Consts) { nextBlock := c.nextBlock()
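
For reference, a minimal caller-side sketch of how the new `Sync(ctx, lastBlock)` signature is meant to be driven, mirroring the `syncLoopFn` change in node/node.go above. The `driveSync` helper, its name, and its simple polling loop are illustrative only and not part of this change; it only assumes the `Synchronizer.Sync` signature and the `common.Block`/`common.BlockData` types that appear in this diff.

```go
package main

import (
	"context"
	"time"

	"github.com/hermeznetwork/hermez-node/common"
	"github.com/hermeznetwork/hermez-node/synchronizer"
)

// driveSync shows the caller-side contract of Sync(ctx, lastBlock): the block
// returned by the previous successful call is passed back so the synchronizer
// can skip the HistoryDB lookup, and it is reset to nil after a reorg so the
// next call re-reads the last saved block from the DB.
func driveSync(ctx context.Context, sync *synchronizer.Synchronizer, wait time.Duration) error {
	var lastBlock *common.Block
	for {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		blockData, discarded, err := sync.Sync(ctx, lastBlock)
		if err != nil {
			return err
		}
		switch {
		case discarded != nil:
			// Reorg detected: forget the cached block.
			lastBlock = nil
		case blockData != nil:
			// New block synced: cache it for the next iteration.
			lastBlock = &blockData.Block
		default:
			// Nothing new yet: wait before polling again.
			time.Sleep(wait)
		}
	}
}
```

Passing the last synced block back avoids a HistoryDB query per loop iteration, which is the point of widening the Sync signature in this change.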