Merge branch 'upstream/v2.0.0-beta17' into zjg/merge-base17
zjg555543 committed Aug 29, 2024
2 parents d5c777e + 8c3b1fa commit ac59137
Showing 18 changed files with 235 additions and 258 deletions.
3 changes: 3 additions & 0 deletions core/vm/common.go
@@ -67,6 +67,9 @@ func getData(data []byte, start uint64, size uint64) []byte {
 // getDataBig returns a slice from the data based on the start and size and pads
 // up to size with zero's. This function is overflow safe.
 func getDataBig(data []byte, start *uint256.Int, size uint64) []byte {
+    if size >= 1*1024*1024*1024 {
+        size = 1 * 1024 * 1024 * 1024
+    }
     start64, overflow := start.Uint64WithOverflow()
     if overflow {
         start64 = ^uint64(0)
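For illustration, a minimal self-contained sketch of what the new guard achieves, assuming a hypothetical helper getDataClamped and taking only the 1 GiB constant from the hunk above; this is not the repository's getData/getDataBig code, just the padded-slice semantics with the cap applied, so an oversized size operand can no longer force a multi-gigabyte allocation:

package main

import "fmt"

const maxDataSize = 1 << 30 // 1 GiB, the cap introduced in the hunk above

// getDataClamped copies whatever bytes exist in the requested window and
// zero-pads up to size, but never allocates more than maxDataSize bytes.
func getDataClamped(data []byte, start, size uint64) []byte {
    if size > maxDataSize {
        size = maxDataSize
    }
    length := uint64(len(data))
    if start > length {
        start = length
    }
    end := start + size
    if end > length {
        end = length
    }
    out := make([]byte, size) // zero-initialised, so the tail is already padded
    copy(out, data[start:end])
    return out
}

func main() {
    fmt.Println(len(getDataClamped([]byte{1, 2, 3}, 1, 8))) // 8, zero-padded
    // a request for 1<<40 bytes would be clamped to maxDataSize (1 GiB)
}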
5 changes: 4 additions & 1 deletion core/vm/contract.go
@@ -102,7 +102,10 @@ func (c *Contract) validJumpdest(dest *uint256.Int) (bool, bool) {
     if c.skipAnalysis {
         return true, false
     }
-    return c.isCode(udest), true
+    /*
+     * zkEVM doesn't do dynamic jumpdest analysis. So PUSHN is not considered.
+     */
+    return true, false
 }
 
 func isCodeFromAnalysis(analysis []uint64, udest uint64) bool {
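A minimal sketch of the behavioural difference, assuming the range and JUMPDEST-opcode checks earlier in validJumpdest (not shown in this hunk) are unchanged; analysisValid and zkValid are illustrative helpers, not the repository's Contract or analysis code. Upstream-style validation rejects a 0x5b byte that sits inside PUSH immediate data, while the patched zkEVM path accepts any in-range JUMPDEST byte without that analysis:

package main

import "fmt"

const opJumpdest = 0x5b

// analysisValid approximates the upstream check: dest must hold the JUMPDEST
// opcode and must be a real instruction boundary, i.e. not part of the
// immediate data of a PUSH1..PUSH32 instruction.
func analysisValid(code []byte, dest uint64) bool {
    if dest >= uint64(len(code)) || code[dest] != opJumpdest {
        return false
    }
    for pc := uint64(0); pc < uint64(len(code)); {
        if pc == dest {
            return true // dest is reached as an instruction, not as PUSH data
        }
        op := code[pc]
        pc++
        if op >= 0x60 && op <= 0x7f { // PUSH1..PUSH32: skip the immediate bytes
            pc += uint64(op) - 0x5f
        }
    }
    return false
}

// zkValid approximates the patched behaviour: the JUMPDEST byte must still be
// there, but the PUSH-immediate analysis is skipped entirely.
func zkValid(code []byte, dest uint64) bool {
    return dest < uint64(len(code)) && code[dest] == opJumpdest
}

func main() {
    code := []byte{0x60, 0x5b, 0x5b} // PUSH1 0x5b, then a real JUMPDEST
    fmt.Println(analysisValid(code, 1), zkValid(code, 1)) // false true
    fmt.Println(analysisValid(code, 2), zkValid(code, 2)) // true true
}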
21 changes: 1 addition & 20 deletions eth/backend.go
@@ -754,9 +754,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
         latestHeader := backend.dataStream.GetHeader()
         if latestHeader.TotalEntries == 0 {
             log.Info("[dataStream] setting the stream progress to 0")
-            if err := stages.SaveStageProgress(tx, stages.DataStream, 0); err != nil {
-                return nil, err
-            }
             backend.preStartTasks.WarmUpDataStream = true
         }
     }
@@ -1054,23 +1051,7 @@ func newEtherMan(cfg *ethconfig.Config, l2ChainName, url string) *etherman.Clien
 
 // creates a datastream client with default parameters
 func initDataStreamClient(ctx context.Context, cfg *ethconfig.Zk, latestForkId uint16) *client.StreamClient {
-    // datastream
-    // Create client
-    log.Info("Starting datastream client...")
-    // retry connection
-    datastreamClient := client.NewClient(ctx, cfg.L2DataStreamerUrl, cfg.DatastreamVersion, cfg.L2DataStreamerTimeout, latestForkId)
-
-    for i := 0; i < 30; i++ {
-        // Start client (connect to the server)
-        if err := datastreamClient.Start(); err != nil {
-            log.Warn(fmt.Sprintf("Error when starting datastream client, retrying... Error: %s", err))
-            time.Sleep(1 * time.Second)
-        } else {
-            log.Info("Datastream client initialized...")
-            return datastreamClient
-        }
-    }
-    panic("datastream client could not be initialized")
+    return client.NewClient(ctx, cfg.L2DataStreamerUrl, cfg.DatastreamVersion, cfg.L2DataStreamerTimeout, latestForkId)
 }
 
 func (backend *Ethereum) Init(stack *node.Node, config *ethconfig.Config) error {
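With the retry loop gone, initDataStreamClient only constructs the client; it no longer dials the server or panics on failure. If the old connect-with-retry behaviour is still wanted at a call site, it can be reproduced from the identifiers visible in the removed lines. The wrapper below is a sketch only (this diff does not show where, or whether, the retry now happens), assumes the same imports as the removed code, and returns an error instead of panicking:

// Hypothetical wrapper, reusing client.NewClient and Start from the removed code.
func startDataStreamClientWithRetry(ctx context.Context, cfg *ethconfig.Zk, latestForkId uint16) (*client.StreamClient, error) {
    datastreamClient := client.NewClient(ctx, cfg.L2DataStreamerUrl, cfg.DatastreamVersion, cfg.L2DataStreamerTimeout, latestForkId)
    for i := 0; i < 30; i++ {
        if err := datastreamClient.Start(); err != nil {
            log.Warn(fmt.Sprintf("Error when starting datastream client, retrying... Error: %s", err))
            time.Sleep(1 * time.Second)
            continue
        }
        return datastreamClient, nil
    }
    return nil, fmt.Errorf("datastream client could not be initialized")
}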
6 changes: 3 additions & 3 deletions zk/datastream/client/stream_client.go
@@ -186,10 +186,10 @@ func (c *StreamClient) ExecutePerFile(bookmark *types.BookmarkProto, function fu
         }
         file, err := c.readFileEntry()
         if err != nil {
-            return fmt.Errorf("error reading file entry: %v", err)
+            return fmt.Errorf("reading file entry: %v", err)
         }
         if err := function(file); err != nil {
-            return fmt.Errorf("error executing function: %v", err)
+            return fmt.Errorf("executing function: %v", err)
 
         }
         count++
@@ -317,7 +317,7 @@ LOOP:
         case *types.BatchStart:
             c.currentFork = parsedProto.ForkId
             c.entryChan <- parsedProto
-        case *types.GerUpdateProto:
+        case *types.GerUpdate:
             c.entryChan <- parsedProto
         case *types.BatchEnd:
             c.entryChan <- parsedProto
181 changes: 87 additions & 94 deletions zk/debug_tools/datastream-correctness-check/main.go
@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
 
+    "github.com/gateway-fm/cdk-erigon-lib/common"
     "github.com/ledgerwatch/erigon/zk/datastream/client"
     "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream"
     "github.com/ledgerwatch/erigon/zk/datastream/types"
@@ -27,123 +28,115 @@ func main() {
}

// create bookmark
bookmark := types.NewBookmarkProto(5191325, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK)
bookmark := types.NewBookmarkProto(0, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK)

// var previousFile *types.FileEntry
var previousFile *types.FileEntry
var lastBlockRoot common.Hash
progressBatch := uint64(0)
progressBlock := uint64(0)

printFunction := func(file *types.FileEntry) error {
function := func(file *types.FileEntry) error {
switch file.EntryType {
case types.EntryTypeL2Block:
l2Block, err := types.UnmarshalL2Block(file.Data)
case types.EntryTypeL2BlockEnd:
if previousFile != nil && previousFile.EntryType != types.EntryTypeL2Block && previousFile.EntryType != types.EntryTypeL2Tx {
return fmt.Errorf("unexpected entry type before l2 block end: %v", previousFile.EntryType)
}
case types.BookmarkEntryType:
bookmark, err := types.UnmarshalBookmark(file.Data)
if err != nil {
return err
}
fmt.Println("L2Block: ", l2Block.L2BlockNumber, "batch", l2Block.BatchNumber, "stateRoot", l2Block.StateRoot.Hex())
if l2Block.L2BlockNumber > 5191335 {
return fmt.Errorf("stop")
if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_BATCH {
progressBatch = bookmark.Value
if previousFile != nil && previousFile.EntryType != types.EntryTypeBatchEnd {
return fmt.Errorf("unexpected entry type before batch bookmark type: %v, bookmark batch number: %d", previousFile.EntryType, bookmark.Value)
}
}
if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK {
progressBlock = bookmark.Value
if previousFile != nil &&
previousFile.EntryType != types.EntryTypeBatchStart &&
previousFile.EntryType != types.EntryTypeL2BlockEnd {
return fmt.Errorf("unexpected entry type before block bookmark type: %v, bookmark block number: %d", previousFile.EntryType, bookmark.Value)
}
}
case types.EntryTypeBatchStart:
batchStart, err := types.UnmarshalBatchStart(file.Data)
if err != nil {
return err
}
progressBatch = batchStart.Number
if previousFile != nil {
if previousFile.EntryType != types.BookmarkEntryType {
return fmt.Errorf("unexpected entry type before batch start: %v, batchStart Batch number: %d", previousFile.EntryType, batchStart.Number)
} else {
bookmark, err := types.UnmarshalBookmark(previousFile.Data)
if err != nil {
return err
}
if bookmark.BookmarkType() != datastream.BookmarkType_BOOKMARK_TYPE_BATCH {
return fmt.Errorf("unexpected bookmark type before batch start: %v, batchStart Batch number: %d", bookmark.BookmarkType(), batchStart.Number)
}
}
}
case types.EntryTypeBatchEnd:
if previousFile != nil &&
previousFile.EntryType != types.EntryTypeL2BlockEnd &&
previousFile.EntryType != types.EntryTypeL2Tx &&
previousFile.EntryType != types.EntryTypeL2Block &&
previousFile.EntryType != types.EntryTypeBatchStart {
return fmt.Errorf("unexpected entry type before batch end: %v", previousFile.EntryType)
}
batchEnd, err := types.UnmarshalBatchEnd(file.Data)
if err != nil {
return err
}
fmt.Println("BatchEnd: ", batchEnd.Number, "stateRoot", batchEnd.StateRoot.Hex())
if batchEnd.Number != progressBatch {
return fmt.Errorf("batch end number mismatch: %d, expected: %d", batchEnd.Number, progressBatch)
}
if batchEnd.StateRoot != lastBlockRoot {
return fmt.Errorf("batch end state root mismatch: %x, expected: %x", batchEnd.StateRoot, lastBlockRoot)
}
case types.EntryTypeL2Tx:
if previousFile != nil && previousFile.EntryType != types.EntryTypeL2Tx && previousFile.EntryType != types.EntryTypeL2Block {
return fmt.Errorf("unexpected entry type before l2 tx: %v", previousFile.EntryType)
}
case types.EntryTypeL2Block:
l2Block, err := types.UnmarshalL2Block(file.Data)
if err != nil {
return err
}
progressBlock = l2Block.L2BlockNumber
if previousFile != nil {
if previousFile.EntryType != types.BookmarkEntryType && !previousFile.IsL2BlockEnd() {
return fmt.Errorf("unexpected entry type before l2 block: %v, block number: %d", previousFile.EntryType, l2Block.L2BlockNumber)
} else {
bookmark, err := types.UnmarshalBookmark(previousFile.Data)
if err != nil {
return err
}
if bookmark.BookmarkType() != datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK {
return fmt.Errorf("unexpected bookmark type before l2 block: %v, block number: %d", bookmark.BookmarkType(), l2Block.L2BlockNumber)
}

}
}
lastBlockRoot = l2Block.StateRoot
case types.EntryTypeGerUpdate:
return nil
default:
return fmt.Errorf("unexpected entry type: %v", file.EntryType)
}

previousFile = file
return nil
}

// function := func(file *types.FileEntry) error {
// switch file.EntryType {
// case types.EntryTypeL2BlockEnd:
// if previousFile != nil && previousFile.EntryType != types.EntryTypeL2Block && previousFile.EntryType != types.EntryTypeL2Tx {
// return fmt.Errorf("unexpected entry type before l2 block end: %v", previousFile.EntryType)
// }
// case types.BookmarkEntryType:
// bookmark, err := types.UnmarshalBookmark(file.Data)
// if err != nil {
// return err
// }
// if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_BATCH {
// progressBatch = bookmark.Value
// if previousFile != nil && previousFile.EntryType != types.EntryTypeBatchEnd {
// return fmt.Errorf("unexpected entry type before batch bookmark type: %v, bookmark batch number: %d", previousFile.EntryType, bookmark.Value)
// }
// }
// if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK {
// progressBlock = bookmark.Value
// if previousFile != nil &&
// previousFile.EntryType != types.EntryTypeBatchStart &&
// previousFile.EntryType != types.EntryTypeL2BlockEnd {
// return fmt.Errorf("unexpected entry type before block bookmark type: %v, bookmark block number: %d", previousFile.EntryType, bookmark.Value)
// }
// }
// case types.EntryTypeBatchStart:
// batchStart, err := types.UnmarshalBatchStart(file.Data)
// if err != nil {
// return err
// }
// progressBatch = batchStart.Number
// if previousFile != nil {
// if previousFile.EntryType != types.BookmarkEntryType {
// return fmt.Errorf("unexpected entry type before batch start: %v, batchStart Batch number: %d", previousFile.EntryType, batchStart.Number)
// } else {
// bookmark, err := types.UnmarshalBookmark(previousFile.Data)
// if err != nil {
// return err
// }
// if bookmark.BookmarkType() != datastream.BookmarkType_BOOKMARK_TYPE_BATCH {
// return fmt.Errorf("unexpected bookmark type before batch start: %v, batchStart Batch number: %d", bookmark.BookmarkType(), batchStart.Number)
// }
// }
// }
// case types.EntryTypeBatchEnd:
// if previousFile != nil &&
// previousFile.EntryType != types.EntryTypeL2BlockEnd &&
// previousFile.EntryType != types.EntryTypeBatchStart {
// return fmt.Errorf("unexpected entry type before batch end: %v", previousFile.EntryType)
// }
// case types.EntryTypeL2Tx:
// if previousFile != nil && previousFile.EntryType != types.EntryTypeL2Tx && previousFile.EntryType != types.EntryTypeL2Block {
// return fmt.Errorf("unexpected entry type before l2 tx: %v", previousFile.EntryType)
// }
// case types.EntryTypeL2Block:
// l2Block, err := types.UnmarshalL2Block(file.Data)
// if err != nil {
// return err
// }
// progressBlock = l2Block.L2BlockNumber
// if previousFile != nil {
// if previousFile.EntryType != types.BookmarkEntryType && !previousFile.IsL2BlockEnd() {
// return fmt.Errorf("unexpected entry type before l2 block: %v, block number: %d", previousFile.EntryType, l2Block.L2BlockNumber)
// } else {
// bookmark, err := types.UnmarshalBookmark(previousFile.Data)
// if err != nil {
// return err
// }
// if bookmark.BookmarkType() != datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK {
// return fmt.Errorf("unexpected bookmark type before l2 block: %v, block number: %d", bookmark.BookmarkType(), l2Block.L2BlockNumber)
// }

// }
// }
// case types.EntryTypeGerUpdate:
// return nil
// default:
// return fmt.Errorf("unexpected entry type: %v", file.EntryType)
// }

// previousFile = file
// return nil
// }
// send start command
err = client.ExecutePerFile(bookmark, printFunction)
err = client.ExecutePerFile(bookmark, function)
fmt.Println("progress block: ", progressBlock)
fmt.Println("progress batch: ", progressBatch)
if err != nil {
panic(fmt.Sprintf("found an error: %s", err))
panic(err)
}
}
25 changes: 0 additions & 25 deletions zk/hermez_db/db.go
@@ -1614,31 +1614,6 @@ func (db *HermezDbReader) GetInvalidBatch(batchNo uint64) (bool, error) {
     return len(v) > 0, nil
 }
 
-func (db *HermezDb) WriteIsBatchPartiallyProcessed(batchNo uint64) error {
-    return db.tx.Put(BATCH_PARTIALLY_PROCESSED, Uint64ToBytes(batchNo), []byte{1})
-}
-
-func (db *HermezDb) DeleteIsBatchPartiallyProcessed(batchNo uint64) error {
-    return db.tx.Delete(BATCH_PARTIALLY_PROCESSED, Uint64ToBytes(batchNo))
-}
-
-func (db *HermezDbReader) GetIsBatchPartiallyProcessed(batchNo uint64) (bool, error) {
-    v, err := db.tx.GetOne(BATCH_PARTIALLY_PROCESSED, Uint64ToBytes(batchNo))
-    if err != nil {
-        return false, err
-    }
-    return len(v) > 0, nil
-}
-
-func (db *HermezDb) TruncateIsBatchPartiallyProcessed(fromBatch, toBatch uint64) error {
-    for batch := fromBatch; batch <= toBatch; batch++ {
-        if err := db.DeleteIsBatchPartiallyProcessed(batch); err != nil {
-            return err
-        }
-    }
-    return nil
-}
-
 func (db *HermezDb) WriteLocalExitRootForBatchNo(batchNo uint64, root common.Hash) error {
     return db.tx.Put(LOCAL_EXIT_ROOTS, Uint64ToBytes(batchNo), root.Bytes())
 }
22 changes: 11 additions & 11 deletions zk/legacy_executor_verifier/executor.go
@@ -158,7 +158,7 @@ func (e *Executor) CheckOnline() bool {
     return true
 }
 
-func (e *Executor) Verify(p *Payload, request *VerifierRequest, oldStateRoot common.Hash) (bool, *executor.ProcessBatchResponseV2, error) {
+func (e *Executor) Verify(p *Payload, request *VerifierRequest, oldStateRoot common.Hash) (bool, *executor.ProcessBatchResponseV2, error, error) {
     ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
     defer cancel()
 
@@ -183,12 +183,12 @@ func (e *Executor) Verify(p *Payload, request *VerifierRequest, oldStateRoot com
     if e.outputLocation != "" {
         asJson, err := json.Marshal(grpcRequest)
         if err != nil {
-            return false, nil, err
+            return false, nil, nil, err
         }
         file := path.Join(e.outputLocation, fmt.Sprintf("payload_%d.json", request.BatchNumber))
         err = os.WriteFile(file, asJson, 0644)
         if err != nil {
-            return false, nil, err
+            return false, nil, nil, err
         }
 
         // now save the witness as a hex string along with the datastream
@@ -197,20 +197,23 @@ func (e *Executor) Verify(p *Payload, request *VerifierRequest, oldStateRoot com
         witnessAsHex := "0x" + hex.EncodeToString(p.Witness)
         err = os.WriteFile(witnessHexFile, []byte(witnessAsHex), 0644)
         if err != nil {
-            return false, nil, err
+            return false, nil, nil, err
         }
 
         dataStreamHexFile := path.Join(e.outputLocation, fmt.Sprintf("datastream_%d.hex", request.BatchNumber))
         dataStreamAsHex := "0x" + hex.EncodeToString(p.DataStream)
         err = os.WriteFile(dataStreamHexFile, []byte(dataStreamAsHex), 0644)
         if err != nil {
-            return false, nil, err
+            return false, nil, nil, err
         }
     }
 
     resp, err := e.client.ProcessStatelessBatchV2(ctx, grpcRequest, grpc.MaxCallSendMsgSize(size), grpc.MaxCallRecvMsgSize(size))
     if err != nil {
-        return false, nil, fmt.Errorf("failed to process stateless batch: %w", err)
+        return false, nil, nil, fmt.Errorf("failed to process stateless batch: %w", err)
     }
+    if resp == nil {
+        return false, nil, nil, fmt.Errorf("nil response")
+    }
 
     counters := map[string]int{
@@ -266,14 +269,11 @@ func (e *Executor) Verify(p *Payload, request *VerifierRequest, oldStateRoot com
 
     log.Debug("Received response from executor", "grpcUrl", e.grpcUrl, "response", resp)
 
-    return responseCheck(resp, request)
+    ok, executorResponse, executorErr := responseCheck(resp, request)
+    return ok, executorResponse, executorErr, nil
 }
 
 func responseCheck(resp *executor.ProcessBatchResponseV2, request *VerifierRequest) (bool, *executor.ProcessBatchResponseV2, error) {
-    if resp == nil {
-        return false, nil, fmt.Errorf("nil response")
-    }
-
     if resp.ForkId != request.ForkId {
         log.Warn("Executor fork id mismatch", "executor", resp.ForkId, "our", request.ForkId)
     }
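Verify now returns two errors: the new final error reports that the verification call itself failed (JSON marshalling, gRPC transport, nil response), while the third return still carries the verdict produced by responseCheck. A hypothetical call-site fragment, using only names that appear in this diff plus illustrative locals:

// Sketch of a caller; e, payload, request and oldStateRoot are assumed to be
// in scope, and the error handling shown is illustrative, not repository code.
ok, resp, executorErr, err := e.Verify(payload, request, oldStateRoot)
if err != nil {
    // infrastructure failure: nothing useful came back from the executor
    return fmt.Errorf("executor verify call failed: %w", err)
}
if !ok {
    // the executor answered, but responseCheck rejected the batch
    log.Warn("executor rejected batch", "batch", request.BatchNumber, "err", executorErr)
}
_ = resp // the ProcessBatchResponseV2 remains available for further inspection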