From 766272ff8c1c3b1574d40db2eefff53371b84ce7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 12 Sep 2023 14:03:50 +0300 Subject: [PATCH 01/98] params: begin v1.13.1 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 385ec2208f..c18c650790 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 13 // Minor version component of the current release - VersionPatch = 0 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 13 // Minor version component of the current release + VersionPatch = 1 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From 43df612268b0cd460a01d25ad08bdb06bc566bb0 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 13 Sep 2023 07:42:32 +0200 Subject: [PATCH 02/98] internal, log: remove code for old unsupported go-versions (#28090) --- internal/debug/loudpanic.go | 3 --- internal/debug/loudpanic_fallback.go | 25 ---------------------- internal/debug/trace.go | 3 --- internal/debug/trace_fallback.go | 32 ---------------------------- log/handler.go | 19 +++++++++++++++++ log/handler_go13.go | 27 ----------------------- log/handler_go14.go | 24 --------------------- 7 files changed, 19 insertions(+), 114 deletions(-) delete mode 100644 internal/debug/loudpanic_fallback.go delete mode 100644 internal/debug/trace_fallback.go delete mode 100644 log/handler_go13.go delete mode 100644 log/handler_go14.go diff --git a/internal/debug/loudpanic.go b/internal/debug/loudpanic.go index 86e6bc88f8..a7296e7b3f 100644 --- a/internal/debug/loudpanic.go +++ b/internal/debug/loudpanic.go @@ -14,9 +14,6 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -//go:build go1.6 -// +build go1.6 - package debug import "runtime/debug" diff --git a/internal/debug/loudpanic_fallback.go b/internal/debug/loudpanic_fallback.go deleted file mode 100644 index 377490e5be..0000000000 --- a/internal/debug/loudpanic_fallback.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build !go1.6 -// +build !go1.6 - -package debug - -// LoudPanic panics in a way that gets all goroutine stacks printed on stderr. 
-func LoudPanic(x interface{}) { - panic(x) -} diff --git a/internal/debug/trace.go b/internal/debug/trace.go index eea8798234..e291030b82 100644 --- a/internal/debug/trace.go +++ b/internal/debug/trace.go @@ -14,9 +14,6 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -//go:build go1.5 -// +build go1.5 - package debug import ( diff --git a/internal/debug/trace_fallback.go b/internal/debug/trace_fallback.go deleted file mode 100644 index ec07d991ef..0000000000 --- a/internal/debug/trace_fallback.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build !go1.5 -// +build !go1.5 - -// no-op implementation of tracing methods for Go < 1.5. - -package debug - -import "errors" - -func (*HandlerT) StartGoTrace(string) error { - return errors.New("tracing is not supported on Go < 1.5") -} - -func (*HandlerT) StopGoTrace() error { - return errors.New("tracing is not supported on Go < 1.5") -} diff --git a/log/handler.go b/log/handler.go index 892cfcc3e1..4a0cf578f6 100644 --- a/log/handler.go +++ b/log/handler.go @@ -7,6 +7,7 @@ import ( "os" "reflect" "sync" + "sync/atomic" "github.com/go-stack/stack" ) @@ -354,3 +355,21 @@ func (m muster) FileHandler(path string, fmtr Format) Handler { func (m muster) NetHandler(network, addr string, fmtr Format) Handler { return must(NetHandler(network, addr, fmtr)) } + +// swapHandler wraps another handler that may be swapped out +// dynamically at runtime in a thread-safe fashion. +type swapHandler struct { + handler atomic.Value +} + +func (h *swapHandler) Log(r *Record) error { + return (*h.handler.Load().(*Handler)).Log(r) +} + +func (h *swapHandler) Swap(newHandler Handler) { + h.handler.Store(&newHandler) +} + +func (h *swapHandler) Get() Handler { + return *h.handler.Load().(*Handler) +} diff --git a/log/handler_go13.go b/log/handler_go13.go deleted file mode 100644 index 4df694debe..0000000000 --- a/log/handler_go13.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build !go1.4 -// +build !go1.4 - -package log - -import ( - "sync/atomic" - "unsafe" -) - -// swapHandler wraps another handler that may be swapped out -// dynamically at runtime in a thread-safe fashion. 
-type swapHandler struct { - handler unsafe.Pointer -} - -func (h *swapHandler) Log(r *Record) error { - return h.Get().Log(r) -} - -func (h *swapHandler) Get() Handler { - return *(*Handler)(atomic.LoadPointer(&h.handler)) -} - -func (h *swapHandler) Swap(newHandler Handler) { - atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler)) -} diff --git a/log/handler_go14.go b/log/handler_go14.go deleted file mode 100644 index d0cb14aa06..0000000000 --- a/log/handler_go14.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build go1.4 -// +build go1.4 - -package log - -import "sync/atomic" - -// swapHandler wraps another handler that may be swapped out -// dynamically at runtime in a thread-safe fashion. -type swapHandler struct { - handler atomic.Value -} - -func (h *swapHandler) Log(r *Record) error { - return (*h.handler.Load().(*Handler)).Log(r) -} - -func (h *swapHandler) Swap(newHandler Handler) { - h.handler.Store(&newHandler) -} - -func (h *swapHandler) Get() Handler { - return *h.handler.Load().(*Handler) -} From 8d38b1fe62950e8675795abf63b7c978415ab7ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 13 Sep 2023 15:13:10 +0300 Subject: [PATCH 03/98] core/rawdb: skip pathdb state inspection in hashdb mode (#28108) --- core/rawdb/ancient_utils.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index 96bd9ee408..dfb2fdfb67 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -88,6 +88,9 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { infos = append(infos, info) case stateFreezerName: + if ReadStateScheme(db) != PathScheme { + continue + } datadir, err := db.AncientDatadir() if err != nil { return nil, err From 8b6cf128af65db9faca8529f680c0f24f9660377 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 13 Sep 2023 19:13:47 +0200 Subject: [PATCH 04/98] metrics: refactor metrics (#28035) This change includes a lot of things, listed below. ### Split up interfaces, write vs read The interfaces have been split up into one write-interface and one read-interface, with `Snapshot` being the gateway from write to read. This simplifies the semantics _a lot_. Example of splitting up an interface into one readonly 'snapshot' part, and one updatable writeonly part: ```golang type MeterSnapshot interface { Count() int64 Rate1() float64 Rate5() float64 Rate15() float64 RateMean() float64 } // Meters count events to produce exponentially-weighted moving average rates // at one-, five-, and fifteen-minutes and a mean rate. type Meter interface { Mark(int64) Snapshot() MeterSnapshot Stop() } ``` ### A note about concurrency This PR makes the concurrency model clearer. We have actual meters and snapshot of meters. The `meter` is the thing which can be accessed from the registry, and updates can be made to it. - For all `meters`, (`Gauge`, `Timer` etc), it is assumed that they are accessed by different threads, making updates. Therefore, all `meters` update-methods (`Inc`, `Add`, `Update`, `Clear` etc) need to be concurrency-safe. - All `meters` have a `Snapshot()` method. This method is _usually_ called from one thread, a backend-exporter. But it's fully possible to have several exporters simultaneously: therefore this method should also be concurrency-safe. TLDR: `meter`s are accessible via registry, all their methods must be concurrency-safe. 
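For illustration, a minimal sketch of that write side (not code from this PR: the metric name and goroutine layout are invented; `GetOrRegisterMeter` and `Mark` are the existing helpers):

```golang
package main

import (
	"sync"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // with the global switch off, a no-op NilMeter is returned

	// One meter obtained from the default registry, updated from several
	// goroutines at once: the access pattern the write interfaces must
	// support safely.
	m := metrics.GetOrRegisterMeter("example/ingress", nil)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				m.Mark(1)
			}
		}()
	}
	wg.Wait()
}
```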
For all `Snapshot`s, it is assumed that an individual exporter-thread has obtained a `meter` from the registry, and called the `Snapshot` method to obtain a readonly snapshot. This snapshot is _not_ guaranteed to be concurrency-safe. There's no need for a snapshot to be concurrency-safe, since exporters should not share snapshots. Note, though, that by happenstance a lot of the snapshots _are_ concurrency-safe, being immutable minimal representations of a value. Only the more complex ones are _not_ threadsafe: those that lazily calculate things like `Variance()` and `Mean()`.

Example of how a background exporter typically works, obtaining the snapshot and sequentially accessing the non-threadsafe methods in it:
```golang
ms := metric.Snapshot()
...
fields := map[string]interface{}{
	"count":    ms.Count(),
	"max":      ms.Max(),
	"mean":     ms.Mean(),
	"min":      ms.Min(),
	"stddev":   ms.StdDev(),
	"variance": ms.Variance(),
}
```
TLDR: `snapshots` are not guaranteed to be concurrency-safe (but often are).

### Sample changes

I also changed the `Sample` type: previously, it iterated the samples fully every time `Mean()`, `Sum()`, `Min()` or `Max()` was invoked. Since we now have readonly base data, we can just iterate it once, in the constructor, and set all four values at once. The same thing has been done for runtimehistogram.

### ResettingTimer API

Back when ResettingTimer was implemented, as part of https://github.com/ethereum/go-ethereum/pull/15910, Anton implemented a `Percentiles` method on the new type. However, the method did not conform to the other existing types which also had a `Percentiles` method:

1. The existing ones, on input, took `0.5` to mean `50%`. Anton used `50` to mean `50%`.
2. The existing ones returned `float64` outputs, thus interpolating between values: a value-set of `0, 10` at `50%` would return `5`, whereas Anton's would return either `0` or `10`.

This PR removes the 'new' version, and uses only the 'legacy' percentiles, also for the ResettingTimer type (a short sketch of this convention follows at the end of this message). The resetting timer snapshot was also defined so that it would expose the internal values. This has been removed, and getters for `Max`, `Min` and `Mean` have been added instead.

### Unexport types

A lot of types were exported, but do not need to be. This PR unexports quite a lot of them.
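To make the retained percentile convention concrete, a hedged sketch (the metric name is invented; the expected output follows from the interpolation example above):

```golang
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // otherwise a no-op timer is returned

	// Two recorded values, 0ns and 10ns. With the legacy convention an
	// input of 0.5 means 50%, and the output interpolates between samples.
	t := metrics.GetOrRegisterResettingTimer("example/op", nil)
	t.Update(0)
	t.Update(10 * time.Nanosecond)

	ps := t.Snapshot().Percentiles([]float64{0.5})
	fmt.Println(ps[0]) // 5: interpolated, rather than snapping to 0 or 10
}
```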
--- core/state/statedb.go | 10 +- metrics/counter.go | 72 +--- metrics/counter_float64.go | 61 +-- metrics/counter_float_64_test.go | 16 +- metrics/counter_test.go | 14 +- metrics/doc.go | 4 - metrics/ewma.go | 91 ++--- metrics/ewma_test.go | 233 +++-------- metrics/exp/exp.go | 32 +- metrics/gauge.go | 116 ++---- metrics/gauge_float64.go | 80 +--- metrics/gauge_float64_test.go | 33 +- metrics/gauge_info.go | 88 +--- metrics/gauge_info_test.go | 49 +-- metrics/gauge_test.go | 39 +- metrics/graphite.go | 10 +- metrics/histogram.go | 158 +------ metrics/histogram_test.go | 8 +- metrics/inactive.go | 48 +++ metrics/influxdb/influxdb.go | 21 +- metrics/influxdb/influxdb_test.go | 2 +- metrics/influxdb/testdata/influxdbv1.want | 4 +- metrics/influxdb/testdata/influxdbv2.want | 4 +- metrics/internal/sampledata.go | 33 +- metrics/internal/sampledata_test.go | 27 ++ metrics/librato/librato.go | 73 ++-- metrics/log.go | 10 +- metrics/meter.go | 207 ++-------- metrics/meter_test.go | 30 +- metrics/metrics.go | 6 + metrics/metrics_test.go | 4 +- metrics/opentsdb.go | 10 +- metrics/opentsdb_test.go | 15 + metrics/prometheus/collector.go | 25 +- metrics/prometheus/collector_test.go | 4 +- metrics/prometheus/testdata/prometheus.want | 28 +- metrics/registry.go | 8 +- metrics/registry_test.go | 4 +- metrics/resetting_sample.go | 2 +- metrics/resetting_timer.go | 204 ++++----- metrics/resetting_timer_test.go | 101 ++--- metrics/runtimehistogram.go | 218 +++++----- metrics/runtimehistogram_test.go | 37 +- metrics/sample.go | 432 ++++++-------------- metrics/sample_test.go | 150 +++---- metrics/syslog.go | 10 +- metrics/testdata/opentsb.want | 2 +- metrics/timer.go | 214 ++-------- metrics/timer_test.go | 10 +- metrics/writer.go | 10 +- 50 files changed, 1035 insertions(+), 2032 deletions(-) delete mode 100644 metrics/doc.go create mode 100644 metrics/inactive.go create mode 100644 metrics/internal/sampledata_test.go diff --git a/core/state/statedb.go b/core/state/statedb.go index bd578ba23d..c1b5b0874c 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1061,12 +1061,10 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root slotDeletionSkip.Inc(1) } n := int64(len(slots)) - if n > slotDeletionMaxCount.Value() { - slotDeletionMaxCount.Update(n) - } - if int64(size) > slotDeletionMaxSize.Value() { - slotDeletionMaxSize.Update(int64(size)) - } + + slotDeletionMaxCount.UpdateIfGt(int64(len(slots))) + slotDeletionMaxSize.UpdateIfGt(int64(size)) + slotDeletionTimer.UpdateSince(start) slotDeletionCount.Mark(n) slotDeletionSize.Mark(int64(size)) diff --git a/metrics/counter.go b/metrics/counter.go index 55e1c59540..cb81599c21 100644 --- a/metrics/counter.go +++ b/metrics/counter.go @@ -4,13 +4,16 @@ import ( "sync/atomic" ) +type CounterSnapshot interface { + Count() int64 +} + // Counters hold an int64 value that can be incremented and decremented. type Counter interface { Clear() - Count() int64 Dec(int64) Inc(int64) - Snapshot() Counter + Snapshot() CounterSnapshot } // GetOrRegisterCounter returns an existing Counter or constructs and registers @@ -38,13 +41,13 @@ func NewCounter() Counter { if !Enabled { return NilCounter{} } - return &StandardCounter{} + return new(StandardCounter) } // NewCounterForced constructs a new StandardCounter and returns it no matter if // the global switch is enabled or not. func NewCounterForced() Counter { - return &StandardCounter{} + return new(StandardCounter) } // NewRegisteredCounter constructs and registers a new StandardCounter. 
@@ -70,75 +73,40 @@ func NewRegisteredCounterForced(name string, r Registry) Counter { return c } -// CounterSnapshot is a read-only copy of another Counter. -type CounterSnapshot int64 - -// Clear panics. -func (CounterSnapshot) Clear() { - panic("Clear called on a CounterSnapshot") -} +// counterSnapshot is a read-only copy of another Counter. +type counterSnapshot int64 // Count returns the count at the time the snapshot was taken. -func (c CounterSnapshot) Count() int64 { return int64(c) } - -// Dec panics. -func (CounterSnapshot) Dec(int64) { - panic("Dec called on a CounterSnapshot") -} - -// Inc panics. -func (CounterSnapshot) Inc(int64) { - panic("Inc called on a CounterSnapshot") -} - -// Snapshot returns the snapshot. -func (c CounterSnapshot) Snapshot() Counter { return c } +func (c counterSnapshot) Count() int64 { return int64(c) } // NilCounter is a no-op Counter. type NilCounter struct{} -// Clear is a no-op. -func (NilCounter) Clear() {} - -// Count is a no-op. -func (NilCounter) Count() int64 { return 0 } - -// Dec is a no-op. -func (NilCounter) Dec(i int64) {} - -// Inc is a no-op. -func (NilCounter) Inc(i int64) {} - -// Snapshot is a no-op. -func (NilCounter) Snapshot() Counter { return NilCounter{} } +func (NilCounter) Clear() {} +func (NilCounter) Dec(i int64) {} +func (NilCounter) Inc(i int64) {} +func (NilCounter) Snapshot() CounterSnapshot { return (*emptySnapshot)(nil) } // StandardCounter is the standard implementation of a Counter and uses the // sync/atomic package to manage a single int64 value. -type StandardCounter struct { - count atomic.Int64 -} +type StandardCounter atomic.Int64 // Clear sets the counter to zero. func (c *StandardCounter) Clear() { - c.count.Store(0) -} - -// Count returns the current count. -func (c *StandardCounter) Count() int64 { - return c.count.Load() + (*atomic.Int64)(c).Store(0) } // Dec decrements the counter by the given amount. func (c *StandardCounter) Dec(i int64) { - c.count.Add(-i) + (*atomic.Int64)(c).Add(-i) } // Inc increments the counter by the given amount. func (c *StandardCounter) Inc(i int64) { - c.count.Add(i) + (*atomic.Int64)(c).Add(i) } // Snapshot returns a read-only copy of the counter. -func (c *StandardCounter) Snapshot() Counter { - return CounterSnapshot(c.Count()) +func (c *StandardCounter) Snapshot() CounterSnapshot { + return counterSnapshot((*atomic.Int64)(c).Load()) } diff --git a/metrics/counter_float64.go b/metrics/counter_float64.go index d1197bb8e0..15c81494ef 100644 --- a/metrics/counter_float64.go +++ b/metrics/counter_float64.go @@ -5,13 +5,16 @@ import ( "sync/atomic" ) +type CounterFloat64Snapshot interface { + Count() float64 +} + // CounterFloat64 holds a float64 value that can be incremented and decremented. type CounterFloat64 interface { Clear() - Count() float64 Dec(float64) Inc(float64) - Snapshot() CounterFloat64 + Snapshot() CounterFloat64Snapshot } // GetOrRegisterCounterFloat64 returns an existing CounterFloat64 or constructs and registers @@ -71,47 +74,19 @@ func NewRegisteredCounterFloat64Forced(name string, r Registry) CounterFloat64 { return c } -// CounterFloat64Snapshot is a read-only copy of another CounterFloat64. -type CounterFloat64Snapshot float64 - -// Clear panics. -func (CounterFloat64Snapshot) Clear() { - panic("Clear called on a CounterFloat64Snapshot") -} +// counterFloat64Snapshot is a read-only copy of another CounterFloat64. +type counterFloat64Snapshot float64 // Count returns the value at the time the snapshot was taken. 
-func (c CounterFloat64Snapshot) Count() float64 { return float64(c) } - -// Dec panics. -func (CounterFloat64Snapshot) Dec(float64) { - panic("Dec called on a CounterFloat64Snapshot") -} +func (c counterFloat64Snapshot) Count() float64 { return float64(c) } -// Inc panics. -func (CounterFloat64Snapshot) Inc(float64) { - panic("Inc called on a CounterFloat64Snapshot") -} - -// Snapshot returns the snapshot. -func (c CounterFloat64Snapshot) Snapshot() CounterFloat64 { return c } - -// NilCounterFloat64 is a no-op CounterFloat64. type NilCounterFloat64 struct{} -// Clear is a no-op. -func (NilCounterFloat64) Clear() {} - -// Count is a no-op. -func (NilCounterFloat64) Count() float64 { return 0.0 } - -// Dec is a no-op. -func (NilCounterFloat64) Dec(i float64) {} - -// Inc is a no-op. -func (NilCounterFloat64) Inc(i float64) {} - -// Snapshot is a no-op. -func (NilCounterFloat64) Snapshot() CounterFloat64 { return NilCounterFloat64{} } +func (NilCounterFloat64) Clear() {} +func (NilCounterFloat64) Count() float64 { return 0.0 } +func (NilCounterFloat64) Dec(i float64) {} +func (NilCounterFloat64) Inc(i float64) {} +func (NilCounterFloat64) Snapshot() CounterFloat64Snapshot { return NilCounterFloat64{} } // StandardCounterFloat64 is the standard implementation of a CounterFloat64 and uses the // atomic to manage a single float64 value. @@ -124,11 +99,6 @@ func (c *StandardCounterFloat64) Clear() { c.floatBits.Store(0) } -// Count returns the current value. -func (c *StandardCounterFloat64) Count() float64 { - return math.Float64frombits(c.floatBits.Load()) -} - // Dec decrements the counter by the given amount. func (c *StandardCounterFloat64) Dec(v float64) { atomicAddFloat(&c.floatBits, -v) @@ -140,8 +110,9 @@ func (c *StandardCounterFloat64) Inc(v float64) { } // Snapshot returns a read-only copy of the counter. 
-func (c *StandardCounterFloat64) Snapshot() CounterFloat64 { - return CounterFloat64Snapshot(c.Count()) +func (c *StandardCounterFloat64) Snapshot() CounterFloat64Snapshot { + v := math.Float64frombits(c.floatBits.Load()) + return counterFloat64Snapshot(v) } func atomicAddFloat(fbits *atomic.Uint64, v float64) { diff --git a/metrics/counter_float_64_test.go b/metrics/counter_float_64_test.go index f17aca330c..c21bd3307f 100644 --- a/metrics/counter_float_64_test.go +++ b/metrics/counter_float_64_test.go @@ -27,7 +27,7 @@ func BenchmarkCounterFloat64Parallel(b *testing.B) { }() } wg.Wait() - if have, want := c.Count(), 10.0*float64(b.N); have != want { + if have, want := c.Snapshot().Count(), 10.0*float64(b.N); have != want { b.Fatalf("have %f want %f", have, want) } } @@ -36,7 +36,7 @@ func TestCounterFloat64Clear(t *testing.T) { c := NewCounterFloat64() c.Inc(1.0) c.Clear() - if count := c.Count(); count != 0 { + if count := c.Snapshot().Count(); count != 0 { t.Errorf("c.Count(): 0 != %v\n", count) } } @@ -44,7 +44,7 @@ func TestCounterFloat64Clear(t *testing.T) { func TestCounterFloat64Dec1(t *testing.T) { c := NewCounterFloat64() c.Dec(1.0) - if count := c.Count(); count != -1.0 { + if count := c.Snapshot().Count(); count != -1.0 { t.Errorf("c.Count(): -1.0 != %v\n", count) } } @@ -52,7 +52,7 @@ func TestCounterFloat64Dec1(t *testing.T) { func TestCounterFloat64Dec2(t *testing.T) { c := NewCounterFloat64() c.Dec(2.0) - if count := c.Count(); count != -2.0 { + if count := c.Snapshot().Count(); count != -2.0 { t.Errorf("c.Count(): -2.0 != %v\n", count) } } @@ -60,7 +60,7 @@ func TestCounterFloat64Dec2(t *testing.T) { func TestCounterFloat64Inc1(t *testing.T) { c := NewCounterFloat64() c.Inc(1.0) - if count := c.Count(); count != 1.0 { + if count := c.Snapshot().Count(); count != 1.0 { t.Errorf("c.Count(): 1.0 != %v\n", count) } } @@ -68,7 +68,7 @@ func TestCounterFloat64Inc1(t *testing.T) { func TestCounterFloat64Inc2(t *testing.T) { c := NewCounterFloat64() c.Inc(2.0) - if count := c.Count(); count != 2.0 { + if count := c.Snapshot().Count(); count != 2.0 { t.Errorf("c.Count(): 2.0 != %v\n", count) } } @@ -85,7 +85,7 @@ func TestCounterFloat64Snapshot(t *testing.T) { func TestCounterFloat64Zero(t *testing.T) { c := NewCounterFloat64() - if count := c.Count(); count != 0 { + if count := c.Snapshot().Count(); count != 0 { t.Errorf("c.Count(): 0 != %v\n", count) } } @@ -93,7 +93,7 @@ func TestCounterFloat64Zero(t *testing.T) { func TestGetOrRegisterCounterFloat64(t *testing.T) { r := NewRegistry() NewRegisteredCounterFloat64("foo", r).Inc(47.0) - if c := GetOrRegisterCounterFloat64("foo", r); c.Count() != 47.0 { + if c := GetOrRegisterCounterFloat64("foo", r).Snapshot(); c.Count() != 47.0 { t.Fatal(c) } } diff --git a/metrics/counter_test.go b/metrics/counter_test.go index af26ef1548..1b15b23f21 100644 --- a/metrics/counter_test.go +++ b/metrics/counter_test.go @@ -14,7 +14,7 @@ func TestCounterClear(t *testing.T) { c := NewCounter() c.Inc(1) c.Clear() - if count := c.Count(); count != 0 { + if count := c.Snapshot().Count(); count != 0 { t.Errorf("c.Count(): 0 != %v\n", count) } } @@ -22,7 +22,7 @@ func TestCounterClear(t *testing.T) { func TestCounterDec1(t *testing.T) { c := NewCounter() c.Dec(1) - if count := c.Count(); count != -1 { + if count := c.Snapshot().Count(); count != -1 { t.Errorf("c.Count(): -1 != %v\n", count) } } @@ -30,7 +30,7 @@ func TestCounterDec1(t *testing.T) { func TestCounterDec2(t *testing.T) { c := NewCounter() c.Dec(2) - if count := c.Count(); count != -2 { + if 
count := c.Snapshot().Count(); count != -2 { t.Errorf("c.Count(): -2 != %v\n", count) } } @@ -38,7 +38,7 @@ func TestCounterDec2(t *testing.T) { func TestCounterInc1(t *testing.T) { c := NewCounter() c.Inc(1) - if count := c.Count(); count != 1 { + if count := c.Snapshot().Count(); count != 1 { t.Errorf("c.Count(): 1 != %v\n", count) } } @@ -46,7 +46,7 @@ func TestCounterInc1(t *testing.T) { func TestCounterInc2(t *testing.T) { c := NewCounter() c.Inc(2) - if count := c.Count(); count != 2 { + if count := c.Snapshot().Count(); count != 2 { t.Errorf("c.Count(): 2 != %v\n", count) } } @@ -63,7 +63,7 @@ func TestCounterSnapshot(t *testing.T) { func TestCounterZero(t *testing.T) { c := NewCounter() - if count := c.Count(); count != 0 { + if count := c.Snapshot().Count(); count != 0 { t.Errorf("c.Count(): 0 != %v\n", count) } } @@ -71,7 +71,7 @@ func TestCounterZero(t *testing.T) { func TestGetOrRegisterCounter(t *testing.T) { r := NewRegistry() NewRegisteredCounter("foo", r).Inc(47) - if c := GetOrRegisterCounter("foo", r); c.Count() != 47 { + if c := GetOrRegisterCounter("foo", r).Snapshot(); c.Count() != 47 { t.Fatal(c) } } diff --git a/metrics/doc.go b/metrics/doc.go deleted file mode 100644 index 13f429c168..0000000000 --- a/metrics/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -package metrics - -const epsilon = 0.0000000000000001 -const epsilonPercentile = .00000000001 diff --git a/metrics/ewma.go b/metrics/ewma.go index ed95cba19b..1d7a4f00cf 100644 --- a/metrics/ewma.go +++ b/metrics/ewma.go @@ -7,11 +7,14 @@ import ( "time" ) +type EWMASnapshot interface { + Rate() float64 +} + // EWMAs continuously calculate an exponentially-weighted moving average // based on an outside source of clock ticks. type EWMA interface { - Rate() float64 - Snapshot() EWMA + Snapshot() EWMASnapshot Tick() Update(int64) } @@ -36,40 +39,19 @@ func NewEWMA15() EWMA { return NewEWMA(1 - math.Exp(-5.0/60.0/15)) } -// EWMASnapshot is a read-only copy of another EWMA. -type EWMASnapshot float64 +// ewmaSnapshot is a read-only copy of another EWMA. +type ewmaSnapshot float64 // Rate returns the rate of events per second at the time the snapshot was // taken. -func (a EWMASnapshot) Rate() float64 { return float64(a) } - -// Snapshot returns the snapshot. -func (a EWMASnapshot) Snapshot() EWMA { return a } - -// Tick panics. -func (EWMASnapshot) Tick() { - panic("Tick called on an EWMASnapshot") -} - -// Update panics. -func (EWMASnapshot) Update(int64) { - panic("Update called on an EWMASnapshot") -} +func (a ewmaSnapshot) Rate() float64 { return float64(a) } // NilEWMA is a no-op EWMA. type NilEWMA struct{} -// Rate is a no-op. -func (NilEWMA) Rate() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } - -// Tick is a no-op. -func (NilEWMA) Tick() {} - -// Update is a no-op. -func (NilEWMA) Update(n int64) {} +func (NilEWMA) Snapshot() EWMASnapshot { return (*emptySnapshot)(nil) } +func (NilEWMA) Tick() {} +func (NilEWMA) Update(n int64) {} // StandardEWMA is the standard implementation of an EWMA and tracks the number // of uncounted events and processes them on each tick. It uses the @@ -77,37 +59,50 @@ func (NilEWMA) Update(n int64) {} type StandardEWMA struct { uncounted atomic.Int64 alpha float64 - rate float64 - init bool + rate atomic.Uint64 + init atomic.Bool mutex sync.Mutex } -// Rate returns the moving average rate of events per second. 
-func (a *StandardEWMA) Rate() float64 { - a.mutex.Lock() - defer a.mutex.Unlock() - return a.rate * float64(time.Second) -} - // Snapshot returns a read-only copy of the EWMA. -func (a *StandardEWMA) Snapshot() EWMA { - return EWMASnapshot(a.Rate()) +func (a *StandardEWMA) Snapshot() EWMASnapshot { + r := math.Float64frombits(a.rate.Load()) * float64(time.Second) + return ewmaSnapshot(r) } // Tick ticks the clock to update the moving average. It assumes it is called // every five seconds. func (a *StandardEWMA) Tick() { - count := a.uncounted.Load() - a.uncounted.Add(-count) - instantRate := float64(count) / float64(5*time.Second) + // Optimization to avoid mutex locking in the hot-path. + if a.init.Load() { + a.updateRate(a.fetchInstantRate()) + return + } + // Slow-path: this is only needed on the first Tick() and preserves transactional updating + // of init and rate in the else block. The first conditional is needed below because + // a different thread could have set a.init = 1 between the time of the first atomic load and when + // the lock was acquired. a.mutex.Lock() - defer a.mutex.Unlock() - if a.init { - a.rate += a.alpha * (instantRate - a.rate) + if a.init.Load() { + // The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section + // but again, this section is only invoked on the first successful Tick() operation. + a.updateRate(a.fetchInstantRate()) } else { - a.init = true - a.rate = instantRate + a.init.Store(true) + a.rate.Store(math.Float64bits(a.fetchInstantRate())) } + a.mutex.Unlock() +} + +func (a *StandardEWMA) fetchInstantRate() float64 { + count := a.uncounted.Swap(0) + return float64(count) / float64(5*time.Second) +} + +func (a *StandardEWMA) updateRate(instantRate float64) { + currentRate := math.Float64frombits(a.rate.Load()) + currentRate += a.alpha * (instantRate - currentRate) + a.rate.Store(math.Float64bits(currentRate)) } // Update adds n uncounted events. 
diff --git a/metrics/ewma_test.go b/metrics/ewma_test.go index 5b24419161..9a91b43db8 100644 --- a/metrics/ewma_test.go +++ b/metrics/ewma_test.go @@ -5,6 +5,8 @@ import ( "testing" ) +const epsilon = 0.0000000000000001 + func BenchmarkEWMA(b *testing.B) { a := NewEWMA1() b.ResetTimer() @@ -14,72 +16,33 @@ func BenchmarkEWMA(b *testing.B) { } } +func BenchmarkEWMAParallel(b *testing.B) { + a := NewEWMA1() + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + a.Update(1) + a.Tick() + } + }) +} + func TestEWMA1(t *testing.T) { a := NewEWMA1() a.Update(3) a.Tick() - if rate := a.Rate(); math.Abs(0.6-rate) > epsilon { - t.Errorf("initial a.Rate(): 0.6 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.22072766470286553-rate) > epsilon { - t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.08120116994196772-rate) > epsilon { - t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.029872241020718428-rate) > epsilon { - t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.01098938333324054-rate) > epsilon { - t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.004042768199451294-rate) > epsilon { - t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.0014872513059998212-rate) > epsilon { - t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.0005471291793327122-rate) > epsilon { - t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.00020127757674150815-rate) > epsilon { - t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(7.404588245200814e-05-rate) > epsilon { - t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(2.7239957857491083e-05-rate) > epsilon { - t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(1.0021020474147462e-05-rate) > epsilon { - t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(3.6865274119969525e-06-rate) > epsilon { - t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(1.3561976441886433e-06-rate) > epsilon { - t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(4.989172314621449e-07-rate) > epsilon { - t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(1.8354139230109722e-07-rate) > epsilon { - t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate) + for i, want := range []float64{0.6, + 0.22072766470286553, 0.08120116994196772, 0.029872241020718428, + 0.01098938333324054, 0.004042768199451294, 0.0014872513059998212, + 0.0005471291793327122, 0.00020127757674150815, 7.404588245200814e-05, + 2.7239957857491083e-05, 1.0021020474147462e-05, 3.6865274119969525e-06, + 1.3561976441886433e-06, 4.989172314621449e-07, 1.8354139230109722e-07, + } { + if rate := 
a.Snapshot().Rate(); math.Abs(want-rate) > epsilon { + t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate) + } + elapseMinute(a) } } @@ -87,68 +50,17 @@ func TestEWMA5(t *testing.T) { a := NewEWMA5() a.Update(3) a.Tick() - if rate := a.Rate(); math.Abs(0.6-rate) > epsilon { - t.Errorf("initial a.Rate(): 0.6 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.49123845184678905-rate) > epsilon { - t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.4021920276213837-rate) > epsilon { - t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.32928698165641596-rate) > epsilon { - t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.269597378470333-rate) > epsilon { - t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.2207276647028654-rate) > epsilon { - t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.18071652714732128-rate) > epsilon { - t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.14795817836496392-rate) > epsilon { - t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.12113791079679326-rate) > epsilon { - t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.09917933293295193-rate) > epsilon { - t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.08120116994196763-rate) > epsilon { - t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.06648189501740036-rate) > epsilon { - t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.05443077197364752-rate) > epsilon { - t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.04456414692860035-rate) > epsilon { - t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.03648603757513079-rate) > epsilon { - t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.0298722410207183831020718428-rate) > epsilon { - t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate) + for i, want := range []float64{ + 0.6, 0.49123845184678905, 0.4021920276213837, 0.32928698165641596, + 0.269597378470333, 0.2207276647028654, 0.18071652714732128, + 0.14795817836496392, 0.12113791079679326, 0.09917933293295193, + 0.08120116994196763, 0.06648189501740036, 0.05443077197364752, + 0.04456414692860035, 0.03648603757513079, 0.0298722410207183831020718428, + } { + if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon { + t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate) + } + elapseMinute(a) } } @@ -156,68 +68,17 @@ func TestEWMA15(t *testing.T) { a := NewEWMA15() a.Update(3) a.Tick() - if rate := a.Rate(); math.Abs(0.6-rate) > epsilon { - t.Errorf("initial a.Rate(): 0.6 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 
math.Abs(0.5613041910189706-rate) > epsilon { - t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.5251039914257684-rate) > epsilon { - t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.4912384518467888184678905-rate) > epsilon { - t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.459557003018789-rate) > epsilon { - t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.4299187863442732-rate) > epsilon { - t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.4021920276213831-rate) > epsilon { - t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.37625345116383313-rate) > epsilon { - t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.3519877317060185-rate) > epsilon { - t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.3292869816564153165641596-rate) > epsilon { - t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.3080502714195546-rate) > epsilon { - t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.2881831806538789-rate) > epsilon { - t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.26959737847033216-rate) > epsilon { - t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.2522102307052083-rate) > epsilon { - t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.23594443252115815-rate) > epsilon { - t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); math.Abs(0.2207276647028646247028654470286553-rate) > epsilon { - t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate) + for i, want := range []float64{ + 0.6, 0.5613041910189706, 0.5251039914257684, 0.4912384518467888184678905, + 0.459557003018789, 0.4299187863442732, 0.4021920276213831, + 0.37625345116383313, 0.3519877317060185, 0.3292869816564153165641596, + 0.3080502714195546, 0.2881831806538789, 0.26959737847033216, + 0.2522102307052083, 0.23594443252115815, 0.2207276647028646247028654470286553, + } { + if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon { + t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate) + } + elapseMinute(a) } } diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go index 9e850f96b2..7e3f82a075 100644 --- a/metrics/exp/exp.go +++ b/metrics/exp/exp.go @@ -109,25 +109,25 @@ func (exp *exp) getInfo(name string) *expvar.String { return v } -func (exp *exp) publishCounter(name string, metric metrics.Counter) { +func (exp *exp) publishCounter(name string, metric metrics.CounterSnapshot) { v := exp.getInt(name) v.Set(metric.Count()) } -func (exp *exp) publishCounterFloat64(name string, metric metrics.CounterFloat64) { +func (exp *exp) publishCounterFloat64(name string, metric metrics.CounterFloat64Snapshot) { v := 
exp.getFloat(name) v.Set(metric.Count()) } -func (exp *exp) publishGauge(name string, metric metrics.Gauge) { +func (exp *exp) publishGauge(name string, metric metrics.GaugeSnapshot) { v := exp.getInt(name) v.Set(metric.Value()) } -func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) { +func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64Snapshot) { exp.getFloat(name).Set(metric.Value()) } -func (exp *exp) publishGaugeInfo(name string, metric metrics.GaugeInfo) { +func (exp *exp) publishGaugeInfo(name string, metric metrics.GaugeInfoSnapshot) { exp.getInfo(name).Set(metric.Value().String()) } @@ -176,28 +176,28 @@ func (exp *exp) publishTimer(name string, metric metrics.Timer) { func (exp *exp) publishResettingTimer(name string, metric metrics.ResettingTimer) { t := metric.Snapshot() - ps := t.Percentiles([]float64{50, 75, 95, 99}) - exp.getInt(name + ".count").Set(int64(len(t.Values()))) + ps := t.Percentiles([]float64{0.50, 0.75, 0.95, 0.99}) + exp.getInt(name + ".count").Set(int64(t.Count())) exp.getFloat(name + ".mean").Set(t.Mean()) - exp.getInt(name + ".50-percentile").Set(ps[0]) - exp.getInt(name + ".75-percentile").Set(ps[1]) - exp.getInt(name + ".95-percentile").Set(ps[2]) - exp.getInt(name + ".99-percentile").Set(ps[3]) + exp.getFloat(name + ".50-percentile").Set(ps[0]) + exp.getFloat(name + ".75-percentile").Set(ps[1]) + exp.getFloat(name + ".95-percentile").Set(ps[2]) + exp.getFloat(name + ".99-percentile").Set(ps[3]) } func (exp *exp) syncToExpvar() { exp.registry.Each(func(name string, i interface{}) { switch i := i.(type) { case metrics.Counter: - exp.publishCounter(name, i) + exp.publishCounter(name, i.Snapshot()) case metrics.CounterFloat64: - exp.publishCounterFloat64(name, i) + exp.publishCounterFloat64(name, i.Snapshot()) case metrics.Gauge: - exp.publishGauge(name, i) + exp.publishGauge(name, i.Snapshot()) case metrics.GaugeFloat64: - exp.publishGaugeFloat64(name, i) + exp.publishGaugeFloat64(name, i.Snapshot()) case metrics.GaugeInfo: - exp.publishGaugeInfo(name, i) + exp.publishGaugeInfo(name, i.Snapshot()) case metrics.Histogram: exp.publishHistogram(name, i) case metrics.Meter: diff --git a/metrics/gauge.go b/metrics/gauge.go index 81137d7f7c..68f8f11abc 100644 --- a/metrics/gauge.go +++ b/metrics/gauge.go @@ -2,13 +2,18 @@ package metrics import "sync/atomic" +// gaugeSnapshot contains a readonly int64. +type GaugeSnapshot interface { + Value() int64 +} + // Gauges hold an int64 value that can be set arbitrarily. type Gauge interface { - Snapshot() Gauge + Snapshot() GaugeSnapshot Update(int64) + UpdateIfGt(int64) Dec(int64) Inc(int64) - Value() int64 } // GetOrRegisterGauge returns an existing Gauge or constructs and registers a @@ -38,65 +43,20 @@ func NewRegisteredGauge(name string, r Registry) Gauge { return c } -// NewFunctionalGauge constructs a new FunctionalGauge. -func NewFunctionalGauge(f func() int64) Gauge { - if !Enabled { - return NilGauge{} - } - return &FunctionalGauge{value: f} -} - -// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. -func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge { - c := NewFunctionalGauge(f) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeSnapshot is a read-only copy of another Gauge. -type GaugeSnapshot int64 - -// Snapshot returns the snapshot. -func (g GaugeSnapshot) Snapshot() Gauge { return g } - -// Update panics. 
-func (GaugeSnapshot) Update(int64) {
-	panic("Update called on a GaugeSnapshot")
-}
-
-// Dec panics.
-func (GaugeSnapshot) Dec(int64) {
-	panic("Dec called on a GaugeSnapshot")
-}
-
-// Inc panics.
-func (GaugeSnapshot) Inc(int64) {
-	panic("Inc called on a GaugeSnapshot")
-}
+// gaugeSnapshot is a read-only copy of another Gauge.
+type gaugeSnapshot int64
 
 // Value returns the value at the time the snapshot was taken.
-func (g GaugeSnapshot) Value() int64 { return int64(g) }
+func (g gaugeSnapshot) Value() int64 { return int64(g) }
 
 // NilGauge is a no-op Gauge.
 type NilGauge struct{}
 
-// Snapshot is a no-op.
-func (NilGauge) Snapshot() Gauge { return NilGauge{} }
-
-// Update is a no-op.
-func (NilGauge) Update(v int64) {}
-
-// Dec is a no-op.
-func (NilGauge) Dec(i int64) {}
-
-// Inc is a no-op.
-func (NilGauge) Inc(i int64) {}
-
-// Value is a no-op.
-func (NilGauge) Value() int64 { return 0 }
+func (NilGauge) Snapshot() GaugeSnapshot { return (*emptySnapshot)(nil) }
+func (NilGauge) Update(v int64)          {}
+func (NilGauge) UpdateIfGt(v int64)      {}
+func (NilGauge) Dec(i int64)             {}
+func (NilGauge) Inc(i int64)             {}
 
 // StandardGauge is the standard implementation of a Gauge and uses the
 // sync/atomic package to manage a single int64 value.
@@ -105,8 +65,8 @@ type StandardGauge struct {
 }
 
 // Snapshot returns a read-only copy of the gauge.
-func (g *StandardGauge) Snapshot() Gauge {
-	return GaugeSnapshot(g.Value())
+func (g *StandardGauge) Snapshot() GaugeSnapshot {
+	return gaugeSnapshot(g.value.Load())
 }
 
 // Update updates the gauge's value.
@@ -114,9 +74,17 @@ func (g *StandardGauge) Update(v int64) {
 	g.value.Store(v)
 }
 
-// Value returns the gauge's current value.
-func (g *StandardGauge) Value() int64 {
-	return g.value.Load()
+// UpdateIfGt updates the gauge's value if v is larger than the current value.
+func (g *StandardGauge) UpdateIfGt(v int64) {
+	for {
+		exist := g.value.Load()
+		if exist >= v {
+			break
+		}
+		if g.value.CompareAndSwap(exist, v) {
+			break
+		}
+	}
 }
 
 // Dec decrements the gauge's current value by the given amount.
@@ -128,31 +96,3 @@ func (g *StandardGauge) Dec(i int64) {
 func (g *StandardGauge) Inc(i int64) {
 	g.value.Add(i)
 }
-
-// FunctionalGauge returns value from given function
-type FunctionalGauge struct {
-	value func() int64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGauge) Value() int64 {
-	return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGauge) Update(int64) {
-	panic("Update called on a FunctionalGauge")
-}
-
-// Dec panics.
-func (FunctionalGauge) Dec(int64) {
-	panic("Dec called on a FunctionalGauge")
-}
-
-// Inc panics.
-func (FunctionalGauge) Inc(int64) {
-	panic("Inc called on a FunctionalGauge")
-}
diff --git a/metrics/gauge_float64.go b/metrics/gauge_float64.go
index 237ff8036e..967f2bc60e 100644
--- a/metrics/gauge_float64.go
+++ b/metrics/gauge_float64.go
@@ -5,11 +5,14 @@ import (
 	"sync/atomic"
 )
 
-// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64Snapshot interface {
+	Value() float64
+}
+
+// GaugeFloat64 holds a float64 value that can be set arbitrarily.
type GaugeFloat64 interface { - Snapshot() GaugeFloat64 + Snapshot() GaugeFloat64Snapshot Update(float64) - Value() float64 } // GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a @@ -39,49 +42,18 @@ func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { return c } -// NewFunctionalGauge constructs a new FunctionalGauge. -func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 { - if !Enabled { - return NilGaugeFloat64{} - } - return &FunctionalGaugeFloat64{value: f} -} - -// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. -func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 { - c := NewFunctionalGaugeFloat64(f) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. -type GaugeFloat64Snapshot float64 - -// Snapshot returns the snapshot. -func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g } - -// Update panics. -func (GaugeFloat64Snapshot) Update(float64) { - panic("Update called on a GaugeFloat64Snapshot") -} +// gaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. +type gaugeFloat64Snapshot float64 // Value returns the value at the time the snapshot was taken. -func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) } +func (g gaugeFloat64Snapshot) Value() float64 { return float64(g) } // NilGauge is a no-op Gauge. type NilGaugeFloat64 struct{} -// Snapshot is a no-op. -func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} } - -// Update is a no-op. -func (NilGaugeFloat64) Update(v float64) {} - -// Value is a no-op. -func (NilGaugeFloat64) Value() float64 { return 0.0 } +func (NilGaugeFloat64) Snapshot() GaugeFloat64Snapshot { return NilGaugeFloat64{} } +func (NilGaugeFloat64) Update(v float64) {} +func (NilGaugeFloat64) Value() float64 { return 0.0 } // StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses // atomic to manage a single float64 value. @@ -90,34 +62,12 @@ type StandardGaugeFloat64 struct { } // Snapshot returns a read-only copy of the gauge. -func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { - return GaugeFloat64Snapshot(g.Value()) +func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64Snapshot { + v := math.Float64frombits(g.floatBits.Load()) + return gaugeFloat64Snapshot(v) } // Update updates the gauge's value. func (g *StandardGaugeFloat64) Update(v float64) { g.floatBits.Store(math.Float64bits(v)) } - -// Value returns the gauge's current value. -func (g *StandardGaugeFloat64) Value() float64 { - return math.Float64frombits(g.floatBits.Load()) -} - -// FunctionalGaugeFloat64 returns value from given function -type FunctionalGaugeFloat64 struct { - value func() float64 -} - -// Value returns the gauge's current value. -func (g FunctionalGaugeFloat64) Value() float64 { - return g.value() -} - -// Snapshot returns the snapshot. -func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) } - -// Update panics. 
-func (FunctionalGaugeFloat64) Update(float64) { - panic("Update called on a FunctionalGaugeFloat64") -} diff --git a/metrics/gauge_float64_test.go b/metrics/gauge_float64_test.go index 647d090009..f0ac7ea5e7 100644 --- a/metrics/gauge_float64_test.go +++ b/metrics/gauge_float64_test.go @@ -26,19 +26,11 @@ func BenchmarkGaugeFloat64Parallel(b *testing.B) { }() } wg.Wait() - if have, want := c.Value(), float64(b.N-1); have != want { + if have, want := c.Snapshot().Value(), float64(b.N-1); have != want { b.Fatalf("have %f want %f", have, want) } } -func TestGaugeFloat64(t *testing.T) { - g := NewGaugeFloat64() - g.Update(47.0) - if v := g.Value(); 47.0 != v { - t.Errorf("g.Value(): 47.0 != %v\n", v) - } -} - func TestGaugeFloat64Snapshot(t *testing.T) { g := NewGaugeFloat64() g.Update(47.0) @@ -53,28 +45,7 @@ func TestGetOrRegisterGaugeFloat64(t *testing.T) { r := NewRegistry() NewRegisteredGaugeFloat64("foo", r).Update(47.0) t.Logf("registry: %v", r) - if g := GetOrRegisterGaugeFloat64("foo", r); 47.0 != g.Value() { - t.Fatal(g) - } -} - -func TestFunctionalGaugeFloat64(t *testing.T) { - var counter float64 - fg := NewFunctionalGaugeFloat64(func() float64 { - counter++ - return counter - }) - fg.Value() - fg.Value() - if counter != 2 { - t.Error("counter != 2") - } -} - -func TestGetOrRegisterFunctionalGaugeFloat64(t *testing.T) { - r := NewRegistry() - NewRegisteredFunctionalGaugeFloat64("foo", r, func() float64 { return 47 }) - if g := GetOrRegisterGaugeFloat64("foo", r); g.Value() != 47 { + if g := GetOrRegisterGaugeFloat64("foo", r).Snapshot(); 47.0 != g.Value() { t.Fatal(g) } } diff --git a/metrics/gauge_info.go b/metrics/gauge_info.go index f1b2075939..c44b2d85f3 100644 --- a/metrics/gauge_info.go +++ b/metrics/gauge_info.go @@ -5,14 +5,17 @@ import ( "sync" ) +type GaugeInfoSnapshot interface { + Value() GaugeInfoValue +} + // GaugeInfos hold a GaugeInfoValue value that can be set arbitrarily. type GaugeInfo interface { - Snapshot() GaugeInfo Update(GaugeInfoValue) - Value() GaugeInfoValue + Snapshot() GaugeInfoSnapshot } -// GaugeInfoValue is a mappng of (string) keys to (string) values +// GaugeInfoValue is a mapping of keys to values type GaugeInfoValue map[string]string func (val GaugeInfoValue) String() string { @@ -49,49 +52,17 @@ func NewRegisteredGaugeInfo(name string, r Registry) GaugeInfo { return c } -// NewFunctionalGauge constructs a new FunctionalGauge. -func NewFunctionalGaugeInfo(f func() GaugeInfoValue) GaugeInfo { - if !Enabled { - return NilGaugeInfo{} - } - return &FunctionalGaugeInfo{value: f} -} - -// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. -func NewRegisteredFunctionalGaugeInfo(name string, r Registry, f func() GaugeInfoValue) GaugeInfo { - c := NewFunctionalGaugeInfo(f) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeInfoSnapshot is a read-only copy of another GaugeInfo. -type GaugeInfoSnapshot GaugeInfoValue - -// Snapshot returns the snapshot. -func (g GaugeInfoSnapshot) Snapshot() GaugeInfo { return g } - -// Update panics. -func (GaugeInfoSnapshot) Update(GaugeInfoValue) { - panic("Update called on a GaugeInfoSnapshot") -} +// gaugeInfoSnapshot is a read-only copy of another GaugeInfo. +type gaugeInfoSnapshot GaugeInfoValue // Value returns the value at the time the snapshot was taken. -func (g GaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) } +func (g gaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) } -// NilGauge is a no-op Gauge. 
type NilGaugeInfo struct{} -// Snapshot is a no-op. -func (NilGaugeInfo) Snapshot() GaugeInfo { return NilGaugeInfo{} } - -// Update is a no-op. -func (NilGaugeInfo) Update(v GaugeInfoValue) {} - -// Value is a no-op. -func (NilGaugeInfo) Value() GaugeInfoValue { return GaugeInfoValue{} } +func (NilGaugeInfo) Snapshot() GaugeInfoSnapshot { return NilGaugeInfo{} } +func (NilGaugeInfo) Update(v GaugeInfoValue) {} +func (NilGaugeInfo) Value() GaugeInfoValue { return GaugeInfoValue{} } // StandardGaugeInfo is the standard implementation of a GaugeInfo and uses // sync.Mutex to manage a single string value. @@ -101,8 +72,8 @@ type StandardGaugeInfo struct { } // Snapshot returns a read-only copy of the gauge. -func (g *StandardGaugeInfo) Snapshot() GaugeInfo { - return GaugeInfoSnapshot(g.Value()) +func (g *StandardGaugeInfo) Snapshot() GaugeInfoSnapshot { + return gaugeInfoSnapshot(g.value) } // Update updates the gauge's value. @@ -111,34 +82,3 @@ func (g *StandardGaugeInfo) Update(v GaugeInfoValue) { defer g.mutex.Unlock() g.value = v } - -// Value returns the gauge's current value. -func (g *StandardGaugeInfo) Value() GaugeInfoValue { - g.mutex.Lock() - defer g.mutex.Unlock() - return g.value -} - -// FunctionalGaugeInfo returns value from given function -type FunctionalGaugeInfo struct { - value func() GaugeInfoValue -} - -// Value returns the gauge's current value. -func (g FunctionalGaugeInfo) Value() GaugeInfoValue { - return g.value() -} - -// Value returns the gauge's current value in JSON string format -func (g FunctionalGaugeInfo) ValueJsonString() string { - data, _ := json.Marshal(g.value()) - return string(data) -} - -// Snapshot returns the snapshot. -func (g FunctionalGaugeInfo) Snapshot() GaugeInfo { return GaugeInfoSnapshot(g.Value()) } - -// Update panics. 
-func (FunctionalGaugeInfo) Update(GaugeInfoValue) { - panic("Update called on a FunctionalGaugeInfo") -} diff --git a/metrics/gauge_info_test.go b/metrics/gauge_info_test.go index 4227a6a85f..319afbf92e 100644 --- a/metrics/gauge_info_test.go +++ b/metrics/gauge_info_test.go @@ -1,7 +1,6 @@ package metrics import ( - "strconv" "testing" ) @@ -14,22 +13,14 @@ func TestGaugeInfoJsonString(t *testing.T) { }, ) want := `{"anotherKey":"any_string_value","chain_id":"5","third_key":"anything"}` - if have := g.Value().String(); have != want { - t.Errorf("\nhave: %v\nwant: %v\n", have, want) - } -} -func TestGaugeInfoSnapshot(t *testing.T) { - g := NewGaugeInfo() - g.Update(GaugeInfoValue{"value": "original"}) - snapshot := g.Snapshot() // Snapshot @chainid 5 + original := g.Snapshot() g.Update(GaugeInfoValue{"value": "updated"}) - // The 'g' should be updated - if have, want := g.Value().String(), `{"value":"updated"}`; have != want { + + if have := original.Value().String(); have != want { t.Errorf("\nhave: %v\nwant: %v\n", have, want) } - // Snapshot should be unupdated - if have, want := snapshot.Value().String(), `{"value":"original"}`; have != want { + if have, want := g.Snapshot().Value().String(), `{"value":"updated"}`; have != want { t.Errorf("\nhave: %v\nwant: %v\n", have, want) } } @@ -38,38 +29,8 @@ func TestGetOrRegisterGaugeInfo(t *testing.T) { r := NewRegistry() NewRegisteredGaugeInfo("foo", r).Update( GaugeInfoValue{"chain_id": "5"}) - g := GetOrRegisterGaugeInfo("foo", r) + g := GetOrRegisterGaugeInfo("foo", r).Snapshot() if have, want := g.Value().String(), `{"chain_id":"5"}`; have != want { t.Errorf("have\n%v\nwant\n%v\n", have, want) } } - -func TestFunctionalGaugeInfo(t *testing.T) { - info := GaugeInfoValue{"chain_id": "0"} - counter := 1 - // A "functional" gauge invokes the method to obtain the value - fg := NewFunctionalGaugeInfo(func() GaugeInfoValue { - info["chain_id"] = strconv.Itoa(counter) - counter++ - return info - }) - fg.Value() - fg.Value() - if have, want := info["chain_id"], "2"; have != want { - t.Errorf("have %v want %v", have, want) - } -} - -func TestGetOrRegisterFunctionalGaugeInfo(t *testing.T) { - r := NewRegistry() - NewRegisteredFunctionalGaugeInfo("foo", r, func() GaugeInfoValue { - return GaugeInfoValue{ - "chain_id": "5", - } - }) - want := `{"chain_id":"5"}` - have := GetOrRegisterGaugeInfo("foo", r).Value().String() - if have != want { - t.Errorf("have\n%v\nwant\n%v\n", have, want) - } -} diff --git a/metrics/gauge_test.go b/metrics/gauge_test.go index a98fe985d8..f2ba930bc4 100644 --- a/metrics/gauge_test.go +++ b/metrics/gauge_test.go @@ -1,7 +1,6 @@ package metrics import ( - "fmt" "testing" ) @@ -13,14 +12,6 @@ func BenchmarkGauge(b *testing.B) { } } -func TestGauge(t *testing.T) { - g := NewGauge() - g.Update(int64(47)) - if v := g.Value(); v != 47 { - t.Errorf("g.Value(): 47 != %v\n", v) - } -} - func TestGaugeSnapshot(t *testing.T) { g := NewGauge() g.Update(int64(47)) @@ -34,35 +25,7 @@ func TestGaugeSnapshot(t *testing.T) { func TestGetOrRegisterGauge(t *testing.T) { r := NewRegistry() NewRegisteredGauge("foo", r).Update(47) - if g := GetOrRegisterGauge("foo", r); g.Value() != 47 { - t.Fatal(g) - } -} - -func TestFunctionalGauge(t *testing.T) { - var counter int64 - fg := NewFunctionalGauge(func() int64 { - counter++ - return counter - }) - fg.Value() - fg.Value() - if counter != 2 { - t.Error("counter != 2") - } -} - -func TestGetOrRegisterFunctionalGauge(t *testing.T) { - r := NewRegistry() - NewRegisteredFunctionalGauge("foo", r, 
func() int64 { return 47 }) - if g := GetOrRegisterGauge("foo", r); g.Value() != 47 { + if g := GetOrRegisterGauge("foo", r); g.Snapshot().Value() != 47 { t.Fatal(g) } } - -func ExampleGetOrRegisterGauge() { - m := "server.bytes_sent" - g := GetOrRegisterGauge(m, nil) - g.Update(47) - fmt.Println(g.Value()) // Output: 47 -} diff --git a/metrics/graphite.go b/metrics/graphite.go index 4e3dd3b3b8..aba752e0ed 100644 --- a/metrics/graphite.go +++ b/metrics/graphite.go @@ -66,15 +66,15 @@ func graphite(c *GraphiteConfig) error { c.Registry.Each(func(name string, i interface{}) { switch metric := i.(type) { case Counter: - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Snapshot().Count(), now) case CounterFloat64: - fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Count(), now) + fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Snapshot().Count(), now) case Gauge: - fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) + fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Snapshot().Value(), now) case GaugeFloat64: - fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) + fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Snapshot().Value(), now) case GaugeInfo: - fmt.Fprintf(w, "%s.%s.value %s %d\n", c.Prefix, name, metric.Value().String(), now) + fmt.Fprintf(w, "%s.%s.value %s %d\n", c.Prefix, name, metric.Snapshot().Value().String(), now) case Histogram: h := metric.Snapshot() ps := h.Percentiles(c.Percentiles) diff --git a/metrics/histogram.go b/metrics/histogram.go index 2c54ce8b40..44de588bc1 100644 --- a/metrics/histogram.go +++ b/metrics/histogram.go @@ -1,20 +1,14 @@ package metrics +type HistogramSnapshot interface { + SampleSnapshot +} + // Histograms calculate distribution statistics from a series of int64 values. type Histogram interface { Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Sample() Sample - Snapshot() Histogram - StdDev() float64 - Sum() int64 Update(int64) - Variance() float64 + Snapshot() HistogramSnapshot } // GetOrRegisterHistogram returns an existing Histogram or constructs and @@ -54,108 +48,12 @@ func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { return c } -// HistogramSnapshot is a read-only copy of another Histogram. -type HistogramSnapshot struct { - sample *SampleSnapshot -} - -// Clear panics. -func (*HistogramSnapshot) Clear() { - panic("Clear called on a HistogramSnapshot") -} - -// Count returns the number of samples recorded at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample at the time the snapshot -// was taken. -func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of values in the sample at the -// time the snapshot was taken. 
-func (h *HistogramSnapshot) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the sample -// at the time the snapshot was taken. -func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *HistogramSnapshot) Sample() Sample { return h.sample } - -// Snapshot returns the snapshot. -func (h *HistogramSnapshot) Snapshot() Histogram { return h } - -// StdDev returns the standard deviation of the values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } - -// Sum returns the sum in the sample at the time the snapshot was taken. -func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } - -// Update panics. -func (*HistogramSnapshot) Update(int64) { - panic("Update called on a HistogramSnapshot") -} - -// Variance returns the variance of inputs at the time the snapshot was taken. -func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } - // NilHistogram is a no-op Histogram. type NilHistogram struct{} -// Clear is a no-op. -func (NilHistogram) Clear() {} - -// Count is a no-op. -func (NilHistogram) Count() int64 { return 0 } - -// Max is a no-op. -func (NilHistogram) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilHistogram) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilHistogram) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilHistogram) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilHistogram) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Sample is a no-op. -func (NilHistogram) Sample() Sample { return NilSample{} } - -// Snapshot is a no-op. -func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } - -// StdDev is a no-op. -func (NilHistogram) StdDev() float64 { return 0.0 } - -// Sum is a no-op. -func (NilHistogram) Sum() int64 { return 0 } - -// Update is a no-op. -func (NilHistogram) Update(v int64) {} - -// Variance is a no-op. -func (NilHistogram) Variance() float64 { return 0.0 } +func (NilHistogram) Clear() {} +func (NilHistogram) Snapshot() HistogramSnapshot { return (*emptySnapshot)(nil) } +func (NilHistogram) Update(v int64) {} // StandardHistogram is the standard implementation of a Histogram and uses a // Sample to bound its memory use. @@ -166,46 +64,10 @@ type StandardHistogram struct { // Clear clears the histogram and its sample. func (h *StandardHistogram) Clear() { h.sample.Clear() } -// Count returns the number of samples recorded since the histogram was last -// cleared. -func (h *StandardHistogram) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample. -func (h *StandardHistogram) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample. -func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample. -func (h *StandardHistogram) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of the values in the sample. -func (h *StandardHistogram) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. 
-func (h *StandardHistogram) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *StandardHistogram) Sample() Sample { return h.sample } - // Snapshot returns a read-only copy of the histogram. -func (h *StandardHistogram) Snapshot() Histogram { - return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} +func (h *StandardHistogram) Snapshot() HistogramSnapshot { + return h.sample.Snapshot() } -// StdDev returns the standard deviation of the values in the sample. -func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } - -// Sum returns the sum in the sample. -func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } - // Update samples a new value. func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } - -// Variance returns the variance of the values in the sample. -func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/metrics/histogram_test.go b/metrics/histogram_test.go index 7c9f42fcec..22fc5468b0 100644 --- a/metrics/histogram_test.go +++ b/metrics/histogram_test.go @@ -14,7 +14,7 @@ func TestGetOrRegisterHistogram(t *testing.T) { r := NewRegistry() s := NewUniformSample(100) NewRegisteredHistogram("foo", r, s).Update(47) - if h := GetOrRegisterHistogram("foo", r, s); h.Count() != 1 { + if h := GetOrRegisterHistogram("foo", r, s).Snapshot(); h.Count() != 1 { t.Fatal(h) } } @@ -24,11 +24,11 @@ func TestHistogram10000(t *testing.T) { for i := 1; i <= 10000; i++ { h.Update(int64(i)) } - testHistogram10000(t, h) + testHistogram10000(t, h.Snapshot()) } func TestHistogramEmpty(t *testing.T) { - h := NewHistogram(NewUniformSample(100)) + h := NewHistogram(NewUniformSample(100)).Snapshot() if count := h.Count(); count != 0 { t.Errorf("h.Count(): 0 != %v\n", count) } @@ -66,7 +66,7 @@ func TestHistogramSnapshot(t *testing.T) { testHistogram10000(t, snapshot) } -func testHistogram10000(t *testing.T, h Histogram) { +func testHistogram10000(t *testing.T, h HistogramSnapshot) { if count := h.Count(); count != 10000 { t.Errorf("h.Count(): 10000 != %v\n", count) } diff --git a/metrics/inactive.go b/metrics/inactive.go new file mode 100644 index 0000000000..1f47f0210a --- /dev/null +++ b/metrics/inactive.go @@ -0,0 +1,48 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package metrics + +// compile-time checks that interfaces are implemented. 
+var ( + _ SampleSnapshot = (*emptySnapshot)(nil) + _ HistogramSnapshot = (*emptySnapshot)(nil) + _ CounterSnapshot = (*emptySnapshot)(nil) + _ GaugeSnapshot = (*emptySnapshot)(nil) + _ MeterSnapshot = (*emptySnapshot)(nil) + _ EWMASnapshot = (*emptySnapshot)(nil) + _ TimerSnapshot = (*emptySnapshot)(nil) +) + +type emptySnapshot struct{} + +func (*emptySnapshot) Count() int64 { return 0 } +func (*emptySnapshot) Max() int64 { return 0 } +func (*emptySnapshot) Mean() float64 { return 0.0 } +func (*emptySnapshot) Min() int64 { return 0 } +func (*emptySnapshot) Percentile(p float64) float64 { return 0.0 } +func (*emptySnapshot) Percentiles(ps []float64) []float64 { return make([]float64, len(ps)) } +func (*emptySnapshot) Size() int { return 0 } +func (*emptySnapshot) StdDev() float64 { return 0.0 } +func (*emptySnapshot) Sum() int64 { return 0 } +func (*emptySnapshot) Values() []int64 { return []int64{} } +func (*emptySnapshot) Variance() float64 { return 0.0 } +func (*emptySnapshot) Value() int64 { return 0 } +func (*emptySnapshot) Rate() float64 { return 0.0 } +func (*emptySnapshot) Rate1() float64 { return 0.0 } +func (*emptySnapshot) Rate5() float64 { return 0.0 } +func (*emptySnapshot) Rate15() float64 { return 0.0 } +func (*emptySnapshot) RateMean() float64 { return 0.0 } diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go index 9354f1a633..bbc4fc024b 100644 --- a/metrics/influxdb/influxdb.go +++ b/metrics/influxdb/influxdb.go @@ -11,13 +11,13 @@ func readMeter(namespace, name string, i interface{}) (string, map[string]interf case metrics.Counter: measurement := fmt.Sprintf("%s%s.count", namespace, name) fields := map[string]interface{}{ - "value": metric.Count(), + "value": metric.Snapshot().Count(), } return measurement, fields case metrics.CounterFloat64: measurement := fmt.Sprintf("%s%s.count", namespace, name) fields := map[string]interface{}{ - "value": metric.Count(), + "value": metric.Snapshot().Count(), } return measurement, fields case metrics.Gauge: @@ -99,20 +99,19 @@ func readMeter(namespace, name string, i interface{}) (string, map[string]interf return measurement, fields case metrics.ResettingTimer: t := metric.Snapshot() - if len(t.Values()) == 0 { + if t.Count() == 0 { break } - ps := t.Percentiles([]float64{50, 95, 99}) - val := t.Values() + ps := t.Percentiles([]float64{0.50, 0.95, 0.99}) measurement := fmt.Sprintf("%s%s.span", namespace, name) fields := map[string]interface{}{ - "count": len(val), - "max": val[len(val)-1], + "count": t.Count(), + "max": t.Max(), "mean": t.Mean(), - "min": val[0], - "p50": ps[0], - "p95": ps[1], - "p99": ps[2], + "min": t.Min(), + "p50": int(ps[0]), + "p95": int(ps[1]), + "p99": int(ps[2]), } return measurement, fields } diff --git a/metrics/influxdb/influxdb_test.go b/metrics/influxdb/influxdb_test.go index beeb36a531..c6f2eeac62 100644 --- a/metrics/influxdb/influxdb_test.go +++ b/metrics/influxdb/influxdb_test.go @@ -96,7 +96,7 @@ func TestExampleV2(t *testing.T) { } if have != want { t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want) - t.Logf("have vs want:\n %v", findFirstDiffPos(have, want)) + t.Logf("have vs want:\n%v", findFirstDiffPos(have, want)) } } diff --git a/metrics/influxdb/testdata/influxdbv1.want b/metrics/influxdb/testdata/influxdbv1.want index 5efffb9595..9443faedc5 100644 --- a/metrics/influxdb/testdata/influxdbv1.want +++ b/metrics/influxdb/testdata/influxdbv1.want @@ -1,3 +1,5 @@ +goth.system/cpu/schedlatency.histogram 
count=5645i,max=41943040i,mean=1819544.0410983171,min=0i,p25=0,p50=0,p75=7168,p95=16777216,p99=29360128,p999=33554432,p9999=33554432,stddev=6393570.217198883,variance=40877740122252.57 978307200000000000 +goth.system/memory/pauses.histogram count=14i,max=229376i,mean=50066.28571428572,min=5120i,p25=10240,p50=32768,p75=57344,p95=196608,p99=196608,p999=196608,p9999=196608,stddev=54726.062410783874,variance=2994941906.9890113 978307200000000000 goth.test/counter.count value=12345 978307200000000000 goth.test/counter_float64.count value=54321.98 978307200000000000 goth.test/gauge.gauge value=23456i 978307200000000000 @@ -5,5 +7,5 @@ goth.test/gauge_float64.gauge value=34567.89 978307200000000000 goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000 goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000 goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000 -goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12000000i,p95=120000000i,p99=120000000i 978307200000000000 +goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000 goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000 diff --git a/metrics/influxdb/testdata/influxdbv2.want b/metrics/influxdb/testdata/influxdbv2.want index 5efffb9595..9443faedc5 100644 --- a/metrics/influxdb/testdata/influxdbv2.want +++ b/metrics/influxdb/testdata/influxdbv2.want @@ -1,3 +1,5 @@ +goth.system/cpu/schedlatency.histogram count=5645i,max=41943040i,mean=1819544.0410983171,min=0i,p25=0,p50=0,p75=7168,p95=16777216,p99=29360128,p999=33554432,p9999=33554432,stddev=6393570.217198883,variance=40877740122252.57 978307200000000000 +goth.system/memory/pauses.histogram count=14i,max=229376i,mean=50066.28571428572,min=5120i,p25=10240,p50=32768,p75=57344,p95=196608,p99=196608,p999=196608,p9999=196608,stddev=54726.062410783874,variance=2994941906.9890113 978307200000000000 goth.test/counter.count value=12345 978307200000000000 goth.test/counter_float64.count value=54321.98 978307200000000000 goth.test/gauge.gauge value=23456i 978307200000000000 @@ -5,5 +7,5 @@ goth.test/gauge_float64.gauge value=34567.89 978307200000000000 goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000 goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000 goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000 -goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12000000i,p95=120000000i,p99=120000000i 978307200000000000 +goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000 goth.test/timer.timer 
count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000 diff --git a/metrics/internal/sampledata.go b/metrics/internal/sampledata.go index 9ace069576..de9b207b6d 100644 --- a/metrics/internal/sampledata.go +++ b/metrics/internal/sampledata.go @@ -17,6 +17,9 @@ package internal import ( + "bytes" + "encoding/gob" + metrics2 "runtime/metrics" "time" "github.com/ethereum/go-ethereum/metrics" @@ -38,7 +41,15 @@ func ExampleMetrics() metrics.Registry { "commit": "7caa2d8163ae3132c1c2d6978c76610caee2d949", "protocol_versions": "64 65 66", }) - metrics.NewRegisteredHistogram("test/histogram", registry, metrics.NewSampleSnapshot(3, []int64{1, 2, 3})) + + { + s := metrics.NewUniformSample(3) + s.Update(1) + s.Update(2) + s.Update(3) + //metrics.NewRegisteredHistogram("test/histogram", registry, metrics.NewSampleSnapshot(3, []int64{1, 2, 3})) + metrics.NewRegisteredHistogram("test/histogram", registry, s) + } registry.Register("test/meter", metrics.NewInactiveMeter()) { timer := metrics.NewRegisteredResettingTimer("test/resetting_timer", registry) @@ -60,5 +71,25 @@ func ExampleMetrics() metrics.Registry { timer.Stop() } registry.Register("test/empty_resetting_timer", metrics.NewResettingTimer().Snapshot()) + + { // go runtime metrics + var sLatency = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06T\xff\x82\x01\xff\xa2\x00\xfe\r\xef\x00\x01\x02\x02\x04\x05\x04\b\x15\x17 B?6.L;$!2) \x1a? 
\x190aH7FY6#\x190\x1d\x14\x10\x1b\r\t\x04\x03\x01\x01\x00\x03\x02\x00\x03\x05\x05\x02\x02\x06\x04\v\x06\n\x15\x18\x13'&.\x12=H/L&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6
&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00" + var gcPauses = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06R\xff\x82\x01\xff\xa2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x00\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2
\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00" + + var secondsToNs = float64(time.Second) + + dserialize := func(data string) *metrics2.Float64Histogram { + var res metrics2.Float64Histogram + if err := gob.NewDecoder(bytes.NewReader([]byte(data))).Decode(&res); err != nil { + panic(err) + } + return &res + } + cpuSchedLatency := metrics.RuntimeHistogramFromData(secondsToNs, dserialize(sLatency)) + registry.Register("system/cpu/schedlatency", cpuSchedLatency) + + memPauses := metrics.RuntimeHistogramFromData(secondsToNs, dserialize(gcPauses)) + registry.Register("system/memory/pauses", memPauses) + } return registry } diff --git a/metrics/internal/sampledata_test.go b/metrics/internal/sampledata_test.go new file mode 100644 index 0000000000..0013299406 --- /dev/null +++ b/metrics/internal/sampledata_test.go @@ -0,0 +1,27 @@ +package internal + +import ( + "bytes" + "encoding/gob" + "fmt" + metrics2 "runtime/metrics" + "testing" + "time" + + "github.com/ethereum/go-ethereum/metrics" +) + +func TestCollectRuntimeMetrics(t *testing.T) { + t.Skip("Only used for generating testdata") + serialize := func(path string, histogram *metrics2.Float64Histogram) { + var f = new(bytes.Buffer) + if err := gob.NewEncoder(f).Encode(histogram); err != nil { + panic(err) + } + fmt.Printf("var %v = %q\n", path, f.Bytes()) + } + time.Sleep(2 * time.Second) + stats := metrics.ReadRuntimeStats() + serialize("schedlatency", stats.SchedLatency) + serialize("gcpauses", stats.GCPauses) +} diff --git a/metrics/librato/librato.go b/metrics/librato/librato.go index fa98595991..a86f758637 100644 --- a/metrics/librato/librato.go +++ b/metrics/librato/librato.go @@ -61,16 +61,16 @@ func (rep *Reporter) Run() { // calculate 
sum of squares from data provided by metrics.Histogram // see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods -func sumSquares(s metrics.Sample) float64 { - count := float64(s.Count()) - sumSquared := math.Pow(count*s.Mean(), 2) - sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count +func sumSquares(icount int64, mean, stDev float64) float64 { + count := float64(icount) + sumSquared := math.Pow(count*mean, 2) + sumSquares := math.Pow(count*stDev, 2) + sumSquared/count if math.IsNaN(sumSquares) { return 0.0 } return sumSquares } -func sumSquaresTimer(t metrics.Timer) float64 { +func sumSquaresTimer(t metrics.TimerSnapshot) float64 { count := float64(t.Count()) sumSquared := math.Pow(count*t.Mean(), 2) sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count @@ -97,9 +97,10 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B measurement[Period] = rep.Interval.Seconds() switch m := metric.(type) { case metrics.Counter: - if m.Count() > 0 { + ms := m.Snapshot() + if ms.Count() > 0 { measurement[Name] = fmt.Sprintf("%s.%s", name, "count") - measurement[Value] = float64(m.Count()) + measurement[Value] = float64(ms.Count()) measurement[Attributes] = map[string]interface{}{ DisplayUnitsLong: Operations, DisplayUnitsShort: OperationsShort, @@ -108,9 +109,9 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B snapshot.Counters = append(snapshot.Counters, measurement) } case metrics.CounterFloat64: - if m.Count() > 0 { + if count := m.Snapshot().Count(); count > 0 { measurement[Name] = fmt.Sprintf("%s.%s", name, "count") - measurement[Value] = m.Count() + measurement[Value] = count measurement[Attributes] = map[string]interface{}{ DisplayUnitsLong: Operations, DisplayUnitsShort: OperationsShort, @@ -120,44 +121,45 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B } case metrics.Gauge: measurement[Name] = name - measurement[Value] = float64(m.Value()) + measurement[Value] = float64(m.Snapshot().Value()) snapshot.Gauges = append(snapshot.Gauges, measurement) case metrics.GaugeFloat64: measurement[Name] = name - measurement[Value] = m.Value() + measurement[Value] = m.Snapshot().Value() snapshot.Gauges = append(snapshot.Gauges, measurement) case metrics.GaugeInfo: measurement[Name] = name - measurement[Value] = m.Value() + measurement[Value] = m.Snapshot().Value() snapshot.Gauges = append(snapshot.Gauges, measurement) case metrics.Histogram: - if m.Count() > 0 { + ms := m.Snapshot() + if ms.Count() > 0 { gauges := make([]Measurement, histogramGaugeCount) - s := m.Sample() measurement[Name] = fmt.Sprintf("%s.%s", name, "hist") - measurement[Count] = uint64(s.Count()) - measurement[Max] = float64(s.Max()) - measurement[Min] = float64(s.Min()) - measurement[Sum] = float64(s.Sum()) - measurement[SumSquares] = sumSquares(s) + measurement[Count] = uint64(ms.Count()) + measurement[Max] = float64(ms.Max()) + measurement[Min] = float64(ms.Min()) + measurement[Sum] = float64(ms.Sum()) + measurement[SumSquares] = sumSquares(ms.Count(), ms.Mean(), ms.StdDev()) gauges[0] = measurement for i, p := range rep.Percentiles { gauges[i+1] = Measurement{ Name: fmt.Sprintf("%s.%.2f", measurement[Name], p), - Value: s.Percentile(p), + Value: ms.Percentile(p), Period: measurement[Period], } } snapshot.Gauges = append(snapshot.Gauges, gauges...) 
} case metrics.Meter: + ms := m.Snapshot() measurement[Name] = name - measurement[Value] = float64(m.Count()) + measurement[Value] = float64(ms.Count()) snapshot.Counters = append(snapshot.Counters, measurement) snapshot.Gauges = append(snapshot.Gauges, Measurement{ Name: fmt.Sprintf("%s.%s", name, "1min"), - Value: m.Rate1(), + Value: ms.Rate1(), Period: int64(rep.Interval.Seconds()), Attributes: map[string]interface{}{ DisplayUnitsLong: Operations, @@ -167,7 +169,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B }, Measurement{ Name: fmt.Sprintf("%s.%s", name, "5min"), - Value: m.Rate5(), + Value: ms.Rate5(), Period: int64(rep.Interval.Seconds()), Attributes: map[string]interface{}{ DisplayUnitsLong: Operations, @@ -177,7 +179,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B }, Measurement{ Name: fmt.Sprintf("%s.%s", name, "15min"), - Value: m.Rate15(), + Value: ms.Rate15(), Period: int64(rep.Interval.Seconds()), Attributes: map[string]interface{}{ DisplayUnitsLong: Operations, @@ -187,26 +189,27 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B }, ) case metrics.Timer: + ms := m.Snapshot() measurement[Name] = name - measurement[Value] = float64(m.Count()) + measurement[Value] = float64(ms.Count()) snapshot.Counters = append(snapshot.Counters, measurement) - if m.Count() > 0 { + if ms.Count() > 0 { libratoName := fmt.Sprintf("%s.%s", name, "timer.mean") gauges := make([]Measurement, histogramGaugeCount) gauges[0] = Measurement{ Name: libratoName, - Count: uint64(m.Count()), - Sum: m.Mean() * float64(m.Count()), - Max: float64(m.Max()), - Min: float64(m.Min()), - SumSquares: sumSquaresTimer(m), + Count: uint64(ms.Count()), + Sum: ms.Mean() * float64(ms.Count()), + Max: float64(ms.Max()), + Min: float64(ms.Min()), + SumSquares: sumSquaresTimer(ms), Period: int64(rep.Interval.Seconds()), Attributes: rep.TimerAttributes, } for i, p := range rep.Percentiles { gauges[i+1] = Measurement{ Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100), - Value: m.Percentile(p), + Value: ms.Percentile(p), Period: int64(rep.Interval.Seconds()), Attributes: rep.TimerAttributes, } @@ -215,7 +218,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B snapshot.Gauges = append(snapshot.Gauges, Measurement{ Name: fmt.Sprintf("%s.%s", name, "rate.1min"), - Value: m.Rate1(), + Value: ms.Rate1(), Period: int64(rep.Interval.Seconds()), Attributes: map[string]interface{}{ DisplayUnitsLong: Operations, @@ -225,7 +228,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B }, Measurement{ Name: fmt.Sprintf("%s.%s", name, "rate.5min"), - Value: m.Rate5(), + Value: ms.Rate5(), Period: int64(rep.Interval.Seconds()), Attributes: map[string]interface{}{ DisplayUnitsLong: Operations, @@ -235,7 +238,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B }, Measurement{ Name: fmt.Sprintf("%s.%s", name, "rate.15min"), - Value: m.Rate15(), + Value: ms.Rate15(), Period: int64(rep.Interval.Seconds()), Attributes: map[string]interface{}{ DisplayUnitsLong: Operations, diff --git a/metrics/log.go b/metrics/log.go index d71a1c3d9c..3b9773faa7 100644 --- a/metrics/log.go +++ b/metrics/log.go @@ -23,19 +23,19 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { switch metric := i.(type) { case Counter: l.Printf("counter %s\n", name) - l.Printf(" count: %9d\n", metric.Count()) + l.Printf(" count: %9d\n", 
metric.Snapshot().Count()) case CounterFloat64: l.Printf("counter %s\n", name) - l.Printf(" count: %f\n", metric.Count()) + l.Printf(" count: %f\n", metric.Snapshot().Count()) case Gauge: l.Printf("gauge %s\n", name) - l.Printf(" value: %9d\n", metric.Value()) + l.Printf(" value: %9d\n", metric.Snapshot().Value()) case GaugeFloat64: l.Printf("gauge %s\n", name) - l.Printf(" value: %f\n", metric.Value()) + l.Printf(" value: %f\n", metric.Snapshot().Value()) case GaugeInfo: l.Printf("gauge %s\n", name) - l.Printf(" value: %s\n", metric.Value()) + l.Printf(" value: %s\n", metric.Snapshot().Value()) case Healthcheck: metric.Check() l.Printf("healthcheck %s\n", name) diff --git a/metrics/meter.go b/metrics/meter.go index 8a89dc4275..22475ef6eb 100644 --- a/metrics/meter.go +++ b/metrics/meter.go @@ -1,21 +1,25 @@ package metrics import ( + "math" "sync" "sync/atomic" "time" ) -// Meters count events to produce exponentially-weighted moving average rates -// at one-, five-, and fifteen-minutes and a mean rate. -type Meter interface { +type MeterSnapshot interface { Count() int64 - Mark(int64) Rate1() float64 Rate5() float64 Rate15() float64 RateMean() float64 - Snapshot() Meter +} + +// Meters count events to produce exponentially-weighted moving average rates +// at one-, five-, and fifteen-minutes and a mean rate. +type Meter interface { + Mark(int64) + Snapshot() MeterSnapshot Stop() } @@ -30,17 +34,6 @@ func GetOrRegisterMeter(name string, r Registry) Meter { return r.GetOrRegister(name, NewMeter).(Meter) } -// GetOrRegisterMeterForced returns an existing Meter or constructs and registers a -// new StandardMeter no matter the global switch is enabled or not. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func GetOrRegisterMeterForced(name string, r Registry) Meter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewMeterForced).(Meter) -} - // NewMeter constructs a new StandardMeter and launches a goroutine. // Be sure to call Stop() once the meter is of no use to allow for garbage collection. func NewMeter() Meter { @@ -68,115 +61,53 @@ func NewInactiveMeter() Meter { return m } -// NewMeterForced constructs a new StandardMeter and launches a goroutine no matter -// the global switch is enabled or not. -// Be sure to call Stop() once the meter is of no use to allow for garbage collection. -func NewMeterForced() Meter { - m := newStandardMeter() - arbiter.Lock() - defer arbiter.Unlock() - arbiter.meters[m] = struct{}{} - if !arbiter.started { - arbiter.started = true - go arbiter.tick() - } - return m -} - // NewRegisteredMeter constructs and registers a new StandardMeter // and launches a goroutine. // Be sure to unregister the meter from the registry once it is of no use to // allow for garbage collection. func NewRegisteredMeter(name string, r Registry) Meter { - c := NewMeter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c + return GetOrRegisterMeter(name, r) } -// NewRegisteredMeterForced constructs and registers a new StandardMeter -// and launches a goroutine no matter the global switch is enabled or not. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredMeterForced(name string, r Registry) Meter { - c := NewMeterForced() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// MeterSnapshot is a read-only copy of another Meter. 
-type MeterSnapshot struct { - temp atomic.Int64 +// meterSnapshot is a read-only copy of the meter's internal values. +type meterSnapshot struct { count int64 rate1, rate5, rate15, rateMean float64 } // Count returns the count of events at the time the snapshot was taken. -func (m *MeterSnapshot) Count() int64 { return m.count } - -// Mark panics. -func (*MeterSnapshot) Mark(n int64) { - panic("Mark called on a MeterSnapshot") -} +func (m *meterSnapshot) Count() int64 { return m.count } // Rate1 returns the one-minute moving average rate of events per second at the // time the snapshot was taken. -func (m *MeterSnapshot) Rate1() float64 { return m.rate1 } +func (m *meterSnapshot) Rate1() float64 { return m.rate1 } // Rate5 returns the five-minute moving average rate of events per second at // the time the snapshot was taken. -func (m *MeterSnapshot) Rate5() float64 { return m.rate5 } +func (m *meterSnapshot) Rate5() float64 { return m.rate5 } // Rate15 returns the fifteen-minute moving average rate of events per second // at the time the snapshot was taken. -func (m *MeterSnapshot) Rate15() float64 { return m.rate15 } +func (m *meterSnapshot) Rate15() float64 { return m.rate15 } // RateMean returns the meter's mean rate of events per second at the time the // snapshot was taken. -func (m *MeterSnapshot) RateMean() float64 { return m.rateMean } - -// Snapshot returns the snapshot. -func (m *MeterSnapshot) Snapshot() Meter { return m } - -// Stop is a no-op. -func (m *MeterSnapshot) Stop() {} +func (m *meterSnapshot) RateMean() float64 { return m.rateMean } // NilMeter is a no-op Meter. type NilMeter struct{} -// Count is a no-op. -func (NilMeter) Count() int64 { return 0 } - -// Mark is a no-op. -func (NilMeter) Mark(n int64) {} - -// Rate1 is a no-op. -func (NilMeter) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilMeter) Rate5() float64 { return 0.0 } - -// Rate15 is a no-op. -func (NilMeter) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. -func (NilMeter) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilMeter) Snapshot() Meter { return NilMeter{} } - -// Stop is a no-op. -func (NilMeter) Stop() {} +func (NilMeter) Count() int64 { return 0 } +func (NilMeter) Mark(n int64) {} +func (NilMeter) Snapshot() MeterSnapshot { return (*emptySnapshot)(nil) } +func (NilMeter) Stop() {} // StandardMeter is the standard implementation of a Meter. type StandardMeter struct { - lock sync.RWMutex - snapshot *MeterSnapshot + count atomic.Int64 + uncounted atomic.Int64 // not yet added to the EWMAs + rateMean atomic.Uint64 + a1, a5, a15 EWMA startTime time.Time stopped atomic.Bool @@ -184,7 +115,6 @@ type StandardMeter struct { func newStandardMeter() *StandardMeter { return &StandardMeter{ - snapshot: &MeterSnapshot{}, a1: NewEWMA1(), a5: NewEWMA5(), a15: NewEWMA15(), @@ -194,97 +124,42 @@ func newStandardMeter() *StandardMeter { // Stop stops the meter, Mark() will be a no-op if you use it after being stopped. func (m *StandardMeter) Stop() { - stopped := m.stopped.Swap(true) - if !stopped { + if stopped := m.stopped.Swap(true); !stopped { arbiter.Lock() delete(arbiter.meters, m) arbiter.Unlock() } } -// Count returns the number of events recorded. -// It updates the meter to be as accurate as possible -func (m *StandardMeter) Count() int64 { - m.lock.Lock() - defer m.lock.Unlock() - m.updateMeter() - return m.snapshot.count -} - // Mark records the occurrence of n events. 
func (m *StandardMeter) Mark(n int64) { - m.snapshot.temp.Add(n) -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (m *StandardMeter) Rate1() float64 { - m.lock.RLock() - defer m.lock.RUnlock() - return m.snapshot.rate1 -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (m *StandardMeter) Rate5() float64 { - m.lock.RLock() - defer m.lock.RUnlock() - return m.snapshot.rate5 -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (m *StandardMeter) Rate15() float64 { - m.lock.RLock() - defer m.lock.RUnlock() - return m.snapshot.rate15 -} - -// RateMean returns the meter's mean rate of events per second. -func (m *StandardMeter) RateMean() float64 { - m.lock.RLock() - defer m.lock.RUnlock() - return m.snapshot.rateMean + m.uncounted.Add(n) } // Snapshot returns a read-only copy of the meter. -func (m *StandardMeter) Snapshot() Meter { - m.lock.RLock() - snapshot := MeterSnapshot{ - count: m.snapshot.count, - rate1: m.snapshot.rate1, - rate5: m.snapshot.rate5, - rate15: m.snapshot.rate15, - rateMean: m.snapshot.rateMean, +func (m *StandardMeter) Snapshot() MeterSnapshot { + return &meterSnapshot{ + count: m.count.Load() + m.uncounted.Load(), + rate1: m.a1.Snapshot().Rate(), + rate5: m.a5.Snapshot().Rate(), + rate15: m.a15.Snapshot().Rate(), + rateMean: math.Float64frombits(m.rateMean.Load()), } - snapshot.temp.Store(m.snapshot.temp.Load()) - m.lock.RUnlock() - return &snapshot -} - -func (m *StandardMeter) updateSnapshot() { - // should run with write lock held on m.lock - snapshot := m.snapshot - snapshot.rate1 = m.a1.Rate() - snapshot.rate5 = m.a5.Rate() - snapshot.rate15 = m.a15.Rate() - snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() } -func (m *StandardMeter) updateMeter() { - // should only run with write lock held on m.lock - n := m.snapshot.temp.Swap(0) - m.snapshot.count += n +func (m *StandardMeter) tick() { + // Take the uncounted values, add to count + n := m.uncounted.Swap(0) + count := m.count.Add(n) + m.rateMean.Store(math.Float64bits(float64(count) / time.Since(m.startTime).Seconds())) + // Update the EWMA's internal state m.a1.Update(n) m.a5.Update(n) m.a15.Update(n) -} - -func (m *StandardMeter) tick() { - m.lock.Lock() - defer m.lock.Unlock() - m.updateMeter() + // And trigger them to calculate the rates m.a1.Tick() m.a5.Tick() m.a15.Tick() - m.updateSnapshot() } // meterArbiter ticks meters every 5s from a single goroutine. 
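For context, a minimal usage sketch of the reworked Meter API above. This is illustrative only and not part of the patch: the metric name "test/requests" and the main wrapper are invented here; the calls themselves (NewRegisteredMeter, Mark, Snapshot, Stop and the snapshot accessors) are the ones defined in the diff. Writers only Mark(); every read now goes through an explicit, internally consistent MeterSnapshot:

	package main

	import "github.com/ethereum/go-ethereum/metrics"

	func main() {
		// A nil registry falls back to metrics.DefaultRegistry.
		m := metrics.NewRegisteredMeter("test/requests", nil)
		defer m.Stop() // detach the meter from the 5s ticking arbiter

		m.Mark(3) // record three events: a lock-free atomic add

		// Readers take a point-in-time snapshot; marks not yet folded
		// into the EWMAs are still included in Count().
		ms := m.Snapshot()
		_ = ms.Count() // 3
		_ = ms.Rate1() // one-minute EWMA rate, events/sec
	}
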
diff --git a/metrics/meter_test.go b/metrics/meter_test.go index b3f6cb8c0c..019c4d765b 100644 --- a/metrics/meter_test.go +++ b/metrics/meter_test.go @@ -12,11 +12,17 @@ func BenchmarkMeter(b *testing.B) { m.Mark(1) } } - +func TestMeter(t *testing.T) { + m := NewMeter() + m.Mark(47) + if v := m.Snapshot().Count(); v != 47 { + t.Fatalf("have %d want %d", v, 47) + } +} func TestGetOrRegisterMeter(t *testing.T) { r := NewRegistry() NewRegisteredMeter("foo", r).Mark(47) - if m := GetOrRegisterMeter("foo", r); m.Count() != 47 { + if m := GetOrRegisterMeter("foo", r).Snapshot(); m.Count() != 47 { t.Fatal(m.Count()) } } @@ -31,10 +37,10 @@ func TestMeterDecay(t *testing.T) { ma.meters[m] = struct{}{} m.Mark(1) ma.tickMeters() - rateMean := m.RateMean() + rateMean := m.Snapshot().RateMean() time.Sleep(100 * time.Millisecond) ma.tickMeters() - if m.RateMean() >= rateMean { + if m.Snapshot().RateMean() >= rateMean { t.Error("m.RateMean() didn't decrease") } } @@ -42,7 +48,7 @@ func TestMeterDecay(t *testing.T) { func TestMeterNonzero(t *testing.T) { m := NewMeter() m.Mark(3) - if count := m.Count(); count != 3 { + if count := m.Snapshot().Count(); count != 3 { t.Errorf("m.Count(): 3 != %v\n", count) } } @@ -59,16 +65,8 @@ func TestMeterStop(t *testing.T) { } } -func TestMeterSnapshot(t *testing.T) { - m := NewMeter() - m.Mark(1) - if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() { - t.Fatal(snapshot) - } -} - func TestMeterZero(t *testing.T) { - m := NewMeter() + m := NewMeter().Snapshot() if count := m.Count(); count != 0 { t.Errorf("m.Count(): 0 != %v\n", count) } @@ -79,13 +77,13 @@ func TestMeterRepeat(t *testing.T) { for i := 0; i < 101; i++ { m.Mark(int64(i)) } - if count := m.Count(); count != 5050 { + if count := m.Snapshot().Count(); count != 5050 { t.Errorf("m.Count(): 5050 != %v\n", count) } for i := 0; i < 101; i++ { m.Mark(int64(i)) } - if count := m.Count(); count != 10100 { + if count := m.Snapshot().Count(); count != 10100 { t.Errorf("m.Count(): 10100 != %v\n", count) } } diff --git a/metrics/metrics.go b/metrics/metrics.go index c206f16924..97f03fa31d 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -85,6 +85,12 @@ var runtimeSamples = []metrics.Sample{ {Name: "/sched/latencies:seconds"}, // histogram } +func ReadRuntimeStats() *runtimeStats { + r := new(runtimeStats) + readRuntimeStats(r) + return r +} + func readRuntimeStats(v *runtimeStats) { metrics.Read(runtimeSamples) for _, s := range runtimeSamples { diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go index 534c44139b..2861d5f2ca 100644 --- a/metrics/metrics_test.go +++ b/metrics/metrics_test.go @@ -98,8 +98,8 @@ func Example() { t.Time(func() { time.Sleep(10 * time.Millisecond) }) t.Update(1) - fmt.Println(c.Count()) - fmt.Println(t.Min()) + fmt.Println(c.Snapshot().Count()) + fmt.Println(t.Snapshot().Min()) // Output: 17 // 1 } diff --git a/metrics/opentsdb.go b/metrics/opentsdb.go index 4d2e209238..e81690f943 100644 --- a/metrics/opentsdb.go +++ b/metrics/opentsdb.go @@ -65,15 +65,15 @@ func (c *OpenTSDBConfig) writeRegistry(w io.Writer, now int64, shortHostname str c.Registry.Each(func(name string, i interface{}) { switch metric := i.(type) { case Counter: - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname) case CounterFloat64: - fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Count(), 
shortHostname) + fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname) case Gauge: - fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname) case GaugeFloat64: - fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname) case GaugeInfo: - fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Value().String(), shortHostname) + fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Snapshot().Value().String(), shortHostname) case Histogram: h := metric.Snapshot() ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) diff --git a/metrics/opentsdb_test.go b/metrics/opentsdb_test.go index c02b98af06..4548309f9c 100644 --- a/metrics/opentsdb_test.go +++ b/metrics/opentsdb_test.go @@ -1,6 +1,7 @@ package metrics import ( + "fmt" "net" "os" "strings" @@ -47,5 +48,19 @@ func TestExampleOpenTSB(t *testing.T) { } if have, want := w.String(), string(wantB); have != want { t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want) + t.Logf("have vs want:\n%v", findFirstDiffPos(have, want)) } } + +func findFirstDiffPos(a, b string) string { + yy := strings.Split(b, "\n") + for i, x := range strings.Split(a, "\n") { + if i >= len(yy) { + return fmt.Sprintf("have:%d: %s\nwant:%d: ", i, x, i) + } + if y := yy[i]; x != y { + return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y) + } + } + return "" +} diff --git a/metrics/prometheus/collector.go b/metrics/prometheus/collector.go index 8624311c4b..25b258d56a 100644 --- a/metrics/prometheus/collector.go +++ b/metrics/prometheus/collector.go @@ -75,27 +75,27 @@ func (c *collector) Add(name string, i any) error { return nil } -func (c *collector) addCounter(name string, m metrics.Counter) { +func (c *collector) addCounter(name string, m metrics.CounterSnapshot) { c.writeGaugeCounter(name, m.Count()) } -func (c *collector) addCounterFloat64(name string, m metrics.CounterFloat64) { +func (c *collector) addCounterFloat64(name string, m metrics.CounterFloat64Snapshot) { c.writeGaugeCounter(name, m.Count()) } -func (c *collector) addGauge(name string, m metrics.Gauge) { +func (c *collector) addGauge(name string, m metrics.GaugeSnapshot) { c.writeGaugeCounter(name, m.Value()) } -func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64) { +func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64Snapshot) { c.writeGaugeCounter(name, m.Value()) } -func (c *collector) addGaugeInfo(name string, m metrics.GaugeInfo) { +func (c *collector) addGaugeInfo(name string, m metrics.GaugeInfoSnapshot) { c.writeGaugeInfo(name, m.Value()) } -func (c *collector) addHistogram(name string, m metrics.Histogram) { +func (c *collector) addHistogram(name string, m metrics.HistogramSnapshot) { pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999} ps := m.Percentiles(pv) c.writeSummaryCounter(name, m.Count()) @@ -106,11 +106,11 @@ func (c *collector) addHistogram(name string, m metrics.Histogram) { c.buff.WriteRune('\n') } -func (c *collector) addMeter(name string, m metrics.Meter) { +func (c *collector) addMeter(name string, m metrics.MeterSnapshot) { c.writeGaugeCounter(name, m.Count()) } -func (c *collector) addTimer(name string, m 
metrics.Timer) { +func (c *collector) addTimer(name string, m metrics.TimerSnapshot) { pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999} ps := m.Percentiles(pv) c.writeSummaryCounter(name, m.Count()) @@ -121,13 +121,12 @@ func (c *collector) addTimer(name string, m metrics.Timer) { c.buff.WriteRune('\n') } -func (c *collector) addResettingTimer(name string, m metrics.ResettingTimer) { - if len(m.Values()) <= 0 { +func (c *collector) addResettingTimer(name string, m metrics.ResettingTimerSnapshot) { + if m.Count() <= 0 { return } - ps := m.Percentiles([]float64{50, 95, 99}) - val := m.Values() - c.writeSummaryCounter(name, len(val)) + ps := m.Percentiles([]float64{0.50, 0.95, 0.99}) + c.writeSummaryCounter(name, m.Count()) c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name))) c.writeSummaryPercentile(name, "0.50", ps[0]) c.writeSummaryPercentile(name, "0.95", ps[1]) diff --git a/metrics/prometheus/collector_test.go b/metrics/prometheus/collector_test.go index 3d7903d4ad..ea17aac458 100644 --- a/metrics/prometheus/collector_test.go +++ b/metrics/prometheus/collector_test.go @@ -55,10 +55,10 @@ func findFirstDiffPos(a, b string) string { yy := strings.Split(b, "\n") for i, x := range strings.Split(a, "\n") { if i >= len(yy) { - return fmt.Sprintf("a:%d: %s\nb:%d: ", i, x, i) + return fmt.Sprintf("have:%d: %s\nwant:%d: ", i, x, i) } if y := yy[i]; x != y { - return fmt.Sprintf("a:%d: %s\nb:%d: %s", i, x, i, y) + return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y) } } return "" diff --git a/metrics/prometheus/testdata/prometheus.want b/metrics/prometheus/testdata/prometheus.want index f35496e61d..861c5f5cf0 100644 --- a/metrics/prometheus/testdata/prometheus.want +++ b/metrics/prometheus/testdata/prometheus.want @@ -1,3 +1,25 @@ +# TYPE system_cpu_schedlatency_count counter +system_cpu_schedlatency_count 5645 + +# TYPE system_cpu_schedlatency summary +system_cpu_schedlatency {quantile="0.5"} 0 +system_cpu_schedlatency {quantile="0.75"} 7168 +system_cpu_schedlatency {quantile="0.95"} 1.6777216e+07 +system_cpu_schedlatency {quantile="0.99"} 2.9360128e+07 +system_cpu_schedlatency {quantile="0.999"} 3.3554432e+07 +system_cpu_schedlatency {quantile="0.9999"} 3.3554432e+07 + +# TYPE system_memory_pauses_count counter +system_memory_pauses_count 14 + +# TYPE system_memory_pauses summary +system_memory_pauses {quantile="0.5"} 32768 +system_memory_pauses {quantile="0.75"} 57344 +system_memory_pauses {quantile="0.95"} 196608 +system_memory_pauses {quantile="0.99"} 196608 +system_memory_pauses {quantile="0.999"} 196608 +system_memory_pauses {quantile="0.9999"} 196608 + # TYPE test_counter gauge test_counter 12345 @@ -31,9 +53,9 @@ test_meter 0 test_resetting_timer_count 6 # TYPE test_resetting_timer summary -test_resetting_timer {quantile="0.50"} 12000000 -test_resetting_timer {quantile="0.95"} 120000000 -test_resetting_timer {quantile="0.99"} 120000000 +test_resetting_timer {quantile="0.50"} 1.25e+07 +test_resetting_timer {quantile="0.95"} 1.2e+08 +test_resetting_timer {quantile="0.99"} 1.2e+08 # TYPE test_timer_count counter test_timer_count 6 diff --git a/metrics/registry.go b/metrics/registry.go index 66dbc890c0..8bfbc08042 100644 --- a/metrics/registry.go +++ b/metrics/registry.go @@ -150,13 +150,13 @@ func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { values := make(map[string]interface{}) switch metric := i.(type) { case Counter: - values["count"] = metric.Count() + values["count"] = metric.Snapshot().Count() case CounterFloat64: - values["count"] = 
metric.Count() + values["count"] = metric.Snapshot().Count() case Gauge: - values["value"] = metric.Value() + values["value"] = metric.Snapshot().Value() case GaugeFloat64: - values["value"] = metric.Value() + values["value"] = metric.Snapshot().Value() case Healthcheck: values["error"] = nil metric.Check() diff --git a/metrics/registry_test.go b/metrics/registry_test.go index 7cc5cf14fe..75012dd4ac 100644 --- a/metrics/registry_test.go +++ b/metrics/registry_test.go @@ -85,11 +85,11 @@ func TestRegistryDuplicate(t *testing.T) { func TestRegistryGet(t *testing.T) { r := NewRegistry() r.Register("foo", NewCounter()) - if count := r.Get("foo").(Counter).Count(); count != 0 { + if count := r.Get("foo").(Counter).Snapshot().Count(); count != 0 { t.Fatal(count) } r.Get("foo").(Counter).Inc(1) - if count := r.Get("foo").(Counter).Count(); count != 1 { + if count := r.Get("foo").(Counter).Snapshot().Count(); count != 1 { t.Fatal(count) } } diff --git a/metrics/resetting_sample.go b/metrics/resetting_sample.go index 43c1129cd0..c38ffcd3ec 100644 --- a/metrics/resetting_sample.go +++ b/metrics/resetting_sample.go @@ -17,7 +17,7 @@ type resettingSample struct { } // Snapshot returns a read-only copy of the sample with the original reset. -func (rs *resettingSample) Snapshot() Sample { +func (rs *resettingSample) Snapshot() SampleSnapshot { s := rs.Sample.Snapshot() rs.Sample.Clear() return s diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go index 8e23c8eeea..6802e3fcea 100644 --- a/metrics/resetting_timer.go +++ b/metrics/resetting_timer.go @@ -1,22 +1,24 @@ package metrics import ( - "math" "sync" "time" - - "golang.org/x/exp/slices" ) // Initial slice capacity for the values stored in a ResettingTimer const InitialResettingTimerSliceCap = 10 +type ResettingTimerSnapshot interface { + Count() int + Mean() float64 + Max() int64 + Min() int64 + Percentiles([]float64) []float64 +} + // ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval. type ResettingTimer interface { - Values() []int64 - Snapshot() ResettingTimer - Percentiles([]float64) []int64 - Mean() float64 + Snapshot() ResettingTimerSnapshot Time(func()) Update(time.Duration) UpdateSince(time.Time) @@ -52,70 +54,40 @@ func NewResettingTimer() ResettingTimer { } // NilResettingTimer is a no-op ResettingTimer. -type NilResettingTimer struct { -} - -// Values is a no-op. -func (NilResettingTimer) Values() []int64 { return nil } - -// Snapshot is a no-op. -func (NilResettingTimer) Snapshot() ResettingTimer { - return &ResettingTimerSnapshot{ - values: []int64{}, - } -} - -// Time is a no-op. -func (NilResettingTimer) Time(f func()) { f() } - -// Update is a no-op. -func (NilResettingTimer) Update(time.Duration) {} - -// Percentiles panics. -func (NilResettingTimer) Percentiles([]float64) []int64 { - panic("Percentiles called on a NilResettingTimer") -} - -// Mean panics. -func (NilResettingTimer) Mean() float64 { - panic("Mean called on a NilResettingTimer") -} - -// UpdateSince is a no-op. 
-func (NilResettingTimer) UpdateSince(time.Time) {}
+type NilResettingTimer struct{}
+
+func (NilResettingTimer) Values() []int64 { return nil }
+func (n NilResettingTimer) Snapshot() ResettingTimerSnapshot { return n }
+func (NilResettingTimer) Time(f func()) { f() }
+func (NilResettingTimer) Update(time.Duration) {}
+func (NilResettingTimer) Percentiles([]float64) []float64 { return nil }
+func (NilResettingTimer) Mean() float64 { return 0.0 }
+func (NilResettingTimer) Max() int64 { return 0 }
+func (NilResettingTimer) Min() int64 { return 0 }
+func (NilResettingTimer) UpdateSince(time.Time) {}
+func (NilResettingTimer) Count() int { return 0 }

 // StandardResettingTimer is the standard implementation of a ResettingTimer.
 // and Meter.
 type StandardResettingTimer struct {
 	values []int64
-	mutex sync.Mutex
-}
+	sum int64 // sum is a running total of the recorded values, used later to calculate the mean

-// Values returns a slice with all measurements.
-func (t *StandardResettingTimer) Values() []int64 {
-	return t.values
+	mutex sync.Mutex
 }

 // Snapshot resets the timer and returns a read-only copy of its contents.
-func (t *StandardResettingTimer) Snapshot() ResettingTimer {
+func (t *StandardResettingTimer) Snapshot() ResettingTimerSnapshot {
 	t.mutex.Lock()
 	defer t.mutex.Unlock()
-	currentValues := t.values
-	t.values = make([]int64, 0, InitialResettingTimerSliceCap)
-
-	return &ResettingTimerSnapshot{
-		values: currentValues,
+	snapshot := &resettingTimerSnapshot{}
+	if len(t.values) > 0 {
+		snapshot.mean = float64(t.sum) / float64(len(t.values))
+		snapshot.values = t.values
+		t.values = make([]int64, 0, InitialResettingTimerSliceCap)
 	}
-}
-
-// Percentiles panics.
-func (t *StandardResettingTimer) Percentiles([]float64) []int64 {
-	panic("Percentiles called on a StandardResettingTimer")
-}
-
-// Mean panics.
-func (t *StandardResettingTimer) Mean() float64 {
-	panic("Mean called on a StandardResettingTimer")
+	t.sum = 0
+	return snapshot
 }

 // Record the duration of the execution of the given function.
@@ -130,106 +102,70 @@ func (t *StandardResettingTimer) Update(d time.Duration) {
 	t.mutex.Lock()
 	defer t.mutex.Unlock()
 	t.values = append(t.values, int64(d))
+	t.sum += int64(d)
 }

 // Record the duration of an event that started at a time and ends now.
 func (t *StandardResettingTimer) UpdateSince(ts time.Time) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	t.values = append(t.values, int64(time.Since(ts)))
+	t.Update(time.Since(ts))
 }

-// ResettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
-type ResettingTimerSnapshot struct {
+// resettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
+type resettingTimerSnapshot struct {
 	values []int64

 	mean float64
-	thresholdBoundaries []int64
+	max int64
+	min int64
+	thresholdBoundaries []float64

 	calculated bool
 }

-// Snapshot returns the snapshot.
-func (t *ResettingTimerSnapshot) Snapshot() ResettingTimer { return t }
-
-// Time panics.
-func (*ResettingTimerSnapshot) Time(func()) {
-	panic("Time called on a ResettingTimerSnapshot")
-}
-
-// Update panics.
-func (*ResettingTimerSnapshot) Update(time.Duration) {
-	panic("Update called on a ResettingTimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*ResettingTimerSnapshot) UpdateSince(time.Time) {
-	panic("UpdateSince called on a ResettingTimerSnapshot")
-}
-
-// Values returns all values from snapshot.
-func (t *ResettingTimerSnapshot) Values() []int64 {
-	return t.values
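A minimal usage sketch of the snapshot-only API introduced by this hunk, assuming the go-ethereum module is on the import path: all statistics are now read from a one-shot snapshot, and taking the snapshot also resets the timer. The durations and printed values are illustrative only.

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	t := metrics.NewResettingTimer()
	t.Update(10 * time.Millisecond)
	t.Update(30 * time.Millisecond)

	// Snapshot hands back the frozen statistics and resets the timer.
	snap := t.Snapshot()
	fmt.Println(snap.Count())                           // 2
	fmt.Println(snap.Mean())                            // 2e+07 (nanoseconds)
	fmt.Println(snap.Percentiles([]float64{0.5, 0.95})) // quantiles as fractions, not percents
}

+// Count returns the length of the values from snapshot.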
+func (t *resettingTimerSnapshot) Count() int { + return len(t.values) } // Percentiles returns the boundaries for the input percentiles. -func (t *ResettingTimerSnapshot) Percentiles(percentiles []float64) []int64 { +// note: this method is not thread safe +func (t *resettingTimerSnapshot) Percentiles(percentiles []float64) []float64 { t.calc(percentiles) - return t.thresholdBoundaries } // Mean returns the mean of the snapshotted values -func (t *ResettingTimerSnapshot) Mean() float64 { +// note: this method is not thread safe +func (t *resettingTimerSnapshot) Mean() float64 { if !t.calculated { - t.calc([]float64{}) + t.calc(nil) } return t.mean } -func (t *ResettingTimerSnapshot) calc(percentiles []float64) { - slices.Sort(t.values) - - count := len(t.values) - if count > 0 { - min := t.values[0] - max := t.values[count-1] - - cumulativeValues := make([]int64, count) - cumulativeValues[0] = min - for i := 1; i < count; i++ { - cumulativeValues[i] = t.values[i] + cumulativeValues[i-1] - } - - t.thresholdBoundaries = make([]int64, len(percentiles)) - - thresholdBoundary := max - - for i, pct := range percentiles { - if count > 1 { - var abs float64 - if pct >= 0 { - abs = pct - } else { - abs = 100 + pct - } - // poor man's math.Round(x): - // math.Floor(x + 0.5) - indexOfPerc := int(math.Floor(((abs / 100.0) * float64(count)) + 0.5)) - if pct >= 0 && indexOfPerc > 0 { - indexOfPerc -= 1 // index offset=0 - } - thresholdBoundary = t.values[indexOfPerc] - } - - t.thresholdBoundaries[i] = thresholdBoundary - } - - sum := cumulativeValues[count-1] - t.mean = float64(sum) / float64(count) - } else { - t.thresholdBoundaries = make([]int64, len(percentiles)) - t.mean = 0 +// Max returns the max of the snapshotted values +// note: this method is not thread safe +func (t *resettingTimerSnapshot) Max() int64 { + if !t.calculated { + t.calc(nil) + } + return t.max +} + +// Min returns the min of the snapshotted values +// note: this method is not thread safe +func (t *resettingTimerSnapshot) Min() int64 { + if !t.calculated { + t.calc(nil) } + return t.min +} - t.calculated = true +func (t *resettingTimerSnapshot) calc(percentiles []float64) { + scores := CalculatePercentiles(t.values, percentiles) + t.thresholdBoundaries = scores + if len(t.values) == 0 { + return + } + t.min = t.values[0] + t.max = t.values[len(t.values)-1] } diff --git a/metrics/resetting_timer_test.go b/metrics/resetting_timer_test.go index 77c49dc386..4571fc8eb0 100644 --- a/metrics/resetting_timer_test.go +++ b/metrics/resetting_timer_test.go @@ -10,9 +10,9 @@ func TestResettingTimer(t *testing.T) { values []int64 start int end int - wantP50 int64 - wantP95 int64 - wantP99 int64 + wantP50 float64 + wantP95 float64 + wantP99 float64 wantMean float64 wantMin int64 wantMax int64 @@ -21,14 +21,14 @@ func TestResettingTimer(t *testing.T) { values: []int64{}, start: 1, end: 11, - wantP50: 5, wantP95: 10, wantP99: 10, + wantP50: 5.5, wantP95: 10, wantP99: 10, wantMin: 1, wantMax: 10, wantMean: 5.5, }, { values: []int64{}, start: 1, end: 101, - wantP50: 50, wantP95: 95, wantP99: 99, + wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99, wantMin: 1, wantMax: 100, wantMean: 50.5, }, { @@ -56,11 +56,11 @@ func TestResettingTimer(t *testing.T) { values: []int64{1, 10}, start: 0, end: 0, - wantP50: 1, wantP95: 10, wantP99: 10, + wantP50: 5.5, wantP95: 10, wantP99: 10, wantMin: 1, wantMax: 10, wantMean: 5.5, }, } - for ind, tt := range tests { + for i, tt := range tests { timer := NewResettingTimer() for i := tt.start; i < tt.end; 
i++ { @@ -70,37 +70,27 @@ func TestResettingTimer(t *testing.T) { for _, v := range tt.values { timer.Update(time.Duration(v)) } - snap := timer.Snapshot() - ps := snap.Percentiles([]float64{50, 95, 99}) - - val := snap.Values() + ps := snap.Percentiles([]float64{0.50, 0.95, 0.99}) - if len(val) > 0 { - if tt.wantMin != val[0] { - t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin) - } - - if tt.wantMax != val[len(val)-1] { - t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax) - } + if have, want := snap.Min(), tt.wantMin; have != want { + t.Fatalf("%d: min: have %d, want %d", i, have, want) } - - if tt.wantMean != snap.Mean() { - t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean) + if have, want := snap.Max(), tt.wantMax; have != want { + t.Fatalf("%d: max: have %d, want %d", i, have, want) } - - if tt.wantP50 != ps[0] { - t.Fatalf("%d: p50: got %d, want %d", ind, ps[0], tt.wantP50) + if have, want := snap.Mean(), tt.wantMean; have != want { + t.Fatalf("%d: mean: have %v, want %v", i, have, want) } - - if tt.wantP95 != ps[1] { - t.Fatalf("%d: p95: got %d, want %d", ind, ps[1], tt.wantP95) + if have, want := ps[0], tt.wantP50; have != want { + t.Errorf("%d: p50: have %v, want %v", i, have, want) } - - if tt.wantP99 != ps[2] { - t.Fatalf("%d: p99: got %d, want %d", ind, ps[2], tt.wantP99) + if have, want := ps[1], tt.wantP95; have != want { + t.Errorf("%d: p95: have %v, want %v", i, have, want) + } + if have, want := ps[2], tt.wantP99; have != want { + t.Errorf("%d: p99: have %v, want %v", i, have, want) } } } @@ -110,11 +100,11 @@ func TestResettingTimerWithFivePercentiles(t *testing.T) { values []int64 start int end int - wantP05 int64 - wantP20 int64 - wantP50 int64 - wantP95 int64 - wantP99 int64 + wantP05 float64 + wantP20 float64 + wantP50 float64 + wantP95 float64 + wantP99 float64 wantMean float64 wantMin int64 wantMax int64 @@ -123,14 +113,14 @@ func TestResettingTimerWithFivePercentiles(t *testing.T) { values: []int64{}, start: 1, end: 11, - wantP05: 1, wantP20: 2, wantP50: 5, wantP95: 10, wantP99: 10, + wantP05: 1, wantP20: 2.2, wantP50: 5.5, wantP95: 10, wantP99: 10, wantMin: 1, wantMax: 10, wantMean: 5.5, }, { values: []int64{}, start: 1, end: 101, - wantP05: 5, wantP20: 20, wantP50: 50, wantP95: 95, wantP99: 99, + wantP05: 5.050000000000001, wantP20: 20.200000000000003, wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99, wantMin: 1, wantMax: 100, wantMean: 50.5, }, { @@ -158,7 +148,7 @@ func TestResettingTimerWithFivePercentiles(t *testing.T) { values: []int64{1, 10}, start: 0, end: 0, - wantP05: 1, wantP20: 1, wantP50: 1, wantP95: 10, wantP99: 10, + wantP05: 1, wantP20: 1, wantP50: 5.5, wantP95: 10, wantP99: 10, wantMin: 1, wantMax: 10, wantMean: 5.5, }, } @@ -175,42 +165,33 @@ func TestResettingTimerWithFivePercentiles(t *testing.T) { snap := timer.Snapshot() - ps := snap.Percentiles([]float64{5, 20, 50, 95, 99}) - - val := snap.Values() + ps := snap.Percentiles([]float64{0.05, 0.20, 0.50, 0.95, 0.99}) - if len(val) > 0 { - if tt.wantMin != val[0] { - t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin) - } + if tt.wantMin != snap.Min() { + t.Errorf("%d: min: got %d, want %d", ind, snap.Min(), tt.wantMin) + } - if tt.wantMax != val[len(val)-1] { - t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax) - } + if tt.wantMax != snap.Max() { + t.Errorf("%d: max: got %d, want %d", ind, snap.Max(), tt.wantMax) } if tt.wantMean != snap.Mean() { - t.Fatalf("%d: mean: got %.2f, want %.2f", ind, 
snap.Mean(), tt.wantMean) + t.Errorf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean) } - if tt.wantP05 != ps[0] { - t.Fatalf("%d: p05: got %d, want %d", ind, ps[0], tt.wantP05) + t.Errorf("%d: p05: got %v, want %v", ind, ps[0], tt.wantP05) } - if tt.wantP20 != ps[1] { - t.Fatalf("%d: p20: got %d, want %d", ind, ps[1], tt.wantP20) + t.Errorf("%d: p20: got %v, want %v", ind, ps[1], tt.wantP20) } - if tt.wantP50 != ps[2] { - t.Fatalf("%d: p50: got %d, want %d", ind, ps[2], tt.wantP50) + t.Errorf("%d: p50: got %v, want %v", ind, ps[2], tt.wantP50) } - if tt.wantP95 != ps[3] { - t.Fatalf("%d: p95: got %d, want %d", ind, ps[3], tt.wantP95) + t.Errorf("%d: p95: got %v, want %v", ind, ps[3], tt.wantP95) } - if tt.wantP99 != ps[4] { - t.Fatalf("%d: p99: got %d, want %d", ind, ps[4], tt.wantP99) + t.Errorf("%d: p99: got %v, want %v", ind, ps[4], tt.wantP99) } } } diff --git a/metrics/runtimehistogram.go b/metrics/runtimehistogram.go index c68939af1e..92fcbcc281 100644 --- a/metrics/runtimehistogram.go +++ b/metrics/runtimehistogram.go @@ -17,13 +17,19 @@ func getOrRegisterRuntimeHistogram(name string, scale float64, r Registry) *runt // runtimeHistogram wraps a runtime/metrics histogram. type runtimeHistogram struct { - v atomic.Value + v atomic.Value // v is a pointer to a metrics.Float64Histogram scaleFactor float64 } func newRuntimeHistogram(scale float64) *runtimeHistogram { h := &runtimeHistogram{scaleFactor: scale} - h.update(&metrics.Float64Histogram{}) + h.update(new(metrics.Float64Histogram)) + return h +} + +func RuntimeHistogramFromData(scale float64, hist *metrics.Float64Histogram) *runtimeHistogram { + h := &runtimeHistogram{scaleFactor: scale} + h.update(hist) return h } @@ -35,130 +41,107 @@ func (h *runtimeHistogram) update(mh *metrics.Float64Histogram) { return } - s := runtimeHistogramSnapshot{ + s := metrics.Float64Histogram{ Counts: make([]uint64, len(mh.Counts)), Buckets: make([]float64, len(mh.Buckets)), } copy(s.Counts, mh.Counts) - copy(s.Buckets, mh.Buckets) - for i, b := range s.Buckets { + for i, b := range mh.Buckets { s.Buckets[i] = b * h.scaleFactor } h.v.Store(&s) } -func (h *runtimeHistogram) load() *runtimeHistogramSnapshot { - return h.v.Load().(*runtimeHistogramSnapshot) -} - func (h *runtimeHistogram) Clear() { panic("runtimeHistogram does not support Clear") } func (h *runtimeHistogram) Update(int64) { panic("runtimeHistogram does not support Update") } -func (h *runtimeHistogram) Sample() Sample { - return NilSample{} -} - -// Snapshot returns a non-changing cop of the histogram. -func (h *runtimeHistogram) Snapshot() Histogram { - return h.load() -} - -// Count returns the sample count. -func (h *runtimeHistogram) Count() int64 { - return h.load().Count() -} - -// Mean returns an approximation of the mean. -func (h *runtimeHistogram) Mean() float64 { - return h.load().Mean() -} - -// StdDev approximates the standard deviation of the histogram. -func (h *runtimeHistogram) StdDev() float64 { - return h.load().StdDev() -} - -// Variance approximates the variance of the histogram. -func (h *runtimeHistogram) Variance() float64 { - return h.load().Variance() -} - -// Percentile computes the p'th percentile value. -func (h *runtimeHistogram) Percentile(p float64) float64 { - return h.load().Percentile(p) -} -// Percentiles computes all requested percentile values. -func (h *runtimeHistogram) Percentiles(ps []float64) []float64 { - return h.load().Percentiles(ps) -} - -// Max returns the highest sample value. 
-func (h *runtimeHistogram) Max() int64 { - return h.load().Max() +// Snapshot returns a non-changing copy of the histogram. +func (h *runtimeHistogram) Snapshot() HistogramSnapshot { + hist := h.v.Load().(*metrics.Float64Histogram) + return newRuntimeHistogramSnapshot(hist) } -// Min returns the lowest sample value. -func (h *runtimeHistogram) Min() int64 { - return h.load().Min() -} - -// Sum returns the sum of all sample values. -func (h *runtimeHistogram) Sum() int64 { - return h.load().Sum() +type runtimeHistogramSnapshot struct { + internal *metrics.Float64Histogram + calculated bool + // The following fields are (lazily) calculated based on 'internal' + mean float64 + count int64 + min int64 // min is the lowest sample value. + max int64 // max is the highest sample value. + variance float64 } -type runtimeHistogramSnapshot metrics.Float64Histogram - -func (h *runtimeHistogramSnapshot) Clear() { - panic("runtimeHistogram does not support Clear") -} -func (h *runtimeHistogramSnapshot) Update(int64) { - panic("runtimeHistogram does not support Update") -} -func (h *runtimeHistogramSnapshot) Sample() Sample { - return NilSample{} +func newRuntimeHistogramSnapshot(h *metrics.Float64Histogram) *runtimeHistogramSnapshot { + return &runtimeHistogramSnapshot{ + internal: h, + } } -func (h *runtimeHistogramSnapshot) Snapshot() Histogram { - return h +// calc calculates the values for the snapshot. This method is not threadsafe. +func (h *runtimeHistogramSnapshot) calc() { + h.calculated = true + var ( + count int64 // number of samples + sum float64 // approx sum of all sample values + min int64 + max float64 + ) + if len(h.internal.Counts) == 0 { + return + } + for i, c := range h.internal.Counts { + if c == 0 { + continue + } + if count == 0 { // Set min only first loop iteration + min = int64(math.Floor(h.internal.Buckets[i])) + } + count += int64(c) + sum += h.midpoint(i) * float64(c) + // Set max on every iteration + edge := h.internal.Buckets[i+1] + if math.IsInf(edge, 1) { + edge = h.internal.Buckets[i] + } + if edge > max { + max = edge + } + } + h.min = min + h.max = int64(max) + h.mean = sum / float64(count) + h.count = count } // Count returns the sample count. func (h *runtimeHistogramSnapshot) Count() int64 { - var count int64 - for _, c := range h.Counts { - count += int64(c) + if !h.calculated { + h.calc() } - return count + return h.count } -// Mean returns an approximation of the mean. -func (h *runtimeHistogramSnapshot) Mean() float64 { - if len(h.Counts) == 0 { - return 0 - } - mean, _ := h.mean() - return mean +// Size returns the size of the sample at the time the snapshot was taken. +func (h *runtimeHistogramSnapshot) Size() int { + return len(h.internal.Counts) } -// mean computes the mean and also the total sample count. -func (h *runtimeHistogramSnapshot) mean() (mean, totalCount float64) { - var sum float64 - for i, c := range h.Counts { - midpoint := h.midpoint(i) - sum += midpoint * float64(c) - totalCount += float64(c) +// Mean returns an approximation of the mean. +func (h *runtimeHistogramSnapshot) Mean() float64 { + if !h.calculated { + h.calc() } - return sum / totalCount, totalCount + return h.mean } func (h *runtimeHistogramSnapshot) midpoint(bucket int) float64 { - high := h.Buckets[bucket+1] - low := h.Buckets[bucket] + high := h.internal.Buckets[bucket+1] + low := h.internal.Buckets[bucket] if math.IsInf(high, 1) { // The edge of the highest bucket can be +Inf, and it's supposed to mean that this // bucket contains all remaining samples > low. 
We can't get the middle of an @@ -180,23 +163,31 @@ func (h *runtimeHistogramSnapshot) StdDev() float64 { // Variance approximates the variance of the histogram. func (h *runtimeHistogramSnapshot) Variance() float64 { - if len(h.Counts) == 0 { + if len(h.internal.Counts) == 0 { return 0 } - - mean, totalCount := h.mean() - if totalCount <= 1 { + if !h.calculated { + h.calc() + } + if h.count <= 1 { // There is no variance when there are zero or one items. return 0 } - + // Variance is not calculated in 'calc', because it requires a second iteration. + // Therefore we calculate it lazily in this method, triggered either by + // a direct call to Variance or via StdDev. + if h.variance != 0.0 { + return h.variance + } var sum float64 - for i, c := range h.Counts { + + for i, c := range h.internal.Counts { midpoint := h.midpoint(i) - d := midpoint - mean + d := midpoint - h.mean sum += float64(c) * (d * d) } - return sum / (totalCount - 1) + h.variance = sum / float64(h.count-1) + return h.variance } // Percentile computes the p'th percentile value. @@ -231,11 +222,11 @@ func (h *runtimeHistogramSnapshot) Percentiles(ps []float64) []float64 { func (h *runtimeHistogramSnapshot) computePercentiles(thresh []float64) { var totalCount float64 - for i, count := range h.Counts { + for i, count := range h.internal.Counts { totalCount += float64(count) for len(thresh) > 0 && thresh[0] < totalCount { - thresh[0] = h.Buckets[i] + thresh[0] = h.internal.Buckets[i] thresh = thresh[1:] } if len(thresh) == 0 { @@ -250,34 +241,25 @@ func (h *runtimeHistogramSnapshot) computePercentiles(thresh []float64) { // Max returns the highest sample value. func (h *runtimeHistogramSnapshot) Max() int64 { - for i := len(h.Counts) - 1; i >= 0; i-- { - count := h.Counts[i] - if count > 0 { - edge := h.Buckets[i+1] - if math.IsInf(edge, 1) { - edge = h.Buckets[i] - } - return int64(math.Ceil(edge)) - } + if !h.calculated { + h.calc() } - return 0 + return h.max } // Min returns the lowest sample value. func (h *runtimeHistogramSnapshot) Min() int64 { - for i, count := range h.Counts { - if count > 0 { - return int64(math.Floor(h.Buckets[i])) - } + if !h.calculated { + h.calc() } - return 0 + return h.min } // Sum returns the sum of all sample values. func (h *runtimeHistogramSnapshot) Sum() int64 { var sum float64 - for i := range h.Counts { - sum += h.Buckets[i] * float64(h.Counts[i]) + for i := range h.internal.Counts { + sum += h.internal.Buckets[i] * float64(h.internal.Counts[i]) } return int64(math.Ceil(sum)) } diff --git a/metrics/runtimehistogram_test.go b/metrics/runtimehistogram_test.go index d53a014383..cf7e36420a 100644 --- a/metrics/runtimehistogram_test.go +++ b/metrics/runtimehistogram_test.go @@ -1,11 +1,14 @@ package metrics import ( + "bytes" + "encoding/gob" "fmt" "math" "reflect" "runtime/metrics" "testing" + "time" ) var _ Histogram = (*runtimeHistogram)(nil) @@ -74,7 +77,7 @@ func TestRuntimeHistogramStats(t *testing.T) { for i, test := range tests { t.Run(fmt.Sprint(i), func(t *testing.T) { - s := runtimeHistogramSnapshot(test.h) + s := RuntimeHistogramFromData(1.0, &test.h).Snapshot() if v := s.Count(); v != test.Count { t.Errorf("Count() = %v, want %v", v, test.Count) @@ -121,13 +124,39 @@ func approxEqual(x, y, ε float64) bool { // This test verifies that requesting Percentiles in unsorted order // returns them in the requested order. 
func TestRuntimeHistogramStatsPercentileOrder(t *testing.T) { - p := runtimeHistogramSnapshot{ + s := RuntimeHistogramFromData(1.0, &metrics.Float64Histogram{ Counts: []uint64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, Buckets: []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, - } - result := p.Percentiles([]float64{1, 0.2, 0.5, 0.1, 0.2}) + }).Snapshot() + result := s.Percentiles([]float64{1, 0.2, 0.5, 0.1, 0.2}) expected := []float64{10, 2, 5, 1, 2} if !reflect.DeepEqual(result, expected) { t.Fatal("wrong result:", result) } } + +func BenchmarkRuntimeHistogramSnapshotRead(b *testing.B) { + var sLatency = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06T\xff\x82\x01\xff\xa2\x00\xfe\r\xef\x00\x01\x02\x02\x04\x05\x04\b\x15\x17 B?6.L;$!2) \x1a? \x190aH7FY6#\x190\x1d\x14\x10\x1b\r\t\x04\x03\x01\x01\x00\x03\x02\x00\x03\x05\x05\x02\x02\x06\x04\v\x06\n\x15\x18\x13'&.\x12=H/L&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\x
dc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00" + + dserialize := func(data string) *metrics.Float64Histogram { + var res metrics.Float64Histogram + if err := gob.NewDecoder(bytes.NewReader([]byte(data))).Decode(&res); err != nil { + panic(err) + } + return &res + } + latency := RuntimeHistogramFromData(float64(time.Second), dserialize(sLatency)) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + snap := latency.Snapshot() + // These are the fields that influxdb accesses + _ = snap.Count() + _ = snap.Max() + _ = snap.Mean() + _ = snap.Min() + _ = snap.StdDev() + _ = snap.Variance() + _ = snap.Percentiles([]float64{0.25, 0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}) + } +} diff --git a/metrics/sample.go b/metrics/sample.go index 252a878f58..5398dd42d5 100644 --- a/metrics/sample.go +++ b/metrics/sample.go @@ -11,10 +11,7 @@ import ( const rescaleThreshold = time.Hour -// Samples maintain a statistically-significant selection of values from -// a stream. -type Sample interface { - Clear() +type SampleSnapshot interface { Count() int64 Max() int64 Mean() float64 @@ -22,14 +19,19 @@ type Sample interface { Percentile(float64) float64 Percentiles([]float64) []float64 Size() int - Snapshot() Sample StdDev() float64 Sum() int64 - Update(int64) - Values() []int64 Variance() float64 } +// Samples maintain a statistically-significant selection of values from +// a stream. +type Sample interface { + Snapshot() SampleSnapshot + Clear() + Update(int64) +} + // ExpDecaySample is an exponentially-decaying sample using a forward-decaying // priority reservoir. See Cormode et al's "Forward Decay: A Practical Time // Decay Model for Streaming Systems". @@ -77,72 +79,29 @@ func (s *ExpDecaySample) Clear() { s.values.Clear() } -// Count returns the number of samples recorded, which may exceed the -// reservoir size. 
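The rewritten Snapshot methods further below fold the min/max/sum bookkeeping into the single pass that copies the reservoir, so the old per-getter scans (each taking the lock) go away. A standalone sketch of that one-pass accumulation, with a made-up helper name:

package main

import (
	"fmt"
	"math"
)

// onePassStats mirrors the accumulation done in the new Snapshot methods:
// a single scan yields min, max and sum, from which the mean can be derived.
// Callers special-case an empty reservoir, as the real constructor does.
func onePassStats(values []int64) (min, max, sum int64) {
	min, max = math.MaxInt64, math.MinInt64
	for _, v := range values {
		sum += v
		if v > max {
			max = v
		}
		if v < min {
			min = v
		}
	}
	return min, max, sum
}

func main() {
	min, max, sum := onePassStats([]int64{7, 1, 10})
	fmt.Println(min, max, sum) // 1 10 18
}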
-func (s *ExpDecaySample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *ExpDecaySample) Max() int64 { - return SampleMax(s.Values()) -} - -// Mean returns the mean of the values in the sample. -func (s *ExpDecaySample) Mean() float64 { - return SampleMean(s.Values()) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *ExpDecaySample) Min() int64 { - return SampleMin(s.Values()) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *ExpDecaySample) Percentile(p float64) float64 { - return SamplePercentile(s.Values(), p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.Values(), ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *ExpDecaySample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.values.Size() -} - // Snapshot returns a read-only copy of the sample. -func (s *ExpDecaySample) Snapshot() Sample { +func (s *ExpDecaySample) Snapshot() SampleSnapshot { s.mutex.Lock() defer s.mutex.Unlock() - vals := s.values.Values() - values := make([]int64, len(vals)) - for i, v := range vals { - values[i] = v.v - } - return &SampleSnapshot{ - count: s.count, - values: values, + var ( + samples = s.values.Values() + values = make([]int64, len(samples)) + max int64 = math.MinInt64 + min int64 = math.MaxInt64 + sum int64 + ) + for i, item := range samples { + v := item.v + values[i] = v + sum += v + if v > max { + max = v + } + if v < min { + min = v + } } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *ExpDecaySample) StdDev() float64 { - return SampleStdDev(s.Values()) -} - -// Sum returns the sum of the values in the sample. -func (s *ExpDecaySample) Sum() int64 { - return SampleSum(s.Values()) + return newSampleSnapshotPrecalculated(s.count, values, min, max, sum) } // Update samples a new value. @@ -150,23 +109,6 @@ func (s *ExpDecaySample) Update(v int64) { s.update(time.Now(), v) } -// Values returns a copy of the values in the sample. -func (s *ExpDecaySample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - vals := s.values.Values() - values := make([]int64, len(vals)) - for i, v := range vals { - values[i] = v.v - } - return values -} - -// Variance returns the variance of the values in the sample. -func (s *ExpDecaySample) Variance() float64 { - return SampleVariance(s.Values()) -} - // update samples a new value at a particular timestamp. This is a method all // its own to facilitate testing. func (s *ExpDecaySample) update(t time.Time, v int64) { @@ -202,207 +144,160 @@ func (s *ExpDecaySample) update(t time.Time, v int64) { // NilSample is a no-op Sample. type NilSample struct{} -// Clear is a no-op. -func (NilSample) Clear() {} - -// Count is a no-op. -func (NilSample) Count() int64 { return 0 } - -// Max is a no-op. -func (NilSample) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilSample) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilSample) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilSample) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. 
-func (NilSample) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Size is a no-op. -func (NilSample) Size() int { return 0 } - -// Sample is a no-op. -func (NilSample) Snapshot() Sample { return NilSample{} } - -// StdDev is a no-op. -func (NilSample) StdDev() float64 { return 0.0 } - -// Sum is a no-op. -func (NilSample) Sum() int64 { return 0 } - -// Update is a no-op. -func (NilSample) Update(v int64) {} - -// Values is a no-op. -func (NilSample) Values() []int64 { return []int64{} } - -// Variance is a no-op. -func (NilSample) Variance() float64 { return 0.0 } - -// SampleMax returns the maximum value of the slice of int64. -func SampleMax(values []int64) int64 { - if len(values) == 0 { - return 0 - } - var max int64 = math.MinInt64 - for _, v := range values { - if max < v { - max = v - } - } - return max -} - -// SampleMean returns the mean value of the slice of int64. -func SampleMean(values []int64) float64 { - if len(values) == 0 { - return 0.0 - } - return float64(SampleSum(values)) / float64(len(values)) -} - -// SampleMin returns the minimum value of the slice of int64. -func SampleMin(values []int64) int64 { - if len(values) == 0 { - return 0 - } - var min int64 = math.MaxInt64 - for _, v := range values { - if min > v { - min = v - } - } - return min -} +func (NilSample) Clear() {} +func (NilSample) Snapshot() SampleSnapshot { return (*emptySnapshot)(nil) } +func (NilSample) Update(v int64) {} // SamplePercentiles returns an arbitrary percentile of the slice of int64. func SamplePercentile(values []int64, p float64) float64 { - return SamplePercentiles(values, []float64{p})[0] + return CalculatePercentiles(values, []float64{p})[0] } -// SamplePercentiles returns a slice of arbitrary percentiles of the slice of -// int64. -func SamplePercentiles(values []int64, ps []float64) []float64 { +// CalculatePercentiles returns a slice of arbitrary percentiles of the slice of +// int64. This method returns interpolated results, so e.g if there are only two +// values, [0, 10], a 50% percentile will land between them. +// +// Note: As a side-effect, this method will also sort the slice of values. +// Note2: The input format for percentiles is NOT percent! To express 50%, use 0.5, not 50. +func CalculatePercentiles(values []int64, ps []float64) []float64 { scores := make([]float64, len(ps)) size := len(values) - if size > 0 { - slices.Sort(values) - for i, p := range ps { - pos := p * float64(size+1) - if pos < 1.0 { - scores[i] = float64(values[0]) - } else if pos >= float64(size) { - scores[i] = float64(values[size-1]) - } else { - lower := float64(values[int(pos)-1]) - upper := float64(values[int(pos)]) - scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) - } + if size == 0 { + return scores + } + slices.Sort(values) + for i, p := range ps { + pos := p * float64(size+1) + + if pos < 1.0 { + scores[i] = float64(values[0]) + } else if pos >= float64(size) { + scores[i] = float64(values[size-1]) + } else { + lower := float64(values[int(pos)-1]) + upper := float64(values[int(pos)]) + scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) } } return scores } -// SampleSnapshot is a read-only copy of another Sample. -type SampleSnapshot struct { +// sampleSnapshot is a read-only copy of another Sample. 
+type sampleSnapshot struct {
 	count int64
 	values []int64
+
+	max int64
+	min int64
+	mean float64
+	sum int64
+	variance float64
 }

-func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
-	return &SampleSnapshot{
+// newSampleSnapshotPrecalculated creates a read-only sampleSnapshot, using
+// precalculated sums to avoid iterating the values.
+func newSampleSnapshotPrecalculated(count int64, values []int64, min, max, sum int64) *sampleSnapshot {
+	if len(values) == 0 {
+		return &sampleSnapshot{
+			count: count,
+			values: values,
+		}
+	}
+	return &sampleSnapshot{
 		count: count,
 		values: values,
+		max: max,
+		min: min,
+		mean: float64(sum) / float64(len(values)),
+		sum: sum,
 	}
 }

-// Clear panics.
-func (*SampleSnapshot) Clear() {
-	panic("Clear called on a SampleSnapshot")
+// newSampleSnapshot creates a read-only sampleSnapshot, and calculates the
+// derived statistics.
+func newSampleSnapshot(count int64, values []int64) *sampleSnapshot {
+	var (
+		max int64 = math.MinInt64
+		min int64 = math.MaxInt64
+		sum int64
+	)
+	for _, v := range values {
+		sum += v
+		if v > max {
+			max = v
+		}
+		if v < min {
+			min = v
+		}
+	}
+	return newSampleSnapshotPrecalculated(count, values, min, max, sum)
 }

 // Count returns the count of inputs at the time the snapshot was taken.
-func (s *SampleSnapshot) Count() int64 { return s.count }
+func (s *sampleSnapshot) Count() int64 { return s.count }

 // Max returns the maximal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+func (s *sampleSnapshot) Max() int64 { return s.max }

 // Mean returns the mean value at the time the snapshot was taken.
-func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+func (s *sampleSnapshot) Mean() float64 { return s.mean }

 // Min returns the minimal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+func (s *sampleSnapshot) Min() int64 { return s.min }

 // Percentile returns an arbitrary percentile of values at the time the
 // snapshot was taken.
-func (s *SampleSnapshot) Percentile(p float64) float64 {
+func (s *sampleSnapshot) Percentile(p float64) float64 {
 	return SamplePercentile(s.values, p)
 }

 // Percentiles returns a slice of arbitrary percentiles of values at the time
 // the snapshot was taken.
-func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
-	return SamplePercentiles(s.values, ps)
+func (s *sampleSnapshot) Percentiles(ps []float64) []float64 {
+	return CalculatePercentiles(s.values, ps)
 }

 // Size returns the size of the sample at the time the snapshot was taken.
-func (s *SampleSnapshot) Size() int { return len(s.values) }
+func (s *sampleSnapshot) Size() int { return len(s.values) }

 // Snapshot returns the snapshot.
-func (s *SampleSnapshot) Snapshot() Sample { return s }
+func (s *sampleSnapshot) Snapshot() SampleSnapshot { return s }

 // StdDev returns the standard deviation of values at the time the snapshot was
 // taken.
-func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+func (s *sampleSnapshot) StdDev() float64 {
+	if s.variance == 0.0 {
+		s.variance = SampleVariance(s.mean, s.values)
+	}
+	return math.Sqrt(s.variance)
+}

 // Sum returns the sum of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
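A usage sketch for CalculatePercentiles as documented above, assuming only its stated behaviour: quantiles are fractions in [0, 1] rather than percents, results are interpolated, and the input slice is sorted in place as a side effect. The expected outputs follow the interpolation formula and match the {1, 10} case in the resetting timer tests.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	vals := []int64{10, 1}
	// 0.5 means the 50th percentile; with two values the result is interpolated.
	fmt.Println(metrics.CalculatePercentiles(vals, []float64{0.5})) // [5.5]
	fmt.Println(vals)                                               // [1 10], sorted in place
}

-
-// Update panics.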
-func (*SampleSnapshot) Update(int64) { - panic("Update called on a SampleSnapshot") -} +func (s *sampleSnapshot) Sum() int64 { return s.sum } // Values returns a copy of the values in the sample. -func (s *SampleSnapshot) Values() []int64 { +func (s *sampleSnapshot) Values() []int64 { values := make([]int64, len(s.values)) copy(values, s.values) return values } // Variance returns the variance of values at the time the snapshot was taken. -func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } - -// SampleStdDev returns the standard deviation of the slice of int64. -func SampleStdDev(values []int64) float64 { - return math.Sqrt(SampleVariance(values)) -} - -// SampleSum returns the sum of the slice of int64. -func SampleSum(values []int64) int64 { - var sum int64 - for _, v := range values { - sum += v +func (s *sampleSnapshot) Variance() float64 { + if s.variance == 0.0 { + s.variance = SampleVariance(s.mean, s.values) } - return sum + return s.variance } // SampleVariance returns the variance of the slice of int64. -func SampleVariance(values []int64) float64 { +func SampleVariance(mean float64, values []int64) float64 { if len(values) == 0 { return 0.0 } - m := SampleMean(values) var sum float64 for _, v := range values { - d := float64(v) - m + d := float64(v) - mean sum += d * d } return sum / float64(len(values)) @@ -445,83 +340,14 @@ func (s *UniformSample) Clear() { s.values = make([]int64, 0, s.reservoirSize) } -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *UniformSample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *UniformSample) Max() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMax(s.values) -} - -// Mean returns the mean of the values in the sample. -func (s *UniformSample) Mean() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMean(s.values) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *UniformSample) Min() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMin(s.values) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *UniformSample) Percentile(p float64) float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *UniformSample) Percentiles(ps []float64) []float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *UniformSample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return len(s.values) -} - // Snapshot returns a read-only copy of the sample. -func (s *UniformSample) Snapshot() Sample { +func (s *UniformSample) Snapshot() SampleSnapshot { s.mutex.Lock() - defer s.mutex.Unlock() values := make([]int64, len(s.values)) copy(values, s.values) - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *UniformSample) StdDev() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleStdDev(s.values) -} - -// Sum returns the sum of the values in the sample. 
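SampleVariance now takes the mean as an argument, so callers that already hold a precomputed mean (like the snapshots above, or the adjusted benchmarks below) avoid a second pass over the values. A small sketch with made-up numbers:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	vals := []int64{1, 2, 3, 4}
	mean := 2.5 // the caller supplies the mean instead of having it recomputed
	fmt.Println(metrics.SampleVariance(mean, vals)) // 1.25
}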
-func (s *UniformSample) Sum() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleSum(s.values) + count := s.count + s.mutex.Unlock() + return newSampleSnapshot(count, values) } // Update samples a new value. @@ -544,22 +370,6 @@ func (s *UniformSample) Update(v int64) { } } -// Values returns a copy of the values in the sample. -func (s *UniformSample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of the values in the sample. -func (s *UniformSample) Variance() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleVariance(s.values) -} - // expDecaySample represents an individual sample in a heap. type expDecaySample struct { k float64 diff --git a/metrics/sample_test.go b/metrics/sample_test.go index 3ae128d56f..7967357055 100644 --- a/metrics/sample_test.go +++ b/metrics/sample_test.go @@ -8,28 +8,36 @@ import ( "time" ) +const epsilonPercentile = .00000000001 + // Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively // expensive computations like Variance, the cost of copying the Sample, as // approximated by a make and copy, is much greater than the cost of the // computation for small samples and only slightly less for large samples. func BenchmarkCompute1000(b *testing.B) { s := make([]int64, 1000) + var sum int64 for i := 0; i < len(s); i++ { s[i] = int64(i) + sum += int64(i) } + mean := float64(sum) / float64(len(s)) b.ResetTimer() for i := 0; i < b.N; i++ { - SampleVariance(s) + SampleVariance(mean, s) } } func BenchmarkCompute1000000(b *testing.B) { s := make([]int64, 1000000) + var sum int64 for i := 0; i < len(s); i++ { s[i] = int64(i) + sum += int64(i) } + mean := float64(sum) / float64(len(s)) b.ResetTimer() for i := 0; i < b.N; i++ { - SampleVariance(s) + SampleVariance(mean, s) } } func BenchmarkCopy1000(b *testing.B) { @@ -79,65 +87,42 @@ func BenchmarkUniformSample1028(b *testing.B) { benchmarkSample(b, NewUniformSample(1028)) } -func TestExpDecaySample10(t *testing.T) { - s := NewExpDecaySample(100, 0.99) - for i := 0; i < 10; i++ { - s.Update(int64(i)) - } - if size := s.Count(); size != 10 { - t.Errorf("s.Count(): 10 != %v\n", size) - } - if size := s.Size(); size != 10 { - t.Errorf("s.Size(): 10 != %v\n", size) - } - if l := len(s.Values()); l != 10 { - t.Errorf("len(s.Values()): 10 != %v\n", l) - } - for _, v := range s.Values() { - if v > 10 || v < 0 { - t.Errorf("out of range [0, 10): %v\n", v) - } +func min(a, b int) int { + if a < b { + return a } + return b } -func TestExpDecaySample100(t *testing.T) { - s := NewExpDecaySample(1000, 0.01) - for i := 0; i < 100; i++ { - s.Update(int64(i)) - } - if size := s.Count(); size != 100 { - t.Errorf("s.Count(): 100 != %v\n", size) - } - if size := s.Size(); size != 100 { - t.Errorf("s.Size(): 100 != %v\n", size) - } - if l := len(s.Values()); l != 100 { - t.Errorf("len(s.Values()): 100 != %v\n", l) - } - for _, v := range s.Values() { - if v > 100 || v < 0 { - t.Errorf("out of range [0, 100): %v\n", v) +func TestExpDecaySample(t *testing.T) { + for _, tc := range []struct { + reservoirSize int + alpha float64 + updates int + }{ + {100, 0.99, 10}, + {1000, 0.01, 100}, + {100, 0.99, 1000}, + } { + sample := NewExpDecaySample(tc.reservoirSize, tc.alpha) + for i := 0; i < tc.updates; i++ { + sample.Update(int64(i)) } - } -} - -func TestExpDecaySample1000(t *testing.T) { - s := NewExpDecaySample(100, 0.99) - for i := 0; i < 1000; i++ { - 
s.Update(int64(i)) - } - if size := s.Count(); size != 1000 { - t.Errorf("s.Count(): 1000 != %v\n", size) - } - if size := s.Size(); size != 100 { - t.Errorf("s.Size(): 100 != %v\n", size) - } - if l := len(s.Values()); l != 100 { - t.Errorf("len(s.Values()): 100 != %v\n", l) - } - for _, v := range s.Values() { - if v > 1000 || v < 0 { - t.Errorf("out of range [0, 1000): %v\n", v) + snap := sample.Snapshot() + if have, want := int(snap.Count()), tc.updates; have != want { + t.Errorf("have %d want %d", have, want) + } + if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want { + t.Errorf("have %d want %d", have, want) + } + values := snap.(*sampleSnapshot).values + if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want { + t.Errorf("have %d want %d", have, want) + } + for _, v := range values { + if v > int64(tc.updates) || v < 0 { + t.Errorf("out of range [0, %d): %v", tc.updates, v) + } } } } @@ -147,15 +132,16 @@ func TestExpDecaySample1000(t *testing.T) { // The priority becomes +Inf quickly after starting if this is done, // effectively freezing the set of samples until a rescale step happens. func TestExpDecaySampleNanosecondRegression(t *testing.T) { - s := NewExpDecaySample(100, 0.99) + sw := NewExpDecaySample(100, 0.99) for i := 0; i < 100; i++ { - s.Update(10) + sw.Update(10) } time.Sleep(1 * time.Millisecond) for i := 0; i < 100; i++ { - s.Update(20) + sw.Update(20) } - v := s.Values() + s := sw.Snapshot() + v := s.(*sampleSnapshot).values avg := float64(0) for i := 0; i < len(v); i++ { avg += float64(v[i]) @@ -194,24 +180,27 @@ func TestExpDecaySampleStatistics(t *testing.T) { for i := 1; i <= 10000; i++ { s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i)) } - testExpDecaySampleStatistics(t, s) + testExpDecaySampleStatistics(t, s.Snapshot()) } func TestUniformSample(t *testing.T) { - s := NewUniformSample(100) + sw := NewUniformSample(100) for i := 0; i < 1000; i++ { - s.Update(int64(i)) + sw.Update(int64(i)) } + s := sw.Snapshot() if size := s.Count(); size != 1000 { t.Errorf("s.Count(): 1000 != %v\n", size) } if size := s.Size(); size != 100 { t.Errorf("s.Size(): 100 != %v\n", size) } - if l := len(s.Values()); l != 100 { + values := s.(*sampleSnapshot).values + + if l := len(values); l != 100 { t.Errorf("len(s.Values()): 100 != %v\n", l) } - for _, v := range s.Values() { + for _, v := range values { if v > 1000 || v < 0 { t.Errorf("out of range [0, 100): %v\n", v) } @@ -219,12 +208,13 @@ func TestUniformSample(t *testing.T) { } func TestUniformSampleIncludesTail(t *testing.T) { - s := NewUniformSample(100) + sw := NewUniformSample(100) max := 100 for i := 0; i < max; i++ { - s.Update(int64(i)) + sw.Update(int64(i)) } - v := s.Values() + s := sw.Snapshot() + v := s.(*sampleSnapshot).values sum := 0 exp := (max - 1) * max / 2 for i := 0; i < len(v); i++ { @@ -250,7 +240,7 @@ func TestUniformSampleStatistics(t *testing.T) { for i := 1; i <= 10000; i++ { s.Update(int64(i)) } - testUniformSampleStatistics(t, s) + testUniformSampleStatistics(t, s.Snapshot()) } func benchmarkSample(b *testing.B, s Sample) { @@ -267,7 +257,7 @@ func benchmarkSample(b *testing.B, s Sample) { b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N) } -func testExpDecaySampleStatistics(t *testing.T, s Sample) { +func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) { if count := s.Count(); count != 10000 { t.Errorf("s.Count(): 10000 != %v\n", count) } @@ -295,7 +285,7 @@ func testExpDecaySampleStatistics(t 
*testing.T, s Sample) { } } -func testUniformSampleStatistics(t *testing.T, s Sample) { +func testUniformSampleStatistics(t *testing.T, s SampleSnapshot) { if count := s.Count(); count != 10000 { t.Errorf("s.Count(): 10000 != %v\n", count) } @@ -349,8 +339,22 @@ func TestUniformSampleConcurrentUpdateCount(t *testing.T) { } }() for i := 0; i < 1000; i++ { - s.Count() + s.Snapshot().Count() time.Sleep(5 * time.Millisecond) } quit <- struct{}{} } + +func BenchmarkCalculatePercentiles(b *testing.B) { + pss := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999} + var vals []int64 + for i := 0; i < 1000; i++ { + vals = append(vals, int64(rand.Int31())) + } + v := make([]int64, len(vals)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + copy(v, vals) + _ = CalculatePercentiles(v, pss) + } +} diff --git a/metrics/syslog.go b/metrics/syslog.go index 76c8490567..fd856d6973 100644 --- a/metrics/syslog.go +++ b/metrics/syslog.go @@ -16,15 +16,15 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) { r.Each(func(name string, i interface{}) { switch metric := i.(type) { case Counter: - w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) + w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Snapshot().Count())) case CounterFloat64: - w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Count())) + w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Snapshot().Count())) case Gauge: - w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) + w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Snapshot().Value())) case GaugeFloat64: - w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) + w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Snapshot().Value())) case GaugeInfo: - w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Value())) + w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Snapshot().Value())) case Healthcheck: metric.Check() w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) diff --git a/metrics/testdata/opentsb.want b/metrics/testdata/opentsb.want index c8e40a5250..43fe1b2ac2 100644 --- a/metrics/testdata/opentsb.want +++ b/metrics/testdata/opentsb.want @@ -1,4 +1,4 @@ -put pre.elite.count 978307200 0 host=hal9000 +put pre.elite.count 978307200 1337 host=hal9000 put pre.elite.one-minute 978307200 0.00 host=hal9000 put pre.elite.five-minute 978307200 0.00 host=hal9000 put pre.elite.fifteen-minute 978307200 0.00 host=hal9000 diff --git a/metrics/timer.go b/metrics/timer.go index 2e1a9be472..576ad8aa3e 100644 --- a/metrics/timer.go +++ b/metrics/timer.go @@ -5,26 +5,18 @@ import ( "time" ) +type TimerSnapshot interface { + HistogramSnapshot + MeterSnapshot +} + // Timers capture the duration and rate of events. type Timer interface { - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Rate1() float64 - Rate5() float64 - Rate15() float64 - RateMean() float64 - Snapshot() Timer - StdDev() float64 + Snapshot() TimerSnapshot Stop() - Sum() int64 Time(func()) - Update(time.Duration) UpdateSince(time.Time) - Variance() float64 + Update(time.Duration) } // GetOrRegisterTimer returns an existing Timer or constructs and registers a @@ -78,61 +70,11 @@ func NewTimer() Timer { // NilTimer is a no-op Timer. type NilTimer struct{} -// Count is a no-op. -func (NilTimer) Count() int64 { return 0 } - -// Max is a no-op. -func (NilTimer) Max() int64 { return 0 } - -// Mean is a no-op. 
-func (NilTimer) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilTimer) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilTimer) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilTimer) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Rate1 is a no-op. -func (NilTimer) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilTimer) Rate5() float64 { return 0.0 } - -// Rate15 is a no-op. -func (NilTimer) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. -func (NilTimer) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilTimer) Snapshot() Timer { return NilTimer{} } - -// StdDev is a no-op. -func (NilTimer) StdDev() float64 { return 0.0 } - -// Stop is a no-op. -func (NilTimer) Stop() {} - -// Sum is a no-op. -func (NilTimer) Sum() int64 { return 0 } - -// Time is a no-op. -func (NilTimer) Time(f func()) { f() } - -// Update is a no-op. -func (NilTimer) Update(time.Duration) {} - -// UpdateSince is a no-op. -func (NilTimer) UpdateSince(time.Time) {} - -// Variance is a no-op. -func (NilTimer) Variance() float64 { return 0.0 } +func (NilTimer) Snapshot() TimerSnapshot { return (*emptySnapshot)(nil) } +func (NilTimer) Stop() {} +func (NilTimer) Time(f func()) { f() } +func (NilTimer) Update(time.Duration) {} +func (NilTimer) UpdateSince(time.Time) {} // StandardTimer is the standard implementation of a Timer and uses a Histogram // and Meter. @@ -142,82 +84,21 @@ type StandardTimer struct { mutex sync.Mutex } -// Count returns the number of events recorded. -func (t *StandardTimer) Count() int64 { - return t.histogram.Count() -} - -// Max returns the maximum value in the sample. -func (t *StandardTimer) Max() int64 { - return t.histogram.Max() -} - -// Mean returns the mean of the values in the sample. -func (t *StandardTimer) Mean() float64 { - return t.histogram.Mean() -} - -// Min returns the minimum value in the sample. -func (t *StandardTimer) Min() int64 { - return t.histogram.Min() -} - -// Percentile returns an arbitrary percentile of the values in the sample. -func (t *StandardTimer) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. -func (t *StandardTimer) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (t *StandardTimer) Rate1() float64 { - return t.meter.Rate1() -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (t *StandardTimer) Rate5() float64 { - return t.meter.Rate5() -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (t *StandardTimer) Rate15() float64 { - return t.meter.Rate15() -} - -// RateMean returns the meter's mean rate of events per second. -func (t *StandardTimer) RateMean() float64 { - return t.meter.RateMean() -} - // Snapshot returns a read-only copy of the timer. -func (t *StandardTimer) Snapshot() Timer { +func (t *StandardTimer) Snapshot() TimerSnapshot { t.mutex.Lock() defer t.mutex.Unlock() - return &TimerSnapshot{ - histogram: t.histogram.Snapshot().(*HistogramSnapshot), - meter: t.meter.Snapshot().(*MeterSnapshot), + return &timerSnapshot{ + histogram: t.histogram.Snapshot(), + meter: t.meter.Snapshot(), } } -// StdDev returns the standard deviation of the values in the sample. 
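With the per-metric getters gone from Timer itself, call sites take one snapshot and read both the histogram-style and the meter-style statistics from that single consistent view. An illustrative sketch, with made-up values and approximate outputs:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	t := metrics.NewTimer()
	defer t.Stop() // stop the meter's background ticker

	t.Update(47 * time.Millisecond)

	snap := t.Snapshot() // one consistent view of histogram + meter
	fmt.Println(snap.Count())          // 1
	fmt.Println(snap.Max())            // 47000000 (nanoseconds)
	fmt.Println(snap.Rate1())          // events/sec moving average, ~0 this early
	fmt.Println(snap.Percentile(0.95)) // ≈ 4.7e+07 for a single sample
}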
-func (t *StandardTimer) StdDev() float64 { - return t.histogram.StdDev() -} - // Stop stops the meter. func (t *StandardTimer) Stop() { t.meter.Stop() } -// Sum returns the sum in the sample. -func (t *StandardTimer) Sum() int64 { - return t.histogram.Sum() -} - // Record the duration of the execution of the given function. func (t *StandardTimer) Time(f func()) { ts := time.Now() @@ -241,86 +122,63 @@ func (t *StandardTimer) UpdateSince(ts time.Time) { t.meter.Mark(1) } -// Variance returns the variance of the values in the sample. -func (t *StandardTimer) Variance() float64 { - return t.histogram.Variance() -} - -// TimerSnapshot is a read-only copy of another Timer. -type TimerSnapshot struct { - histogram *HistogramSnapshot - meter *MeterSnapshot +// timerSnapshot is a read-only copy of another Timer. +type timerSnapshot struct { + histogram HistogramSnapshot + meter MeterSnapshot } // Count returns the number of events recorded at the time the snapshot was // taken. -func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } +func (t *timerSnapshot) Count() int64 { return t.histogram.Count() } // Max returns the maximum value at the time the snapshot was taken. -func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } +func (t *timerSnapshot) Max() int64 { return t.histogram.Max() } + +// Size returns the size of the sample at the time the snapshot was taken. +func (t *timerSnapshot) Size() int { return t.histogram.Size() } // Mean returns the mean value at the time the snapshot was taken. -func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } +func (t *timerSnapshot) Mean() float64 { return t.histogram.Mean() } // Min returns the minimum value at the time the snapshot was taken. -func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } +func (t *timerSnapshot) Min() int64 { return t.histogram.Min() } // Percentile returns an arbitrary percentile of sampled values at the time the // snapshot was taken. -func (t *TimerSnapshot) Percentile(p float64) float64 { +func (t *timerSnapshot) Percentile(p float64) float64 { return t.histogram.Percentile(p) } // Percentiles returns a slice of arbitrary percentiles of sampled values at // the time the snapshot was taken. -func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { +func (t *timerSnapshot) Percentiles(ps []float64) []float64 { return t.histogram.Percentiles(ps) } // Rate1 returns the one-minute moving average rate of events per second at the // time the snapshot was taken. -func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } +func (t *timerSnapshot) Rate1() float64 { return t.meter.Rate1() } // Rate5 returns the five-minute moving average rate of events per second at // the time the snapshot was taken. -func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } +func (t *timerSnapshot) Rate5() float64 { return t.meter.Rate5() } // Rate15 returns the fifteen-minute moving average rate of events per second // at the time the snapshot was taken. -func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } +func (t *timerSnapshot) Rate15() float64 { return t.meter.Rate15() } // RateMean returns the meter's mean rate of events per second at the time the // snapshot was taken. -func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } - -// Snapshot returns the snapshot. 
-func (t *TimerSnapshot) Snapshot() Timer { return t } +func (t *timerSnapshot) RateMean() float64 { return t.meter.RateMean() } // StdDev returns the standard deviation of the values at the time the snapshot // was taken. -func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } - -// Stop is a no-op. -func (t *TimerSnapshot) Stop() {} +func (t *timerSnapshot) StdDev() float64 { return t.histogram.StdDev() } // Sum returns the sum at the time the snapshot was taken. -func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() } - -// Time panics. -func (*TimerSnapshot) Time(func()) { - panic("Time called on a TimerSnapshot") -} - -// Update panics. -func (*TimerSnapshot) Update(time.Duration) { - panic("Update called on a TimerSnapshot") -} - -// UpdateSince panics. -func (*TimerSnapshot) UpdateSince(time.Time) { - panic("UpdateSince called on a TimerSnapshot") -} +func (t *timerSnapshot) Sum() int64 { return t.histogram.Sum() } // Variance returns the variance of the values at the time the snapshot was // taken. -func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() } +func (t *timerSnapshot) Variance() float64 { return t.histogram.Variance() } diff --git a/metrics/timer_test.go b/metrics/timer_test.go index 903e8e8d49..f10de16c9c 100644 --- a/metrics/timer_test.go +++ b/metrics/timer_test.go @@ -18,7 +18,7 @@ func BenchmarkTimer(b *testing.B) { func TestGetOrRegisterTimer(t *testing.T) { r := NewRegistry() NewRegisteredTimer("foo", r).Update(47) - if tm := GetOrRegisterTimer("foo", r); tm.Count() != 1 { + if tm := GetOrRegisterTimer("foo", r).Snapshot(); tm.Count() != 1 { t.Fatal(tm) } } @@ -27,7 +27,7 @@ func TestTimerExtremes(t *testing.T) { tm := NewTimer() tm.Update(math.MaxInt64) tm.Update(0) - if stdDev := tm.StdDev(); stdDev != 4.611686018427388e+18 { + if stdDev := tm.Snapshot().StdDev(); stdDev != 4.611686018427388e+18 { t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev) } } @@ -56,7 +56,7 @@ func TestTimerFunc(t *testing.T) { }) var ( drift = time.Millisecond * 2 - measured = time.Duration(tm.Max()) + measured = time.Duration(tm.Snapshot().Max()) ceil = actualTime + drift floor = actualTime - drift ) @@ -66,7 +66,7 @@ func TestTimerFunc(t *testing.T) { } func TestTimerZero(t *testing.T) { - tm := NewTimer() + tm := NewTimer().Snapshot() if count := tm.Count(); count != 0 { t.Errorf("tm.Count(): 0 != %v\n", count) } @@ -110,5 +110,5 @@ func ExampleGetOrRegisterTimer() { m := "account.create.latency" t := GetOrRegisterTimer(m, nil) t.Update(47) - fmt.Println(t.Max()) // Output: 47 + fmt.Println(t.Snapshot().Max()) // Output: 47 } diff --git a/metrics/writer.go b/metrics/writer.go index ec2e4f8c6a..098da45c27 100644 --- a/metrics/writer.go +++ b/metrics/writer.go @@ -29,19 +29,19 @@ func WriteOnce(r Registry, w io.Writer) { switch metric := namedMetric.m.(type) { case Counter: fmt.Fprintf(w, "counter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", metric.Count()) + fmt.Fprintf(w, " count: %9d\n", metric.Snapshot().Count()) case CounterFloat64: fmt.Fprintf(w, "counter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %f\n", metric.Count()) + fmt.Fprintf(w, " count: %f\n", metric.Snapshot().Count()) case Gauge: fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %9d\n", metric.Value()) + fmt.Fprintf(w, " value: %9d\n", metric.Snapshot().Value()) case GaugeFloat64: fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %f\n", metric.Value()) + fmt.Fprintf(w, " value: %f\n", 
metric.Snapshot().Value())
 	case GaugeInfo:
 		fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
-		fmt.Fprintf(w, "  value: %s\n", metric.Value().String())
+		fmt.Fprintf(w, "  value: %s\n", metric.Snapshot().Value().String())
 	case Healthcheck:
 		metric.Check()
 		fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)

From eb7438997ba5816964d3e09914a185f2750773f6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Wed, 13 Sep 2023 20:17:55 +0300
Subject: [PATCH 05/98] cmd/geth: rename the protocols field in the metrics
 gauge (#28102)

---
 cmd/geth/config.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 2761ee7453..a5d628d8af 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -185,10 +185,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 			protos = append(protos, fmt.Sprintf("%v/%d", p.Name, p.Version))
 		}
 		metrics.NewRegisteredGaugeInfo("geth/info", nil).Update(metrics.GaugeInfoValue{
-			"arch":          runtime.GOARCH,
-			"os":            runtime.GOOS,
-			"version":       cfg.Node.Version,
-			"eth_protocols": strings.Join(protos, ","),
+			"arch":      runtime.GOARCH,
+			"os":        runtime.GOOS,
+			"version":   cfg.Node.Version,
+			"protocols": strings.Join(protos, ","),
 		})
 	}

From b9b99a12e5159c746ef04b7c8febc4db66817b72 Mon Sep 17 00:00:00 2001
From: Sina Mahmoodi <1591639+s1na@users.noreply.github.com>
Date: Thu, 14 Sep 2023 09:10:37 +0200
Subject: [PATCH 06/98] eth: abort on api operations not available in
 pbss-mode (#28104)

eth: abort on api calls not supporting pbss
---
 eth/api_debug.go | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/eth/api_debug.go b/eth/api_debug.go
index 22dd6b0cf7..dc9f568146 100644
--- a/eth/api_debug.go
+++ b/eth/api_debug.go
@@ -362,6 +362,9 @@ func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]c
 // The (from, to) parameters are the sequence of blocks to search, which can go
 // either forwards or backwards
 func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) {
+	if api.eth.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
+		return 0, errors.New("state history is not yet available in path-based scheme")
+	}
 	db := api.eth.ChainDb()
 	var pivot uint64
 	if p := rawdb.ReadLastPivotNumber(db); p != nil {
@@ -422,6 +425,9 @@ func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error
 // If the value is shorter than the block generation time, or even 0 or negative,
 // the node will flush trie after processing each block (effectively archive mode).
func (api *DebugAPI) SetTrieFlushInterval(interval string) error { + if api.eth.blockchain.TrieDB().Scheme() == rawdb.PathScheme { + return errors.New("trie flush interval is undefined for path-based scheme") + } t, err := time.ParseDuration(interval) if err != nil { return err @@ -431,6 +437,9 @@ func (api *DebugAPI) SetTrieFlushInterval(interval string) error { } // GetTrieFlushInterval gets the current value of in-memory trie flush interval -func (api *DebugAPI) GetTrieFlushInterval() string { - return api.eth.blockchain.GetTrieFlushInterval().String() +func (api *DebugAPI) GetTrieFlushInterval() (string, error) { + if api.eth.blockchain.TrieDB().Scheme() == rawdb.PathScheme { + return "", errors.New("trie flush interval is undefined for path-based scheme") + } + return api.eth.blockchain.GetTrieFlushInterval().String(), nil } From d9fbb71d631d1ad0fb1846042e4c50ab893a6fbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 14 Sep 2023 10:33:59 +0300 Subject: [PATCH 07/98] cmd/geth, internal/flags, go.mod: colorize cli help, support env vars (#28103) * cmd/geth, internal/flags, go.mod: colorize cli help, support env vars * internal/flags: use stdout, not stderr for terminal detection --- cmd/geth/main.go | 1 + go.mod | 2 +- go.sum | 6 ++-- internal/flags/flags.go | 9 +++-- internal/flags/helpers.go | 73 ++++++++++++++++++++++++++++++++++----- 5 files changed, 76 insertions(+), 15 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 09730a7716..a1d148d805 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -244,6 +244,7 @@ func init() { debug.Flags, metricsFlags, ) + flags.AutoEnvVars(app.Flags, "GETH") app.Before = func(ctx *cli.Context) error { maxprocs.Set() // Automatically set GOMAXPROCS to match Linux container CPU quota. 
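As a rough illustration of the naming scheme this patch introduces, here is a minimal standalone sketch of the flag-to-variable derivation performed by the AutoEnvVars helper added to internal/flags/helpers.go further down; the flag names are only examples:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // envVarName mirrors the rule used by AutoEnvVars: uppercase the primary
    // flag name, replace "." and "-" with "_", and prepend the prefix plus an
    // underscore separator.
    func envVarName(prefix, flagName string) string {
    	name := strings.ReplaceAll(strings.ReplaceAll(flagName, ".", "_"), "-", "_")
    	return strings.ToUpper(prefix + "_" + name)
    }

    func main() {
    	fmt.Println(envVarName("GETH", "metrics.addr")) // GETH_METRICS_ADDR
    	fmt.Println(envVarName("GETH", "http.port"))    // GETH_HTTP_PORT
    }

With the patch applied, exporting GETH_HTTP_PORT should behave like passing --http.port on the command line, with explicitly given flags still taking precedence over the environment.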
diff --git a/go.mod b/go.mod index cf242e9d2d..8061220aa6 100644 --- a/go.mod +++ b/go.mod @@ -61,7 +61,7 @@ require ( github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tyler-smith/go-bip39 v1.1.0 - github.com/urfave/cli/v2 v2.24.1 + github.com/urfave/cli/v2 v2.25.7 go.uber.org/automaxprocs v1.5.2 golang.org/x/crypto v0.12.0 golang.org/x/exp v0.0.0-20230810033253-352e893a4cad diff --git a/go.sum b/go.sum index 4cb3a1bb88..9c6fd74e4a 100644 --- a/go.sum +++ b/go.sum @@ -39,7 +39,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= @@ -551,8 +551,8 @@ github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3C github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU= -github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= diff --git a/internal/flags/flags.go b/internal/flags/flags.go index b0756b4e0a..1e98a2b812 100644 --- a/internal/flags/flags.go +++ b/internal/flags/flags.go @@ -68,6 +68,7 @@ type DirectoryFlag struct { Value DirectoryString Aliases []string + EnvVars []string } // For cli.Flag: @@ -102,7 +103,7 @@ func (f *DirectoryFlag) GetCategory() string { return f.Category } func (f *DirectoryFlag) TakesValue() bool { return true } func (f *DirectoryFlag) GetUsage() string { return f.Usage } func (f *DirectoryFlag) GetValue() string { return f.Value.String() } -func (f *DirectoryFlag) GetEnvVars() []string { return nil } // env not supported +func (f *DirectoryFlag) GetEnvVars() []string { return f.EnvVars } func (f *DirectoryFlag) GetDefaultText() string { if f.DefaultText != "" { @@ -156,6 +157,7 @@ type TextMarshalerFlag struct { Value TextMarshaler Aliases []string + EnvVars []string } // For cli.Flag: @@ -187,7 +189,7 @@ func (f *TextMarshalerFlag) GetCategory() string { return f.Category } func (f *TextMarshalerFlag) TakesValue() bool { return true } func 
(f *TextMarshalerFlag) GetUsage() string { return f.Usage } -func (f *TextMarshalerFlag) GetEnvVars() []string { return nil } // env not supported +func (f *TextMarshalerFlag) GetEnvVars() []string { return f.EnvVars } func (f *TextMarshalerFlag) GetValue() string { t, err := f.Value.MarshalText() @@ -237,6 +239,7 @@ type BigFlag struct { Value *big.Int Aliases []string + EnvVars []string } // For cli.Flag: @@ -271,7 +274,7 @@ func (f *BigFlag) GetCategory() string { return f.Category } func (f *BigFlag) TakesValue() bool { return true } func (f *BigFlag) GetUsage() string { return f.Usage } func (f *BigFlag) GetValue() string { return f.Value.String() } -func (f *BigFlag) GetEnvVars() []string { return nil } // env not supported +func (f *BigFlag) GetEnvVars() []string { return f.EnvVars } func (f *BigFlag) GetDefaultText() string { if f.DefaultText != "" { diff --git a/internal/flags/helpers.go b/internal/flags/helpers.go index f210e729dd..bd50498286 100644 --- a/internal/flags/helpers.go +++ b/internal/flags/helpers.go @@ -18,13 +18,20 @@ package flags import ( "fmt" + "os" + "regexp" "strings" "github.com/ethereum/go-ethereum/internal/version" "github.com/ethereum/go-ethereum/params" + "github.com/mattn/go-isatty" "github.com/urfave/cli/v2" ) +// usecolor defines whether the CLI help should use colored output or normal dumb +// colorless terminal formatting. +var usecolor = (isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())) && os.Getenv("TERM") != "dumb" + // NewApp creates an app with sane defaults. func NewApp(usage string) *cli.App { git, _ := version.VCS() @@ -129,6 +136,14 @@ func doMigrateFlags(ctx *cli.Context) { } func init() { + if usecolor { + // Annotate all help categories with colors + cli.AppHelpTemplate = regexp.MustCompile("[A-Z ]+:").ReplaceAllString(cli.AppHelpTemplate, "\u001B[33m$0\u001B[0m") + + // Annotate flag categories with colors (private template, so need to + // copy-paste the entire thing here...) 
+ cli.AppHelpTemplate = strings.ReplaceAll(cli.AppHelpTemplate, "{{template \"visibleFlagCategoryTemplate\" .}}", "{{range .VisibleFlagCategories}}\n {{if .Name}}\u001B[33m{{.Name}}\u001B[0m\n\n {{end}}{{$flglen := len .Flags}}{{range $i, $e := .Flags}}{{if eq (subtract $flglen $i) 1}}{{$e}}\n{{else}}{{$e}}\n {{end}}{{end}}{{end}}") + } cli.FlagStringer = FlagString } @@ -138,30 +153,31 @@ func FlagString(f cli.Flag) string { if !ok { return "" } - needsPlaceholder := df.TakesValue() placeholder := "" if needsPlaceholder { placeholder = "value" } - namesText := pad(cli.FlagNamePrefixer(df.Names(), placeholder), 30) + namesText := cli.FlagNamePrefixer(df.Names(), placeholder) defaultValueString := "" if s := df.GetDefaultText(); s != "" { defaultValueString = " (default: " + s + ")" } - - usage := strings.TrimSpace(df.GetUsage()) envHint := strings.TrimSpace(cli.FlagEnvHinter(df.GetEnvVars(), "")) - if len(envHint) > 0 { - usage += " " + envHint + if envHint != "" { + envHint = " (" + envHint[1:len(envHint)-1] + ")" } - + usage := strings.TrimSpace(df.GetUsage()) usage = wordWrap(usage, 80) usage = indent(usage, 10) - return fmt.Sprintf("\n %s%s\n%s", namesText, defaultValueString, usage) + if usecolor { + return fmt.Sprintf("\n \u001B[32m%-35s%-35s\u001B[0m%s\n%s", namesText, defaultValueString, envHint, usage) + } else { + return fmt.Sprintf("\n %-35s%-35s%s\n%s", namesText, defaultValueString, envHint, usage) + } } func pad(s string, length int) string { @@ -213,3 +229,44 @@ func wordWrap(s string, width int) string { return output.String() } + +// AutoEnvVars extens all the specific CLI flags with automatically generated +// env vars by capitalizing the flag, replacing . with _ and prefixing it with +// the specified string. +// +// Note, the prefix should *not* contain the separator underscore, that will be +// added automatically. 
+func AutoEnvVars(flags []cli.Flag, prefix string) { + for _, flag := range flags { + envvar := strings.ToUpper(prefix + "_" + strings.ReplaceAll(strings.ReplaceAll(flag.Names()[0], ".", "_"), "-", "_")) + + switch flag := flag.(type) { + case *cli.StringFlag: + flag.EnvVars = append(flag.EnvVars, envvar) + + case *cli.BoolFlag: + flag.EnvVars = append(flag.EnvVars, envvar) + + case *cli.IntFlag: + flag.EnvVars = append(flag.EnvVars, envvar) + + case *cli.Uint64Flag: + flag.EnvVars = append(flag.EnvVars, envvar) + + case *cli.DurationFlag: + flag.EnvVars = append(flag.EnvVars, envvar) + + case *cli.PathFlag: + flag.EnvVars = append(flag.EnvVars, envvar) + + case *BigFlag: + flag.EnvVars = append(flag.EnvVars, envvar) + + case *TextMarshalerFlag: + flag.EnvVars = append(flag.EnvVars, envvar) + + case *DirectoryFlag: + flag.EnvVars = append(flag.EnvVars, envvar) + } + } +} From 636c64caa9682257d5193295a3bcb816716e185b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Kj=C3=A6rstad?= Date: Thu, 14 Sep 2023 09:34:52 +0200 Subject: [PATCH 08/98] build: upgrade -dlgo version to Go 1.21.1 (#28113) --- build/checksums.txt | 28 ++++++++++++++-------------- build/ci.go | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/build/checksums.txt b/build/checksums.txt index adfa0bc34d..17fb6dce27 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -4,20 +4,20 @@ 24bac679f3a2d8240d8e08e7f6a70b70c2dabf673317d924cf1d1887b9fe1f81 fixtures.tar.gz # https://go.dev/dl/ -818d46ede85682dd551ad378ef37a4d247006f12ec59b5b755601d2ce114369a go1.21.0.src.tar.gz -b314de9f704ab122c077d2ec8e67e3670affe8865479d1f01991e7ac55d65e70 go1.21.0.darwin-amd64.tar.gz -3aca44de55c5e098de2f406e98aba328898b05d509a2e2a356416faacf2c4566 go1.21.0.darwin-arm64.tar.gz -312a0065714a50862af714e7a5b3fbbd70fe68f905ffb9bcc56d42eadf6bffab go1.21.0.freebsd-386.tar.gz -b8eaa36654625df799654f77f4af0ea7bd9e5e760ebe86e68fe7c484748ae995 go1.21.0.freebsd-amd64.tar.gz -0e6f378d9b072fab0a3d9ff4d5e990d98487d47252dba8160015a61e6bd0bcba go1.21.0.linux-386.tar.gz -d0398903a16ba2232b389fb31032ddf57cac34efda306a0eebac34f0965a0742 go1.21.0.linux-amd64.tar.gz -f3d4548edf9b22f26bbd49720350bbfe59d75b7090a1a2bff1afad8214febaf3 go1.21.0.linux-arm64.tar.gz -e377a0004957c8c560a3ff99601bce612330a3d95ba3b0a2ae144165fc87deb1 go1.21.0.linux-armv6l.tar.gz -e938ffc81d8ebe5efc179240960ba22da6a841ff05d5cab7ce2547112b14a47f go1.21.0.linux-ppc64le.tar.gz -be7338df8e5d5472dfa307b0df2b446d85d001b0a2a3cdb1a14048d751b70481 go1.21.0.linux-s390x.tar.gz -af920fbb74fc3d173118dc3cc35f02a709c1de642700e92a91a7d16981df3fec go1.21.0.windows-386.zip -732121e64e0ecb07c77fdf6cc1bc5ce7b242c2d40d4ac29021ad4c64a08731f6 go1.21.0.windows-amd64.zip -41342f5a0f8c083b14c68bde738ddcd313a4f53a5854bfdfab47f0e88247de12 go1.21.0.windows-arm64.zip +bfa36bf75e9a1e9cbbdb9abcf9d1707e479bd3a07880a8ae3564caee5711cb99 go1.21.1.src.tar.gz +809f5b0ef4f7dcdd5f51e9630a5b2e5a1006f22a047126d61560cdc365678a19 go1.21.1.darwin-amd64.tar.gz +ffd40391a1e995855488b008ad9326ff8c2e81803a6e80894401003bae47fcf1 go1.21.1.darwin-arm64.tar.gz +9919a9a4dc82371aba3da5b7c830bcb6249fc1502cd26d959eb340a60e41ee01 go1.21.1.freebsd-386.tar.gz +2571f10f6047e04d87c1f5986a05e5e8f7b511faf98803ef12b66d563845d2a1 go1.21.1.freebsd-amd64.tar.gz +b93850666cdadbd696a986cf7b03111fe99db8c34a9aaa113d7c96d0081e1901 go1.21.1.linux-386.tar.gz +b3075ae1ce5dab85f89bc7905d1632de23ca196bd8336afd93fa97434cfa55ae go1.21.1.linux-amd64.tar.gz +7da1a3936a928fd0b2602ed4f3ef535b8cd1990f1503b8d3e1acc0fa0759c967 go1.21.1.linux-arm64.tar.gz 
+f3716a43f59ae69999841d6007b42c9e286e8d8ce470656fb3e70d7be2d7ca85 go1.21.1.linux-armv6l.tar.gz +eddf018206f8a5589bda75252b72716d26611efebabdca5d0083ec15e9e41ab7 go1.21.1.linux-ppc64le.tar.gz +a83b3e8eb4dbf76294e773055eb51397510ff4d612a247bad9903560267bba6d go1.21.1.linux-s390x.tar.gz +170256c820f466f29d64876f25f4dfa4029ed9902a0a9095d8bd603aecf4d83b go1.21.1.windows-386.zip +10a4f5b63215d11d1770453733dbcbf024f3f74872f84e28d7ea59f0250316c6 go1.21.1.windows-amd64.zip +41135ce6e0ced4bc1e459cb96bd4090c9dc2062e24179c3f337d855af9b560ef go1.21.1.windows-arm64.zip # https://github.com/golangci/golangci-lint/releases fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz diff --git a/build/ci.go b/build/ci.go index 6f77e5df60..400c8bdd68 100644 --- a/build/ci.go +++ b/build/ci.go @@ -139,7 +139,7 @@ var ( // This is the version of Go that will be downloaded by // // go run ci.go install -dlgo - dlgoVersion = "1.21.0" + dlgoVersion = "1.21.1" // This is the version of Go that will be used to bootstrap the PPA builder. // From 86bc2cdf331602abe6beb8948186815949bd9b4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 14 Sep 2023 10:58:13 +0300 Subject: [PATCH 09/98] internal/flags: fix linter --- internal/flags/helpers.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/internal/flags/helpers.go b/internal/flags/helpers.go index bd50498286..bdeb7e38e0 100644 --- a/internal/flags/helpers.go +++ b/internal/flags/helpers.go @@ -180,13 +180,6 @@ func FlagString(f cli.Flag) string { } } -func pad(s string, length int) string { - if len(s) < length { - s += strings.Repeat(" ", length-len(s)) - } - return s -} - func indent(s string, nspace int) string { ind := strings.Repeat(" ", nspace) return ind + strings.ReplaceAll(s, "\n", "\n"+ind) From 8514d665eea096ad4aa6e2090b631b94dc6b7df4 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 14 Sep 2023 16:23:16 +0800 Subject: [PATCH 10/98] graphql: add 4844 blob fields (#27963) This adds block and receipt fields for EIP-4844. 
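As a hedged end-to-end sketch of what the new fields look like from a client's perspective, the following standalone program queries them over HTTP; it assumes a local node started with --graphql serving the endpoint below, and uses only field names introduced by this patch:

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"net/http"
    )

    func main() {
    	// Ask for the new block-level and transaction-level blob fields.
    	query := []byte(`{"query":"{ block { blobGasUsed excessBlobGas transactions { hash maxFeePerBlobGas blobVersionedHashes blobGasUsed blobGasPrice } } }"}`)
    	resp, err := http.Post("http://localhost:8545/graphql", "application/json", bytes.NewReader(query))
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	body, _ := io.ReadAll(resp.Body)
    	fmt.Println(string(body)) // raw JSON; the blob fields resolve to null for pre-Cancun blocks
    }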
--------- Signed-off-by: jsvisa Co-authored-by: Sina Mahmoodi --- graphql/graphql.go | 88 +++++++++++++++++++++++++++++++++++++++++----- graphql/schema.go | 80 +++++++++++++++++++++++------------------ 2 files changed, 126 insertions(+), 42 deletions(-) diff --git a/graphql/graphql.go b/graphql/graphql.go index 6417fc9ed3..8304a64cf4 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -272,8 +272,6 @@ func (t *Transaction) GasPrice(ctx context.Context) hexutil.Big { return hexutil.Big{} } switch tx.Type() { - case types.AccessListTxType: - return hexutil.Big(*tx.GasPrice()) case types.DynamicFeeTxType: if block != nil { if baseFee, _ := block.BaseFeePerGas(ctx); baseFee != nil { @@ -312,9 +310,7 @@ func (t *Transaction) MaxFeePerGas(ctx context.Context) *hexutil.Big { return nil } switch tx.Type() { - case types.AccessListTxType: - return nil - case types.DynamicFeeTxType: + case types.DynamicFeeTxType, types.BlobTxType: return (*hexutil.Big)(tx.GasFeeCap()) default: return nil @@ -327,15 +323,33 @@ func (t *Transaction) MaxPriorityFeePerGas(ctx context.Context) *hexutil.Big { return nil } switch tx.Type() { - case types.AccessListTxType: - return nil - case types.DynamicFeeTxType: + case types.DynamicFeeTxType, types.BlobTxType: return (*hexutil.Big)(tx.GasTipCap()) default: return nil } } +func (t *Transaction) MaxFeePerBlobGas(ctx context.Context) *hexutil.Big { + tx, _ := t.resolve(ctx) + if tx == nil { + return nil + } + return (*hexutil.Big)(tx.BlobGasFeeCap()) +} + +func (t *Transaction) BlobVersionedHashes(ctx context.Context) *[]common.Hash { + tx, _ := t.resolve(ctx) + if tx == nil { + return nil + } + if tx.Type() != types.BlobTxType { + return nil + } + blobHashes := tx.BlobHashes() + return &blobHashes +} + func (t *Transaction) EffectiveTip(ctx context.Context) (*hexutil.Big, error) { tx, block := t.resolve(ctx) if tx == nil { @@ -468,6 +482,40 @@ func (t *Transaction) CumulativeGasUsed(ctx context.Context) (*hexutil.Uint64, e return &ret, nil } +func (t *Transaction) BlobGasUsed(ctx context.Context) (*hexutil.Uint64, error) { + tx, _ := t.resolve(ctx) + if tx == nil { + return nil, nil + } + if tx.Type() != types.BlobTxType { + return nil, nil + } + + receipt, err := t.getReceipt(ctx) + if err != nil || receipt == nil { + return nil, err + } + ret := hexutil.Uint64(receipt.BlobGasUsed) + return &ret, nil +} + +func (t *Transaction) BlobGasPrice(ctx context.Context) (*hexutil.Big, error) { + tx, _ := t.resolve(ctx) + if tx == nil { + return nil, nil + } + if tx.Type() != types.BlobTxType { + return nil, nil + } + + receipt, err := t.getReceipt(ctx) + if err != nil || receipt == nil { + return nil, err + } + ret := (*hexutil.Big)(receipt.BlobGasPrice) + return ret, nil +} + func (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs) (*Account, error) { receipt, err := t.getReceipt(ctx) if err != nil || receipt == nil || receipt.ContractAddress == (common.Address{}) { @@ -1019,6 +1067,30 @@ func (b *Block) Withdrawals(ctx context.Context) (*[]*Withdrawal, error) { return &ret, nil } +func (b *Block) BlobGasUsed(ctx context.Context) (*hexutil.Uint64, error) { + header, err := b.resolveHeader(ctx) + if err != nil { + return nil, err + } + if header.BlobGasUsed == nil { + return nil, nil + } + ret := hexutil.Uint64(*header.BlobGasUsed) + return &ret, nil +} + +func (b *Block) ExcessBlobGas(ctx context.Context) (*hexutil.Uint64, error) { + header, err := b.resolveHeader(ctx) + if err != nil { + return nil, err + } + if header.ExcessBlobGas == nil { + 
return nil, nil + } + ret := hexutil.Uint64(*header.ExcessBlobGas) + return &ret, nil +} + // BlockFilterCriteria encapsulates criteria passed to a `logs` accessor inside // a block. type BlockFilterCriteria struct { diff --git a/graphql/schema.go b/graphql/schema.go index 5de5bad305..5738923fc1 100644 --- a/graphql/schema.go +++ b/graphql/schema.go @@ -71,8 +71,8 @@ const schema string = ` transaction: Transaction! } - #EIP-2718 - type AccessTuple{ + # EIP-2718 + type AccessTuple { address: Address! storageKeys : [Bytes32!]! } @@ -112,6 +112,8 @@ const schema string = ` maxFeePerGas: BigInt # MaxPriorityFeePerGas is the maximum miner tip per gas offered to include a transaction, in wei. maxPriorityFeePerGas: BigInt + # MaxFeePerBlobGas is the maximum blob gas fee cap per blob the sender is willing to pay for blob transaction, in wei. + maxFeePerBlobGas: BigInt # EffectiveTip is the actual amount of reward going to miner after considering the max fee cap. effectiveTip: BigInt # Gas is the maximum amount of gas this transaction can consume. @@ -141,6 +143,10 @@ const schema string = ` # coerced into the EIP-1559 format by setting both maxFeePerGas and # maxPriorityFeePerGas as the transaction's gas price. effectiveGasPrice: BigInt + # BlobGasUsed is the amount of blob gas used by this transaction. + blobGasUsed: Long + # blobGasPrice is the actual value per blob gas deducted from the senders account. + blobGasPrice: BigInt # CreatedContract is the account that was created by a contract creation # transaction. If the transaction was not a contract creation transaction, # or it has not yet been mined, this field will be null. @@ -162,6 +168,8 @@ const schema string = ` # RawReceipt is the canonical encoding of the receipt. For post EIP-2718 typed transactions # this is equivalent to TxType || ReceiptEncoding. rawReceipt: Bytes! + # BlobVersionedHashes is a set of hash outputs from the blobs in the transaction. + blobVersionedHashes: [Bytes32!] } # BlockFilterCriteria encapsulates log filter criteria for a filter applied @@ -171,16 +179,16 @@ const schema string = ` # empty, results will not be filtered by address. addresses: [Address!] # Topics list restricts matches to particular event topics. Each event has a list - # of topics. Topics matches a prefix of that list. An empty element array matches any - # topic. Non-empty elements represent an alternative that matches any of the - # contained topics. - # - # Examples: - # - [] or nil matches any topic list - # - [[A]] matches topic A in first position - # - [[], [B]] matches any topic in first position, B in second position - # - [[A], [B]] matches topic A in first position, B in second position - # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position + # of topics. Topics matches a prefix of that list. An empty element array matches any + # topic. Non-empty elements represent an alternative that matches any of the + # contained topics. + # + # Examples: + # - [] or nil matches any topic list + # - [[A]] matches topic A in first position + # - [[], [B]] matches any topic in first position, B in second position + # - [[A], [B]] matches topic A in first position, B in second position + # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position topics: [[Bytes32!]!] } @@ -267,6 +275,10 @@ const schema string = ` # Withdrawals is a list of withdrawals associated with this block. If # withdrawals are unavailable for this block, this field will be null. 
withdrawals: [Withdrawal!] + # BlobGasUsed is the total amount of gas used by the transactions. + blobGasUsed: Long + # ExcessBlobGas is a running total of blob gas consumed in excess of the target, prior to the block. + excessBlobGas: Long } # CallData represents the data associated with a local contract call. @@ -312,21 +324,21 @@ const schema string = ` # empty, results will not be filtered by address. addresses: [Address!] # Topics list restricts matches to particular event topics. Each event has a list - # of topics. Topics matches a prefix of that list. An empty element array matches any - # topic. Non-empty elements represent an alternative that matches any of the - # contained topics. - # - # Examples: - # - [] or nil matches any topic list - # - [[A]] matches topic A in first position - # - [[], [B]] matches any topic in first position, B in second position - # - [[A], [B]] matches topic A in first position, B in second position - # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position + # of topics. Topics matches a prefix of that list. An empty element array matches any + # topic. Non-empty elements represent an alternative that matches any of the + # contained topics. + # + # Examples: + # - [] or nil matches any topic list + # - [[A]] matches topic A in first position + # - [[], [B]] matches any topic in first position, B in second position + # - [[A], [B]] matches topic A in first position, B in second position + # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position topics: [[Bytes32!]!] } # SyncState contains the current synchronisation state of the client. - type SyncState{ + type SyncState { # StartingBlock is the block number at which synchronisation started. startingBlock: Long! # CurrentBlock is the point at which synchronisation has presently reached. @@ -337,17 +349,17 @@ const schema string = ` # Pending represents the current pending state. type Pending { - # TransactionCount is the number of transactions in the pending state. - transactionCount: Long! - # Transactions is a list of transactions in the current pending state. - transactions: [Transaction!] - # Account fetches an Ethereum account for the pending state. - account(address: Address!): Account! - # Call executes a local call operation for the pending state. - call(data: CallData!): CallResult - # EstimateGas estimates the amount of gas that will be required for - # successful execution of a transaction for the pending state. - estimateGas(data: CallData!): Long! + # TransactionCount is the number of transactions in the pending state. + transactionCount: Long! + # Transactions is a list of transactions in the current pending state. + transactions: [Transaction!] + # Account fetches an Ethereum account for the pending state. + account(address: Address!): Account! + # Call executes a local call operation for the pending state. + call(data: CallData!): CallResult + # EstimateGas estimates the amount of gas that will be required for + # successful execution of a transaction for the pending state. + estimateGas(data: CallData!): Long! 
} type Query { From ee654626ad187629a184f3dac0ecaf448c64a3d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 14 Sep 2023 12:43:58 +0300 Subject: [PATCH 11/98] internal/flags: fix loading env vars for custom flags (#28117) --- internal/flags/flags.go | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/internal/flags/flags.go b/internal/flags/flags.go index 1e98a2b812..69e9743556 100644 --- a/internal/flags/flags.go +++ b/internal/flags/flags.go @@ -20,11 +20,13 @@ import ( "encoding" "errors" "flag" + "fmt" "math/big" "os" "os/user" "path/filepath" "strings" + "syscall" "github.com/ethereum/go-ethereum/common/math" "github.com/urfave/cli/v2" @@ -80,6 +82,14 @@ func (f *DirectoryFlag) String() string { return cli.FlagStringer(f) } // Apply called by cli library, grabs variable from environment (if in env) // and adds variable to flag set for parsing. func (f *DirectoryFlag) Apply(set *flag.FlagSet) error { + for _, envVar := range f.EnvVars { + envVar = strings.TrimSpace(envVar) + if value, found := syscall.Getenv(envVar); found { + f.Value.Set(value) + f.HasBeenSet = true + break + } + } eachName(f, func(name string) { set.Var(&f.Value, f.Name, f.Usage) }) @@ -167,6 +177,16 @@ func (f *TextMarshalerFlag) IsSet() bool { return f.HasBeenSet } func (f *TextMarshalerFlag) String() string { return cli.FlagStringer(f) } func (f *TextMarshalerFlag) Apply(set *flag.FlagSet) error { + for _, envVar := range f.EnvVars { + envVar = strings.TrimSpace(envVar) + if value, found := syscall.Getenv(envVar); found { + if err := f.Value.UnmarshalText([]byte(value)); err != nil { + return fmt.Errorf("could not parse %q from environment variable %q for flag %s: %s", value, envVar, f.Name, err) + } + f.HasBeenSet = true + break + } + } eachName(f, func(name string) { set.Var(textMarshalerVal{f.Value}, f.Name, f.Usage) }) @@ -249,6 +269,16 @@ func (f *BigFlag) IsSet() bool { return f.HasBeenSet } func (f *BigFlag) String() string { return cli.FlagStringer(f) } func (f *BigFlag) Apply(set *flag.FlagSet) error { + for _, envVar := range f.EnvVars { + envVar = strings.TrimSpace(envVar) + if value, found := syscall.Getenv(envVar); found { + if _, ok := f.Value.SetString(value, 10); !ok { + return fmt.Errorf("could not parse %q from environment variable %q for flag %s", value, envVar, f.Name) + } + f.HasBeenSet = true + break + } + } eachName(f, func(name string) { f.Value = new(big.Int) set.Var((*bigValue)(f.Value), f.Name, f.Usage) From 909dd4a109c35ef7a12ecbed7168efc6c97d0a83 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 14 Sep 2023 12:28:40 +0200 Subject: [PATCH 12/98] rlp/rlpgen: remove build tag (#28106) * rlp/rlpgen: remove build tag This tag was supposed to prevent unstable output when types reference each other. Imagine there are two struct types A and B, where a reference to type B is in A. If I run rlpgen on type B first, and then on type A, the generator will see the B.EncodeRLP method and call it. However, if I run rlpgen on type A first, it will inline the encoding of B. The solution I chose for the initial release of rlpgen was to just ignore methods generated by rlpgen using a build tag. But there is a problem with this: if any code in the package calls EncodeRLP explicitly, the package can't be loaded without errors anymore in rlpgen, because the loader ignores it. 
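To make the ordering problem concrete, here is a minimal hypothetical sketch of the A/B situation described above (the type names follow the message; nothing here is taken from the actual core/types definitions):

    package demo

    // B is a type that rlpgen may or may not have been run on yet.
    type B struct {
    	N uint64
    }

    // A references B: the generated encoder for A either calls B's generated
    // EncodeRLP method (if rlpgen processed B first) or inlines the encoding
    // of B's fields (if A was processed first), which is what made the output
    // order-dependent.
    type A struct {
    	Child B
    }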
Would be nice if there was a way to just make it ignore invalid functions during type checking (they're not necessary for rlpgen), but golang.org/x/tools/go/packages does not provide a way of ignoring them. Luckily, the types we use rlpgen with do not reference each other right now, so we can just remove the build tags for now. --- core/types/gen_account_rlp.go | 3 --- core/types/gen_header_rlp.go | 3 --- core/types/gen_log_rlp.go | 3 --- core/types/gen_withdrawal_rlp.go | 3 --- rlp/rlpgen/main.go | 7 ++----- 5 files changed, 2 insertions(+), 17 deletions(-) diff --git a/core/types/gen_account_rlp.go b/core/types/gen_account_rlp.go index 5181d88411..3fb36f4038 100644 --- a/core/types/gen_account_rlp.go +++ b/core/types/gen_account_rlp.go @@ -1,8 +1,5 @@ // Code generated by rlpgen. DO NOT EDIT. -//go:build !norlpgen -// +build !norlpgen - package types import "github.com/ethereum/go-ethereum/rlp" diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go index b91a255a55..ed6a1a002c 100644 --- a/core/types/gen_header_rlp.go +++ b/core/types/gen_header_rlp.go @@ -1,8 +1,5 @@ // Code generated by rlpgen. DO NOT EDIT. -//go:build !norlpgen -// +build !norlpgen - package types import "github.com/ethereum/go-ethereum/rlp" diff --git a/core/types/gen_log_rlp.go b/core/types/gen_log_rlp.go index cbdb6736e2..7e89629668 100644 --- a/core/types/gen_log_rlp.go +++ b/core/types/gen_log_rlp.go @@ -1,8 +1,5 @@ // Code generated by rlpgen. DO NOT EDIT. -//go:build !norlpgen -// +build !norlpgen - package types import "github.com/ethereum/go-ethereum/rlp" diff --git a/core/types/gen_withdrawal_rlp.go b/core/types/gen_withdrawal_rlp.go index d0b4e0147a..6a97c04c81 100644 --- a/core/types/gen_withdrawal_rlp.go +++ b/core/types/gen_withdrawal_rlp.go @@ -1,8 +1,5 @@ // Code generated by rlpgen. DO NOT EDIT. -//go:build !norlpgen -// +build !norlpgen - package types import "github.com/ethereum/go-ethereum/rlp" diff --git a/rlp/rlpgen/main.go b/rlp/rlpgen/main.go index 25d4393cc6..b3a74b9df1 100644 --- a/rlp/rlpgen/main.go +++ b/rlp/rlpgen/main.go @@ -73,9 +73,8 @@ type Config struct { func (cfg *Config) process() (code []byte, err error) { // Load packages. pcfg := &packages.Config{ - Mode: packages.NeedName | packages.NeedTypes | packages.NeedImports | packages.NeedDeps, - Dir: cfg.Dir, - BuildFlags: []string{"-tags", "norlpgen"}, + Mode: packages.NeedName | packages.NeedTypes, + Dir: cfg.Dir, } ps, err := packages.Load(pcfg, pathOfPackageRLP, ".") if err != nil { @@ -117,8 +116,6 @@ func (cfg *Config) process() (code []byte, err error) { // This is done here to avoid processing these lines with gofmt. var header bytes.Buffer fmt.Fprint(&header, "// Code generated by rlpgen. 
DO NOT EDIT.\n\n") - fmt.Fprint(&header, "//go:build !norlpgen\n") - fmt.Fprint(&header, "// +build !norlpgen\n\n") return append(header.Bytes(), code...), nil } From 65a17c00c7a903f314db2e53381bed19716d50db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 14 Sep 2023 13:56:06 +0300 Subject: [PATCH 13/98] metrics: add support for enabling metrics from env vars (#28118) --- metrics/metrics.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/metrics/metrics.go b/metrics/metrics.go index 97f03fa31d..9ca8f115c0 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -9,7 +9,9 @@ import ( "os" "runtime/metrics" "runtime/pprof" + "strconv" "strings" + "syscall" "time" "github.com/ethereum/go-ethereum/log" @@ -30,13 +32,35 @@ var EnabledExpensive = false // enablerFlags is the CLI flag names to use to enable metrics collections. var enablerFlags = []string{"metrics"} +// enablerEnvVars is the env var names to use to enable metrics collections. +var enablerEnvVars = []string{"GETH_METRICS"} + // expensiveEnablerFlags is the CLI flag names to use to enable metrics collections. var expensiveEnablerFlags = []string{"metrics.expensive"} +// expensiveEnablerEnvVars is the env var names to use to enable metrics collections. +var expensiveEnablerEnvVars = []string{"GETH_METRICS_EXPENSIVE"} + // Init enables or disables the metrics system. Since we need this to run before // any other code gets to create meters and timers, we'll actually do an ugly hack // and peek into the command line args for the metrics flag. func init() { + for _, enabler := range enablerEnvVars { + if val, found := syscall.Getenv(enabler); found && !Enabled { + if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later + log.Info("Enabling metrics collection") + Enabled = true + } + } + } + for _, enabler := range expensiveEnablerEnvVars { + if val, found := syscall.Getenv(enabler); found && !EnabledExpensive { + if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later + log.Info("Enabling expensive metrics collection") + EnabledExpensive = true + } + } + } for _, arg := range os.Args { flag := strings.TrimLeft(arg, "-") From 48fdb79de5a3dc02be1c44479247515275b8b76a Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Thu, 14 Sep 2023 23:09:07 -0700 Subject: [PATCH 14/98] core/state: check err for iter.Error in fastDeleteStorage (#28122) core/state: check err for iter.Error --- core/state/statedb.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index c1b5b0874c..a59de16a70 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -973,7 +973,7 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo return true, size, nil, nil, nil } slot := common.CopyBytes(iter.Slot()) - if iter.Error() != nil { // error might occur after Slot function + if err := iter.Error(); err != nil { // error might occur after Slot function return false, 0, nil, nil, err } size += common.StorageSize(common.HashLength + len(slot)) @@ -983,7 +983,7 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo return false, 0, nil, nil, err } } - if iter.Error() != nil { // error might occur during iteration + if err := iter.Error(); err != nil { // error might occur during iteration return false, 0, nil, nil, err } if stack.Hash() != root { From 4fa3db49a1e485b8d110c87de6a44f460b45bb9a 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 15 Sep 2023 15:06:25 +0300 Subject: [PATCH 15/98] eth/downloader: prevent pivot moves after state commit (#28126) --- eth/downloader/downloader.go | 37 +++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 9a805396c4..33c0a2bcad 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -1606,17 +1606,30 @@ func (d *Downloader) processSnapSyncContent() error { // To cater for moving pivot points, track the pivot block and subsequently // accumulated download results separately. + // + // These will be nil up to the point where we reach the pivot, and will only + // be set temporarily if the synced blocks are piling up, but the pivot is + // still busy downloading. In that case, we need to occasionally check for + // pivot moves, so need to unblock the loop. These fields will accumulate + // the results in the meantime. + // + // Note, there's no issue with memory piling up since after 64 blocks the + // pivot will forcefully move so these accumulators will be dropped. var ( oldPivot *fetchResult // Locked in pivot block, might change eventually oldTail []*fetchResult // Downloaded content after the pivot ) for { - // Wait for the next batch of downloaded data to be available, and if the pivot - // block became stale, move the goalpost - results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness + // Wait for the next batch of downloaded data to be available. If we have + // not yet reached the pivot point, wait blockingly as there's no need to + // spin-loop check for pivot moves. If we reached the pivot but have not + // yet processed it, check for results async, so we might notice pivot + // moves while state syncing. If the pivot was passed fully, block again + // as there's no more reason to check for pivot moves at all. + results := d.queue.Results(oldPivot == nil) if len(results) == 0 { // If pivot sync is done, stop - if oldPivot == nil { + if d.committed.Load() { d.reportSnapSyncProgress(true) return sync.Cancel() } @@ -1639,21 +1652,23 @@ func (d *Downloader) processSnapSyncContent() error { pivot := d.pivotHeader d.pivotLock.RUnlock() - if oldPivot == nil { - if pivot.Root != sync.root { - sync.Cancel() - sync = d.syncState(pivot.Root) + if oldPivot == nil { // no results piling up, we can move the pivot + if !d.committed.Load() { // not yet passed the pivot, we can move the pivot + if pivot.Root != sync.root { // pivot position changed, we can move the pivot + sync.Cancel() + sync = d.syncState(pivot.Root) - go closeOnErr(sync) + go closeOnErr(sync) + } } - } else { + } else { // results already piled up, consume before handling pivot move results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) } // Split around the pivot block and process the two sides via snap/full sync if !d.committed.Load() { latest := results[len(results)-1].Header // If the height is above the pivot block by 2 sets, it means the pivot - // become stale in the network and it was garbage collected, move to a + // become stale in the network, and it was garbage collected, move to a // new pivot. 
// // Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those From 16cd1a7561155a264b1a1a2a5850b11c47dc18d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 15 Sep 2023 15:52:53 +0300 Subject: [PATCH 16/98] cmd/geth, internal/flags: print envvar config source and bad names (#28119) --- cmd/geth/main.go | 6 +++++- internal/flags/helpers.go | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index a1d148d805..f6fa47ad2e 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -249,7 +249,11 @@ func init() { app.Before = func(ctx *cli.Context) error { maxprocs.Set() // Automatically set GOMAXPROCS to match Linux container CPU quota. flags.MigrateGlobalFlags(ctx) - return debug.Setup(ctx) + if err := debug.Setup(ctx); err != nil { + return err + } + flags.CheckEnvVars(ctx, app.Flags, "GETH") + return nil } app.After = func(ctx *cli.Context) error { debug.Exit() diff --git a/internal/flags/helpers.go b/internal/flags/helpers.go index bdeb7e38e0..b97f96d59e 100644 --- a/internal/flags/helpers.go +++ b/internal/flags/helpers.go @@ -20,9 +20,11 @@ import ( "fmt" "os" "regexp" + "sort" "strings" "github.com/ethereum/go-ethereum/internal/version" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/mattn/go-isatty" "github.com/urfave/cli/v2" @@ -263,3 +265,37 @@ func AutoEnvVars(flags []cli.Flag, prefix string) { } } } + +// CheckEnvVars iterates over all the environment variables and checks if any of +// them look like a CLI flag but is not consumed. This can be used to detect old +// or mistyped names. +func CheckEnvVars(ctx *cli.Context, flags []cli.Flag, prefix string) { + known := make(map[string]string) + for _, flag := range flags { + docflag, ok := flag.(cli.DocGenerationFlag) + if !ok { + continue + } + for _, envvar := range docflag.GetEnvVars() { + known[envvar] = flag.Names()[0] + } + } + keyvals := os.Environ() + sort.Strings(keyvals) + + for _, keyval := range keyvals { + key := strings.Split(keyval, "=")[0] + if !strings.HasPrefix(key, prefix) { + continue + } + if flag, ok := known[key]; ok { + if ctx.Count(flag) > 0 { + log.Info("Config environment variable found", "envvar", key, "shadowedby", "--"+flag) + } else { + log.Info("Config environment variable found", "envvar", key) + } + } else { + log.Warn("Unknown config environment variable", "envvar", key) + } + } +} From 9a9db3d2650513f4645217705e1f9eeea1e0393f Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Sun, 17 Sep 2023 21:50:18 +0800 Subject: [PATCH 17/98] eth/catalyst: fix engine API (#28135) --- eth/catalyst/api.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 9690d43307..08cce0558b 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -207,21 +207,21 @@ func (api *ConsensusAPI) verifyPayloadAttributes(attr *engine.PayloadAttributes) c := api.eth.BlockChain().Config() // Verify withdrawals attribute for Shanghai. - if err := checkAttribute(c.IsShanghai, attr.Withdrawals != nil, attr.Timestamp); err != nil { + if err := checkAttribute(c.IsShanghai, attr.Withdrawals != nil, c.LondonBlock, attr.Timestamp); err != nil { return fmt.Errorf("invalid withdrawals: %w", err) } // Verify beacon root attribute for Cancun. 
- if err := checkAttribute(c.IsCancun, attr.BeaconRoot != nil, attr.Timestamp); err != nil { + if err := checkAttribute(c.IsCancun, attr.BeaconRoot != nil, c.LondonBlock, attr.Timestamp); err != nil { return fmt.Errorf("invalid parent beacon block root: %w", err) } return nil } -func checkAttribute(active func(*big.Int, uint64) bool, exists bool, time uint64) error { - if active(common.Big0, time) && !exists { +func checkAttribute(active func(*big.Int, uint64) bool, exists bool, block *big.Int, time uint64) error { + if active(block, time) && !exists { return errors.New("fork active, missing expected attribute") } - if !active(common.Big0, time) && exists { + if !active(block, time) && exists { return errors.New("fork inactive, unexpected attribute set") } return nil From 217719347dd2ac76e44cff677d00bf63e8edefcd Mon Sep 17 00:00:00 2001 From: cam-schultz <78878559+cam-schultz@users.noreply.github.com> Date: Sun, 17 Sep 2023 09:00:04 -0500 Subject: [PATCH 18/98] internal/ethapi: correctly calculate effective gas price (#28130) correctly calculate effective gas price --- internal/ethapi/api.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index c37e716ded..733e671e0a 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1520,8 +1520,8 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber func effectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.Int { fee := tx.GasTipCap() fee = fee.Add(fee, baseFee) - if tx.GasTipCapIntCmp(fee) < 0 { - return tx.GasTipCap() + if tx.GasFeeCapIntCmp(fee) < 0 { + return tx.GasFeeCap() } return fee } From 52234eb17299dbccb108f74cf9ac94cc44bc6d6a Mon Sep 17 00:00:00 2001 From: phenix3443 Date: Sun, 17 Sep 2023 22:02:11 +0800 Subject: [PATCH 19/98] internal/flags: fix typo (#28133) fix(flag): one typo --- internal/flags/helpers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/flags/helpers.go b/internal/flags/helpers.go index b97f96d59e..d4b8e373cc 100644 --- a/internal/flags/helpers.go +++ b/internal/flags/helpers.go @@ -225,7 +225,7 @@ func wordWrap(s string, width int) string { return output.String() } -// AutoEnvVars extens all the specific CLI flags with automatically generated +// AutoEnvVars extends all the specific CLI flags with automatically generated // env vars by capitalizing the flag, replacing . with _ and prefixing it with // the specified string. 
// From d8a351b58f147fc8e1527695ff7a3d19e6f3420b Mon Sep 17 00:00:00 2001 From: phenix3443 Date: Sun, 17 Sep 2023 22:02:48 +0800 Subject: [PATCH 20/98] params: fix typo in comment (#28129) fix: typo --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index 5ce091fc40..f503862422 100644 --- a/params/config.go +++ b/params/config.go @@ -358,7 +358,7 @@ func (c *CliqueConfig) String() string { func (c *ChainConfig) Description() string { var banner string - // Create some basinc network config output + // Create some basic network config output network := NetworkNames[c.ChainID.String()] if network == "" { network = "unknown" From c53b0fef2ab8e2a00257b898cad5174e6b73f5fc Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Sun, 17 Sep 2023 22:35:09 +0800 Subject: [PATCH 21/98] core, eth/downloader: fix genesis state missing due to state sync (#28124) * core: fix chain repair corner case in path-based scheme * eth/downloader: disable trie database whenever state sync is launched --- core/blockchain.go | 97 ++++++++++++++++++++---------------- eth/downloader/downloader.go | 9 +++- 2 files changed, 63 insertions(+), 43 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 3e9440fed7..e371e8d926 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -340,28 +340,38 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis // Make sure the state associated with the block is available head := bc.CurrentBlock() if !bc.HasState(head.Root) { - // Head state is missing, before the state recovery, find out the - // disk layer point of snapshot(if it's enabled). Make sure the - // rewound point is lower than disk layer. - var diskRoot common.Hash - if bc.cacheConfig.SnapshotLimit > 0 { - diskRoot = rawdb.ReadSnapshotRoot(bc.db) - } - if diskRoot != (common.Hash{}) { - log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot) - - snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true) - if err != nil { - return nil, err - } - // Chain rewound, persist old snapshot number to indicate recovery procedure - if snapDisk != 0 { - rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk) - } + if head.Number.Uint64() == 0 { + // The genesis state is missing, which is only possible in the path-based + // scheme. This situation occurs when the state syncer overwrites it. + // + // The solution is to reset the state to the genesis state. Although it may not + // match the sync target, the state healer will later address and correct any + // inconsistencies. + bc.resetState() } else { - log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash()) - if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil { - return nil, err + // Head state is missing, before the state recovery, find out the + // disk layer point of snapshot(if it's enabled). Make sure the + // rewound point is lower than disk layer. 
+ var diskRoot common.Hash + if bc.cacheConfig.SnapshotLimit > 0 { + diskRoot = rawdb.ReadSnapshotRoot(bc.db) + } + if diskRoot != (common.Hash{}) { + log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot) + + snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true) + if err != nil { + return nil, err + } + // Chain rewound, persist old snapshot number to indicate recovery procedure + if snapDisk != 0 { + rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk) + } + } else { + log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash()) + if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil { + return nil, err + } } } } @@ -620,6 +630,28 @@ func (bc *BlockChain) SetSafe(header *types.Header) { } } +// resetState resets the persistent state to genesis state if it's not present. +func (bc *BlockChain) resetState() { + // Short circuit if the genesis state is already present. + root := bc.genesisBlock.Root() + if bc.HasState(root) { + return + } + // Reset the state database to empty for committing genesis state. + // Note, it should only happen in path-based scheme and Reset function + // is also only call-able in this mode. + if bc.triedb.Scheme() == rawdb.PathScheme { + if err := bc.triedb.Reset(types.EmptyRootHash); err != nil { + log.Crit("Failed to clean state", "err", err) // Shouldn't happen + } + } + // Write genesis state into database. + if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil { + log.Crit("Failed to commit genesis state", "err", err) + } + log.Info("Reset state to genesis", "root", root) +} + // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition // that the rewind must pass the specified state root. This method is meant to be // used when rewinding with snapshots enabled to ensure that we go back further than @@ -646,25 +678,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha pivot := rawdb.ReadLastPivotNumber(bc.db) frozen, _ := bc.db.Ancients() - // resetState resets the persistent state to genesis if it's not available. - resetState := func() { - // Short circuit if the genesis state is already present. - if bc.HasState(bc.genesisBlock.Root()) { - return - } - // Reset the state database to empty for committing genesis state. - // Note, it should only happen in path-based scheme and Reset function - // is also only call-able in this mode. - if bc.triedb.Scheme() == rawdb.PathScheme { - if err := bc.triedb.Reset(types.EmptyRootHash); err != nil { - log.Crit("Failed to clean state", "err", err) // Shouldn't happen - } - } - // Write genesis state into database. - if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil { - log.Crit("Failed to commit genesis state", "err", err) - } - } updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) { // Rewind the blockchain, ensuring we don't end up with a stateless head // block. 
Note, depth equality is permitted to allow using SetHead as a @@ -674,7 +687,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha if newHeadBlock == nil { log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) newHeadBlock = bc.genesisBlock - resetState() + bc.resetState() } else { // Block exists, keep rewinding until we find one with state, // keeping rewinding until we exceed the optional threshold @@ -703,7 +716,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha } if beyondRoot || newHeadBlock.NumberU64() == 0 { if newHeadBlock.NumberU64() == 0 { - resetState() + bc.resetState() } else if !bc.HasState(newHeadBlock.Root()) { // Rewind to a block with recoverable state. If the state is // missing, run the state recovery here. diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 33c0a2bcad..1e4f35ccd1 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -398,7 +398,14 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, log.Info("Block synchronisation started") } if mode == SnapSync { - // Snap sync uses the snapshot namespace to store potentially flakey data until + // Snap sync will directly modify the persistent state, making the entire + // trie database unusable until the state is fully synced. To prevent any + // subsequent state reads, explicitly disable the trie database and state + // syncer is responsible to address and correct any state missing. + if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme { + d.blockchain.TrieDB().Reset(types.EmptyRootHash) + } + // Snap sync uses the snapshot namespace to store potentially flaky data until // sync completely heals and finishes. Pause snapshot maintenance in the mean- // time to prevent access. if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests From 3f40e65c484486dea6cff80b7db178985d21a2c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Sun, 17 Sep 2023 17:54:33 +0300 Subject: [PATCH 22/98] params: release Geth v1.13.1 --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index c18c650790..d75165f3c9 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 13 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 13 // Minor version component of the current release + VersionPatch = 1 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. 
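For reference, a small sketch of how these constants typically compose into the strings a release reports, mirroring the Version and VersionWithMeta helpers that follow the const block in params/version.go but are not shown in this hunk:

    package main

    import "fmt"

    func main() {
    	const (
    		VersionMajor = 1
    		VersionMinor = 13
    		VersionPatch = 1
    		VersionMeta  = "stable"
    	)
    	version := fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
    	fmt.Println(version)                     // 1.13.1
    	fmt.Println(version + "-" + VersionMeta) // 1.13.1-stable
    }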
From 90d5bd85bcf2919ac2735a47fde675213348a0a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Sun, 17 Sep 2023 17:55:46 +0300 Subject: [PATCH 23/98] params: begin Geth v1.13.2 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index d75165f3c9..56d5a99a80 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 13 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 13 // Minor version component of the current release + VersionPatch = 2 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From e9f78db79d39cf0382208bf904ac03ccdb860c86 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 19 Sep 2023 13:41:16 +0200 Subject: [PATCH 24/98] cmd/evm: fix some issues with the evm run command (#28109) * cmd/evm: improve flags handling This fixes some issues with flags in cmd/evm. The supported flags did not actually show up in help output because they weren't categorized. I'm also adding the VM-related flags to the run command here so they can be given after the subcommand name. So it can be run like this now: ./evm run --code 6001 --debug * cmd/evm: enable all forks by default in run command The default genesis was just empty with no forks at all, which is annoying because contracts will be relying on opcodes introduced in a fork. So this changes the default to have all forks enabled. * core/asm: fix some issues in the assembler This fixes minor bugs in the old assembler: - It is now possible to have comments on the same line as an instruction. 
- Errors for invalid numbers in the jump instruction are reported better - Line numbers in errors were off by one --- cmd/evm/blockrunner.go | 6 +- cmd/evm/main.go | 185 +++++++++++++++++++---------------- cmd/evm/runner.go | 96 ++++++------------ cmd/evm/staterunner.go | 6 -- core/asm/compiler.go | 183 ++++++++++++++++++---------------- core/asm/compiler_test.go | 8 ++ core/asm/lex_test.go | 10 ++ core/asm/lexer.go | 40 +++----- core/asm/tokentype_string.go | 31 ++++++ 9 files changed, 295 insertions(+), 270 deletions(-) create mode 100644 core/asm/tokentype_string.go diff --git a/cmd/evm/blockrunner.go b/cmd/evm/blockrunner.go index 0be5f69711..6612680dc4 100644 --- a/cmd/evm/blockrunner.go +++ b/cmd/evm/blockrunner.go @@ -25,7 +25,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" "github.com/urfave/cli/v2" ) @@ -41,10 +40,7 @@ func blockTestCmd(ctx *cli.Context) error { if len(ctx.Args().First()) == 0 { return errors.New("path-to-test argument required") } - // Configure the go-ethereum logger - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) - log.Root().SetHandler(glogger) + var tracer vm.EVMLogger // Configure the EVM logger if ctx.Bool(MachineFlag.Name) { diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 024be62b9c..1f6500b78c 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -23,107 +23,116 @@ import ( "os" "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" + "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/flags" "github.com/urfave/cli/v2" ) var ( DebugFlag = &cli.BoolFlag{ - Name: "debug", - Usage: "output full trace logs", - } - MemProfileFlag = &cli.StringFlag{ - Name: "memprofile", - Usage: "creates a memory profile at the given path", - } - CPUProfileFlag = &cli.StringFlag{ - Name: "cpuprofile", - Usage: "creates a CPU profile at the given path", + Name: "debug", + Usage: "output full trace logs", + Category: flags.VMCategory, } StatDumpFlag = &cli.BoolFlag{ - Name: "statdump", - Usage: "displays stack and heap memory information", + Name: "statdump", + Usage: "displays stack and heap memory information", + Category: flags.VMCategory, } CodeFlag = &cli.StringFlag{ - Name: "code", - Usage: "EVM code", + Name: "code", + Usage: "EVM code", + Category: flags.VMCategory, } CodeFileFlag = &cli.StringFlag{ - Name: "codefile", - Usage: "File containing EVM code. If '-' is specified, code is read from stdin ", + Name: "codefile", + Usage: "File containing EVM code. 
If '-' is specified, code is read from stdin ", + Category: flags.VMCategory, } GasFlag = &cli.Uint64Flag{ - Name: "gas", - Usage: "gas limit for the evm", - Value: 10000000000, + Name: "gas", + Usage: "gas limit for the evm", + Value: 10000000000, + Category: flags.VMCategory, } PriceFlag = &flags.BigFlag{ - Name: "price", - Usage: "price set for the evm", - Value: new(big.Int), + Name: "price", + Usage: "price set for the evm", + Value: new(big.Int), + Category: flags.VMCategory, } ValueFlag = &flags.BigFlag{ - Name: "value", - Usage: "value set for the evm", - Value: new(big.Int), + Name: "value", + Usage: "value set for the evm", + Value: new(big.Int), + Category: flags.VMCategory, } DumpFlag = &cli.BoolFlag{ - Name: "dump", - Usage: "dumps the state after the run", + Name: "dump", + Usage: "dumps the state after the run", + Category: flags.VMCategory, } InputFlag = &cli.StringFlag{ - Name: "input", - Usage: "input for the EVM", + Name: "input", + Usage: "input for the EVM", + Category: flags.VMCategory, } InputFileFlag = &cli.StringFlag{ - Name: "inputfile", - Usage: "file containing input for the EVM", - } - VerbosityFlag = &cli.IntFlag{ - Name: "verbosity", - Usage: "sets the verbosity level", + Name: "inputfile", + Usage: "file containing input for the EVM", + Category: flags.VMCategory, } BenchFlag = &cli.BoolFlag{ - Name: "bench", - Usage: "benchmark the execution", + Name: "bench", + Usage: "benchmark the execution", + Category: flags.VMCategory, } CreateFlag = &cli.BoolFlag{ - Name: "create", - Usage: "indicates the action should be create rather than call", + Name: "create", + Usage: "indicates the action should be create rather than call", + Category: flags.VMCategory, } GenesisFlag = &cli.StringFlag{ - Name: "prestate", - Usage: "JSON file with prestate (genesis) config", + Name: "prestate", + Usage: "JSON file with prestate (genesis) config", + Category: flags.VMCategory, } MachineFlag = &cli.BoolFlag{ - Name: "json", - Usage: "output trace logs in machine readable format (json)", + Name: "json", + Usage: "output trace logs in machine readable format (json)", + Category: flags.VMCategory, } SenderFlag = &cli.StringFlag{ - Name: "sender", - Usage: "The transaction origin", + Name: "sender", + Usage: "The transaction origin", + Category: flags.VMCategory, } ReceiverFlag = &cli.StringFlag{ - Name: "receiver", - Usage: "The transaction receiver (execution context)", + Name: "receiver", + Usage: "The transaction receiver (execution context)", + Category: flags.VMCategory, } DisableMemoryFlag = &cli.BoolFlag{ - Name: "nomemory", - Value: true, - Usage: "disable memory output", + Name: "nomemory", + Value: true, + Usage: "disable memory output", + Category: flags.VMCategory, } DisableStackFlag = &cli.BoolFlag{ - Name: "nostack", - Usage: "disable stack output", + Name: "nostack", + Usage: "disable stack output", + Category: flags.VMCategory, } DisableStorageFlag = &cli.BoolFlag{ - Name: "nostorage", - Usage: "disable storage output", + Name: "nostorage", + Usage: "disable storage output", + Category: flags.VMCategory, } DisableReturnDataFlag = &cli.BoolFlag{ - Name: "noreturndata", - Value: true, - Usage: "enable return data output", + Name: "noreturndata", + Value: true, + Usage: "enable return data output", + Category: flags.VMCategory, } ) @@ -183,34 +192,38 @@ var blockBuilderCommand = &cli.Command{ }, } +// vmFlags contains flags related to running the EVM. 
+var vmFlags = []cli.Flag{ + CodeFlag, + CodeFileFlag, + CreateFlag, + GasFlag, + PriceFlag, + ValueFlag, + InputFlag, + InputFileFlag, + GenesisFlag, + SenderFlag, + ReceiverFlag, +} + +// traceFlags contains flags that configure tracing output. +var traceFlags = []cli.Flag{ + BenchFlag, + DebugFlag, + DumpFlag, + MachineFlag, + StatDumpFlag, + DisableMemoryFlag, + DisableStackFlag, + DisableStorageFlag, + DisableReturnDataFlag, +} + var app = flags.NewApp("the evm command line interface") func init() { - app.Flags = []cli.Flag{ - BenchFlag, - CreateFlag, - DebugFlag, - VerbosityFlag, - CodeFlag, - CodeFileFlag, - GasFlag, - PriceFlag, - ValueFlag, - DumpFlag, - InputFlag, - InputFileFlag, - MemProfileFlag, - CPUProfileFlag, - StatDumpFlag, - GenesisFlag, - MachineFlag, - SenderFlag, - ReceiverFlag, - DisableMemoryFlag, - DisableStackFlag, - DisableStorageFlag, - DisableReturnDataFlag, - } + app.Flags = flags.Merge(vmFlags, traceFlags, debug.Flags) app.Commands = []*cli.Command{ compileCommand, disasmCommand, @@ -221,6 +234,14 @@ func init() { transactionCommand, blockBuilderCommand, } + app.Before = func(ctx *cli.Context) error { + flags.MigrateGlobalFlags(ctx) + return debug.Setup(ctx) + } + app.After = func(ctx *cli.Context) error { + debug.Exit() + return nil + } } func main() { diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index ac8432badb..017388efb5 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -24,7 +24,6 @@ import ( "math/big" "os" goruntime "runtime" - "runtime/pprof" "testing" "time" @@ -34,12 +33,10 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm/runtime" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/internal/flags" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" @@ -52,6 +49,7 @@ var runCommand = &cli.Command{ Usage: "run arbitrary evm binary", ArgsUsage: "", Description: `The run command runs arbitrary EVM code.`, + Flags: flags.Merge(vmFlags, traceFlags), } // readGenesis will read the given JSON format genesis file and return @@ -109,9 +107,6 @@ func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []by } func runCmd(ctx *cli.Context) error { - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) - log.Root().SetHandler(glogger) logconfig := &logger.Config{ EnableMemory: !ctx.Bool(DisableMemoryFlag.Name), DisableStack: ctx.Bool(DisableStackFlag.Name), @@ -121,15 +116,14 @@ func runCmd(ctx *cli.Context) error { } var ( - tracer vm.EVMLogger - debugLogger *logger.StructLogger - statedb *state.StateDB - chainConfig *params.ChainConfig - sender = common.BytesToAddress([]byte("sender")) - receiver = common.BytesToAddress([]byte("receiver")) - genesisConfig *core.Genesis - preimages = ctx.Bool(DumpFlag.Name) - blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests + tracer vm.EVMLogger + debugLogger *logger.StructLogger + statedb *state.StateDB + chainConfig *params.ChainConfig + sender = common.BytesToAddress([]byte("sender")) + receiver = common.BytesToAddress([]byte("receiver")) + preimages = ctx.Bool(DumpFlag.Name) + blobHashes 
[]common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests ) if ctx.Bool(MachineFlag.Name) { tracer = logger.NewJSONLogger(logconfig, os.Stdout) @@ -139,30 +133,30 @@ func runCmd(ctx *cli.Context) error { } else { debugLogger = logger.NewStructLogger(logconfig) } + + initialGas := ctx.Uint64(GasFlag.Name) + genesisConfig := new(core.Genesis) + genesisConfig.GasLimit = initialGas if ctx.String(GenesisFlag.Name) != "" { - gen := readGenesis(ctx.String(GenesisFlag.Name)) - genesisConfig = gen - db := rawdb.NewMemoryDatabase() - triedb := trie.NewDatabase(db, &trie.Config{ - Preimages: preimages, - HashDB: hashdb.Defaults, - }) - defer triedb.Close() - genesis := gen.MustCommit(db, triedb) - sdb := state.NewDatabaseWithNodeDB(db, triedb) - statedb, _ = state.New(genesis.Root(), sdb, nil) - chainConfig = gen.Config + genesisConfig = readGenesis(ctx.String(GenesisFlag.Name)) + if genesisConfig.GasLimit != 0 { + initialGas = genesisConfig.GasLimit + } } else { - db := rawdb.NewMemoryDatabase() - triedb := trie.NewDatabase(db, &trie.Config{ - Preimages: preimages, - HashDB: hashdb.Defaults, - }) - defer triedb.Close() - sdb := state.NewDatabaseWithNodeDB(db, triedb) - statedb, _ = state.New(types.EmptyRootHash, sdb, nil) - genesisConfig = new(core.Genesis) + genesisConfig.Config = params.AllEthashProtocolChanges } + + db := rawdb.NewMemoryDatabase() + triedb := trie.NewDatabase(db, &trie.Config{ + Preimages: preimages, + HashDB: hashdb.Defaults, + }) + defer triedb.Close() + genesis := genesisConfig.MustCommit(db, triedb) + sdb := state.NewDatabaseWithNodeDB(db, triedb) + statedb, _ = state.New(genesis.Root(), sdb, nil) + chainConfig = genesisConfig.Config + if ctx.String(SenderFlag.Name) != "" { sender = common.HexToAddress(ctx.String(SenderFlag.Name)) } @@ -216,10 +210,6 @@ func runCmd(ctx *cli.Context) error { } code = common.Hex2Bytes(bin) } - initialGas := ctx.Uint64(GasFlag.Name) - if genesisConfig.GasLimit != 0 { - initialGas = genesisConfig.GasLimit - } runtimeConfig := runtime.Config{ Origin: sender, State: statedb, @@ -236,19 +226,6 @@ func runCmd(ctx *cli.Context) error { }, } - if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" { - f, err := os.Create(cpuProfilePath) - if err != nil { - fmt.Println("could not create CPU profile: ", err) - os.Exit(1) - } - if err := pprof.StartCPUProfile(f); err != nil { - fmt.Println("could not start CPU profile: ", err) - os.Exit(1) - } - defer pprof.StopCPUProfile() - } - if chainConfig != nil { runtimeConfig.ChainConfig = chainConfig } else { @@ -296,19 +273,6 @@ func runCmd(ctx *cli.Context) error { fmt.Println(string(statedb.Dump(nil))) } - if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" { - f, err := os.Create(memProfilePath) - if err != nil { - fmt.Println("could not create memory profile: ", err) - os.Exit(1) - } - if err := pprof.WriteHeapProfile(f); err != nil { - fmt.Println("could not write memory profile: ", err) - os.Exit(1) - } - f.Close() - } - if ctx.Bool(DebugFlag.Name) { if debugLogger != nil { fmt.Fprintln(os.Stderr, "#### TRACE ####") diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 85931f0406..8a07fccdf8 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" 
"github.com/urfave/cli/v2" ) @@ -52,11 +51,6 @@ type StatetestResult struct { } func stateTestCmd(ctx *cli.Context) error { - // Configure the go-ethereum logger - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) - log.Root().SetHandler(glogger) - // Configure the EVM logger config := &logger.Config{ EnableMemory: !ctx.Bool(DisableMemoryFlag.Name), diff --git a/core/asm/compiler.go b/core/asm/compiler.go index 4b1d379206..75bf726c96 100644 --- a/core/asm/compiler.go +++ b/core/asm/compiler.go @@ -17,6 +17,8 @@ package asm import ( + "encoding/hex" + "errors" "fmt" "math/big" "os" @@ -30,7 +32,7 @@ import ( // and holds the tokens for the program. type Compiler struct { tokens []token - binary []interface{} + out []byte labels map[string]int @@ -50,12 +52,10 @@ func NewCompiler(debug bool) *Compiler { // Feed feeds tokens in to ch and are interpreted by // the compiler. // -// feed is the first pass in the compile stage as it -// collects the used labels in the program and keeps a -// program counter which is used to determine the locations -// of the jump dests. The labels can than be used in the -// second stage to push labels and determine the right -// position. +// feed is the first pass in the compile stage as it collects the used labels in the +// program and keeps a program counter which is used to determine the locations of the +// jump dests. The labels can than be used in the second stage to push labels and +// determine the right position. func (c *Compiler) Feed(ch <-chan token) { var prev token for i := range ch { @@ -79,7 +79,6 @@ func (c *Compiler) Feed(ch <-chan token) { c.pc++ } } - c.tokens = append(c.tokens, i) prev = i } @@ -88,12 +87,11 @@ func (c *Compiler) Feed(ch <-chan token) { } } -// Compile compiles the current tokens and returns a -// binary string that can be interpreted by the EVM -// and an error if it failed. +// Compile compiles the current tokens and returns a binary string that can be interpreted +// by the EVM and an error if it failed. // -// compile is the second stage in the compile phase -// which compiles the tokens to EVM instructions. +// compile is the second stage in the compile phase which compiles the tokens to EVM +// instructions. 
func (c *Compiler) Compile() (string, []error) { var errors []error // continue looping over the tokens until @@ -105,16 +103,8 @@ func (c *Compiler) Compile() (string, []error) { } // turn the binary to hex - var bin strings.Builder - for _, v := range c.binary { - switch v := v.(type) { - case vm.OpCode: - bin.WriteString(fmt.Sprintf("%x", []byte{byte(v)})) - case []byte: - bin.WriteString(fmt.Sprintf("%x", v)) - } - } - return bin.String(), errors + h := hex.EncodeToString(c.out) + return h, errors } // next returns the next token and increments the @@ -156,87 +146,114 @@ func (c *Compiler) compileLine() error { return nil } -// compileNumber compiles the number to bytes -func (c *Compiler) compileNumber(element token) { - num := math.MustParseBig256(element.text).Bytes() - if len(num) == 0 { - num = []byte{0} +// parseNumber compiles the number to bytes +func parseNumber(tok token) ([]byte, error) { + if tok.typ != number { + panic("parseNumber of non-number token") + } + num, ok := math.ParseBig256(tok.text) + if !ok { + return nil, errors.New("invalid number") } - c.pushBin(num) + bytes := num.Bytes() + if len(bytes) == 0 { + bytes = []byte{0} + } + return bytes, nil } // compileElement compiles the element (push & label or both) // to a binary representation and may error if incorrect statements // where fed. func (c *Compiler) compileElement(element token) error { - // check for a jump. jumps must be read and compiled - // from right to left. - if isJump(element.text) { - rvalue := c.next() - switch rvalue.typ { - case number: - // TODO figure out how to return the error properly - c.compileNumber(rvalue) - case stringValue: - // strings are quoted, remove them. - c.pushBin(rvalue.text[1 : len(rvalue.text)-2]) - case label: - c.pushBin(vm.PUSH4) - pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes() - pos = append(make([]byte, 4-len(pos)), pos...) - c.pushBin(pos) - case lineEnd: - c.pos-- - default: - return compileErr(rvalue, rvalue.text, "number, string or label") - } - // push the operation - c.pushBin(toBinary(element.text)) + switch { + case isJump(element.text): + return c.compileJump(element.text) + case isPush(element.text): + return c.compilePush() + default: + c.outputOpcode(toBinary(element.text)) return nil - } else if isPush(element.text) { - // handle pushes. pushes are read from left to right. - var value []byte + } +} - rvalue := c.next() - switch rvalue.typ { - case number: - value = math.MustParseBig256(rvalue.text).Bytes() - if len(value) == 0 { - value = []byte{0} - } - case stringValue: - value = []byte(rvalue.text[1 : len(rvalue.text)-1]) - case label: - value = big.NewInt(int64(c.labels[rvalue.text])).Bytes() - value = append(make([]byte, 4-len(value)), value...) - default: - return compileErr(rvalue, rvalue.text, "number, string or label") +func (c *Compiler) compileJump(jumpType string) error { + rvalue := c.next() + switch rvalue.typ { + case number: + numBytes, err := parseNumber(rvalue) + if err != nil { + return err } + c.outputBytes(numBytes) - if len(value) > 32 { - return fmt.Errorf("%d type error: unsupported string or number with size > 32", rvalue.lineno) - } + case stringValue: + // strings are quoted, remove them. + str := rvalue.text[1 : len(rvalue.text)-2] + c.outputBytes([]byte(str)) + + case label: + c.outputOpcode(vm.PUSH4) + pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes() + pos = append(make([]byte, 4-len(pos)), pos...) 
+ c.outputBytes(pos) + + case lineEnd: + // push without argument is supported, it just takes the destination from the stack. + c.pos-- - c.pushBin(vm.OpCode(int(vm.PUSH1) - 1 + len(value))) - c.pushBin(value) - } else { - c.pushBin(toBinary(element.text)) + default: + return compileErr(rvalue, rvalue.text, "number, string or label") } + // push the operation + c.outputOpcode(toBinary(jumpType)) + return nil +} +func (c *Compiler) compilePush() error { + // handle pushes. pushes are read from left to right. + var value []byte + rvalue := c.next() + switch rvalue.typ { + case number: + value = math.MustParseBig256(rvalue.text).Bytes() + if len(value) == 0 { + value = []byte{0} + } + case stringValue: + value = []byte(rvalue.text[1 : len(rvalue.text)-1]) + case label: + value = big.NewInt(int64(c.labels[rvalue.text])).Bytes() + value = append(make([]byte, 4-len(value)), value...) + default: + return compileErr(rvalue, rvalue.text, "number, string or label") + } + if len(value) > 32 { + return fmt.Errorf("%d: string or number size > 32 bytes", rvalue.lineno+1) + } + c.outputOpcode(vm.OpCode(int(vm.PUSH1) - 1 + len(value))) + c.outputBytes(value) return nil } // compileLabel pushes a jumpdest to the binary slice. func (c *Compiler) compileLabel() { - c.pushBin(vm.JUMPDEST) + c.outputOpcode(vm.JUMPDEST) +} + +func (c *Compiler) outputOpcode(op vm.OpCode) { + if c.debug { + fmt.Printf("%d: %v\n", len(c.out), op) + } + c.out = append(c.out, byte(op)) } -// pushBin pushes the value v to the binary stack. -func (c *Compiler) pushBin(v interface{}) { +// output pushes the value v to the binary stack. +func (c *Compiler) outputBytes(b []byte) { if c.debug { - fmt.Printf("%d: %v\n", len(c.binary), v) + fmt.Printf("%d: %x\n", len(c.out), b) } - c.binary = append(c.binary, v) + c.out = append(c.out, b...) 
} // isPush returns whether the string op is either any of @@ -263,13 +280,13 @@ type compileError struct { } func (err compileError) Error() string { - return fmt.Sprintf("%d syntax error: unexpected %v, expected %v", err.lineno, err.got, err.want) + return fmt.Sprintf("%d: syntax error: unexpected %v, expected %v", err.lineno, err.got, err.want) } func compileErr(c token, got, want string) error { return compileError{ got: got, want: want, - lineno: c.lineno, + lineno: c.lineno + 1, } } diff --git a/core/asm/compiler_test.go b/core/asm/compiler_test.go index ce9df436bd..3d64c96bc8 100644 --- a/core/asm/compiler_test.go +++ b/core/asm/compiler_test.go @@ -54,6 +54,14 @@ func TestCompiler(t *testing.T) { `, output: "6300000006565b", }, + { + input: ` + JUMP @label +label: ;; comment + ADD ;; comment +`, + output: "6300000006565b01", + }, } for _, test := range tests { ch := Lex([]byte(test.input), false) diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go index 53e05fbbba..173031521f 100644 --- a/core/asm/lex_test.go +++ b/core/asm/lex_test.go @@ -72,6 +72,16 @@ func TestLexer(t *testing.T) { input: "@label123", tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, }, + // comment after label + { + input: "@label123 ;; comment", + tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, + }, + // comment after instruction + { + input: "push 3 ;; comment\nadd", + tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}}, + }, } for _, test := range tests { diff --git a/core/asm/lexer.go b/core/asm/lexer.go index d1b79a1fb9..e025c6f363 100644 --- a/core/asm/lexer.go +++ b/core/asm/lexer.go @@ -42,6 +42,8 @@ type token struct { // is able to parse and return. type tokenType int +//go:generate go run golang.org/x/tools/cmd/stringer -type tokenType + const ( eof tokenType = iota // end of file lineStart // emitted when a line starts @@ -52,31 +54,13 @@ const ( labelDef // label definition is emitted when a new label is found number // number is emitted when a number is found stringValue // stringValue is emitted when a string has been found - - Numbers = "1234567890" // characters representing any decimal number - HexadecimalNumbers = Numbers + "aAbBcCdDeEfF" // characters representing any hexadecimal - Alpha = "abcdefghijklmnopqrstuwvxyzABCDEFGHIJKLMNOPQRSTUWVXYZ" // characters representing alphanumeric ) -// String implements stringer -func (it tokenType) String() string { - if int(it) > len(stringtokenTypes) { - return "invalid" - } - return stringtokenTypes[it] -} - -var stringtokenTypes = []string{ - eof: "EOF", - lineStart: "new line", - lineEnd: "end of line", - invalidStatement: "invalid statement", - element: "element", - label: "label", - labelDef: "label definition", - number: "number", - stringValue: "string", -} +const ( + decimalNumbers = "1234567890" // characters representing any decimal number + hexNumbers = decimalNumbers + "aAbBcCdDeEfF" // characters representing any hexadecimal + alpha = "abcdefghijklmnopqrstuwvxyzABCDEFGHIJKLMNOPQRSTUWVXYZ" // characters representing alphanumeric +) // lexer is the basic construct for parsing // source code and turning them in to tokens. 
@@ -200,7 +184,6 @@ func lexLine(l *lexer) stateFn { l.emit(lineEnd) l.ignore() l.lineno++ - l.emit(lineStart) case r == ';' && l.peek() == ';': return lexComment @@ -225,6 +208,7 @@ func lexLine(l *lexer) stateFn { // of the line and discards the text. func lexComment(l *lexer) stateFn { l.acceptRunUntil('\n') + l.backup() l.ignore() return lexLine @@ -234,7 +218,7 @@ func lexComment(l *lexer) stateFn { // the lex text state function to advance the parsing // process. func lexLabel(l *lexer) stateFn { - l.acceptRun(Alpha + "_" + Numbers) + l.acceptRun(alpha + "_" + decimalNumbers) l.emit(label) @@ -253,9 +237,9 @@ func lexInsideString(l *lexer) stateFn { } func lexNumber(l *lexer) stateFn { - acceptance := Numbers + acceptance := decimalNumbers if l.accept("xX") { - acceptance = HexadecimalNumbers + acceptance = hexNumbers } l.acceptRun(acceptance) @@ -265,7 +249,7 @@ func lexNumber(l *lexer) stateFn { } func lexElement(l *lexer) stateFn { - l.acceptRun(Alpha + "_" + Numbers) + l.acceptRun(alpha + "_" + decimalNumbers) if l.peek() == ':' { l.emit(labelDef) diff --git a/core/asm/tokentype_string.go b/core/asm/tokentype_string.go new file mode 100644 index 0000000000..ade76aa360 --- /dev/null +++ b/core/asm/tokentype_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type tokenType"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[eof-0] + _ = x[lineStart-1] + _ = x[lineEnd-2] + _ = x[invalidStatement-3] + _ = x[element-4] + _ = x[label-5] + _ = x[labelDef-6] + _ = x[number-7] + _ = x[stringValue-8] +} + +const _tokenType_name = "eoflineStartlineEndinvalidStatementelementlabellabelDefnumberstringValue" + +var _tokenType_index = [...]uint8{0, 3, 12, 19, 35, 42, 47, 55, 61, 72} + +func (i tokenType) String() string { + if i < 0 || i >= tokenType(len(_tokenType_index)-1) { + return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tokenType_name[_tokenType_index[i]:_tokenType_index[i+1]] +} From ef76afad3596913495f1fbb012a5d61ef871f60e Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 19 Sep 2023 19:43:37 +0800 Subject: [PATCH 25/98] core/rawdb: fix typo in comment (#28140) --- core/rawdb/database.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 7a78456382..3839e949ed 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -253,7 +253,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st break } } - // We are about to exit on error. Print database metdata beore exiting + // We are about to exit on error. 
Print database metadata before exiting printChainMetadata(db) return nil, fmt.Errorf("gap in the chain between ancients [0 - #%d] and leveldb [#%d - #%d] ", frozen-1, number, head) From 4b748b7a27ad8bbf5ccfd81492cefc9c814e732b Mon Sep 17 00:00:00 2001 From: bnovil Date: Tue, 19 Sep 2023 20:14:36 +0800 Subject: [PATCH 26/98] eth: fix typo in comment (#28146) --- eth/backend.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/backend.go b/eth/backend.go index 38c0fa9743..b99ae7655b 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -387,7 +387,7 @@ func (s *Ethereum) shouldPreserve(header *types.Header) bool { // r5 A [X] F G // r6 [X] // - // In the round5, the inturn signer E is offline, so the worst case + // In the round5, the in-turn signer E is offline, so the worst case // is A, F and G sign the block of round5 and reject the block of opponents // and in the round6, the last available signer B is offline, the whole // network is stuck. From 41a0ad9f03ae8e8389fbe40131f4e6930b5beac5 Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 19 Sep 2023 20:18:29 +0800 Subject: [PATCH 27/98] cmd/devp2p: use bootnodes as crawl input (#28139) This PR makes the tool use the --bootnodes list as the input to devp2p crawl. The flag will take effect if the input/output.json file is missing or empty. --- cmd/devp2p/crawl.go | 12 ++++++++++-- cmd/devp2p/discv4cmd.go | 29 ++++++++++++++++++----------- cmd/devp2p/discv5cmd.go | 20 ++++++++++++-------- 3 files changed, 40 insertions(+), 21 deletions(-) diff --git a/cmd/devp2p/crawl.go b/cmd/devp2p/crawl.go index 8c0defff6d..4288a5feb8 100644 --- a/cmd/devp2p/crawl.go +++ b/cmd/devp2p/crawl.go @@ -17,6 +17,7 @@ package main import ( + "errors" "sync" "sync/atomic" "time" @@ -51,7 +52,14 @@ type resolver interface { RequestENR(*enode.Node) (*enode.Node, error) } -func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler { +func newCrawler(input nodeSet, bootnodes []*enode.Node, disc resolver, iters ...enode.Iterator) (*crawler, error) { + if len(input) == 0 { + input.add(bootnodes...) + } + if len(input) == 0 { + return nil, errors.New("no input nodes to start crawling") + } + c := &crawler{ input: input, output: make(nodeSet, len(input)), @@ -67,7 +75,7 @@ func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler for id, n := range input { c.output[id] = n } - return c + return c, nil } func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet { diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go index 0117c7eb82..37b139dea2 100644 --- a/cmd/devp2p/discv4cmd.go +++ b/cmd/devp2p/discv4cmd.go @@ -143,7 +143,7 @@ var discoveryNodeFlags = []cli.Flag{ func discv4Ping(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV4(ctx) + disc, _ := startV4(ctx) defer disc.Close() start := time.Now() @@ -156,7 +156,7 @@ func discv4Ping(ctx *cli.Context) error { func discv4RequestRecord(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV4(ctx) + disc, _ := startV4(ctx) defer disc.Close() respN, err := disc.RequestENR(n) @@ -169,7 +169,7 @@ func discv4RequestRecord(ctx *cli.Context) error { func discv4Resolve(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV4(ctx) + disc, _ := startV4(ctx) defer disc.Close() fmt.Println(disc.Resolve(n).String()) @@ -196,10 +196,13 @@ func discv4ResolveJSON(ctx *cli.Context) error { nodeargs = append(nodeargs, n) } - // Run the crawler. 
- disc := startV4(ctx) + disc, config := startV4(ctx) defer disc.Close() - c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs)) + + c, err := newCrawler(inputSet, config.Bootnodes, disc, enode.IterNodes(nodeargs)) + if err != nil { + return err + } c.revalidateInterval = 0 output := c.run(0, 1) writeNodesJSON(nodesFile, output) @@ -211,14 +214,18 @@ func discv4Crawl(ctx *cli.Context) error { return errors.New("need nodes file as argument") } nodesFile := ctx.Args().First() - var inputSet nodeSet + inputSet := make(nodeSet) if common.FileExist(nodesFile) { inputSet = loadNodesJSON(nodesFile) } - disc := startV4(ctx) + disc, config := startV4(ctx) defer disc.Close() - c := newCrawler(inputSet, disc, disc.RandomNodes()) + + c, err := newCrawler(inputSet, config.Bootnodes, disc, disc.RandomNodes()) + if err != nil { + return err + } c.revalidateInterval = 10 * time.Minute output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name)) writeNodesJSON(nodesFile, output) @@ -238,14 +245,14 @@ func discv4Test(ctx *cli.Context) error { } // startV4 starts an ephemeral discovery V4 node. -func startV4(ctx *cli.Context) *discover.UDPv4 { +func startV4(ctx *cli.Context) (*discover.UDPv4, discover.Config) { ln, config := makeDiscoveryConfig(ctx) socket := listen(ctx, ln) disc, err := discover.ListenV4(socket, ln, config) if err != nil { exit(err) } - return disc + return disc, config } func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) { diff --git a/cmd/devp2p/discv5cmd.go b/cmd/devp2p/discv5cmd.go index c5e226f0d1..0dac945269 100644 --- a/cmd/devp2p/discv5cmd.go +++ b/cmd/devp2p/discv5cmd.go @@ -81,7 +81,7 @@ var ( func discv5Ping(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV5(ctx) + disc, _ := startV5(ctx) defer disc.Close() fmt.Println(disc.Ping(n)) @@ -90,7 +90,7 @@ func discv5Ping(ctx *cli.Context) error { func discv5Resolve(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV5(ctx) + disc, _ := startV5(ctx) defer disc.Close() fmt.Println(disc.Resolve(n)) @@ -102,14 +102,18 @@ func discv5Crawl(ctx *cli.Context) error { return errors.New("need nodes file as argument") } nodesFile := ctx.Args().First() - var inputSet nodeSet + inputSet := make(nodeSet) if common.FileExist(nodesFile) { inputSet = loadNodesJSON(nodesFile) } - disc := startV5(ctx) + disc, config := startV5(ctx) defer disc.Close() - c := newCrawler(inputSet, disc, disc.RandomNodes()) + + c, err := newCrawler(inputSet, config.Bootnodes, disc, disc.RandomNodes()) + if err != nil { + return err + } c.revalidateInterval = 10 * time.Minute output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name)) writeNodesJSON(nodesFile, output) @@ -127,7 +131,7 @@ func discv5Test(ctx *cli.Context) error { } func discv5Listen(ctx *cli.Context) error { - disc := startV5(ctx) + disc, _ := startV5(ctx) defer disc.Close() fmt.Println(disc.Self()) @@ -135,12 +139,12 @@ func discv5Listen(ctx *cli.Context) error { } // startV5 starts an ephemeral discovery v5 node. 
-func startV5(ctx *cli.Context) *discover.UDPv5 { +func startV5(ctx *cli.Context) (*discover.UDPv5, discover.Config) { ln, config := makeDiscoveryConfig(ctx) socket := listen(ctx, ln) disc, err := discover.ListenV5(socket, ln, config) if err != nil { exit(err) } - return disc + return disc, config } From 30d5d7c1b366d290b6a8f7fc56eb015883f57c5c Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 19 Sep 2023 14:20:06 +0200 Subject: [PATCH 28/98] go.mod: use existing version of karalabe/usb (#28127) There is no 0.0.3 release of karalabe/usb. --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8061220aa6..a43b1d3f8b 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 github.com/julienschmidt/httprouter v1.3.0 - github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c + github.com/karalabe/usb v0.0.2 github.com/kylelemons/godebug v1.1.0 github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.16 diff --git a/go.sum b/go.sum index 9c6fd74e4a..ca5617c2cc 100644 --- a/go.sum +++ b/go.sum @@ -362,8 +362,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs= -github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4= +github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= From 7ed5bc021addd80c81af4b89c2713983a1775fbf Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 19 Sep 2023 14:47:24 +0200 Subject: [PATCH 29/98] trie: add getter for preimage store in trie.Database (#28155) --- trie/database.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/trie/database.go b/trie/database.go index 2915ff9484..535ad87d72 100644 --- a/trie/database.go +++ b/trie/database.go @@ -189,6 +189,15 @@ func (db *Database) WritePreimages() { } } +// Preimage retrieves a cached trie node pre-image from memory. If it cannot be +// found cached, the method queries the persistent database for the content. +func (db *Database) Preimage(hash common.Hash) []byte { + if db.preimages == nil { + return nil + } + return db.preimages.preimage(hash) +} + // Cap iteratively flushes old but still referenced trie nodes until the total // memory usage goes below the given threshold. The held pre-images accumulated // up to this point will be flushed in case the size exceeds the threshold. 
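To show how the new getter slots in, here is a minimal usage sketch (not part of the patch). It assumes a trie.Database opened with preimage recording enabled via trie.Config{Preimages: true}, matching the configuration used elsewhere in this series; Preimage returns nil when the store is disabled or the hash has never been recorded.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// Open an in-memory backed trie database with preimage recording on.
	triedb := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true})
	defer triedb.Close()

	// Secure tries store keys under their Keccak256 hash; the preimage
	// store maps that hash back to the original key.
	key := []byte("example-key")
	hash := crypto.Keccak256Hash(key)

	// Once trie writes have recorded the preimage, the getter serves it
	// from memory first and falls back to the persistent database.
	if pre := triedb.Preimage(hash); pre != nil {
		fmt.Printf("preimage of %x is %q\n", hash, pre)
	} else {
		fmt.Println("preimage not recorded yet")
	}
}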
From 5c6f4b9f0d4270fcc56df681bf003e6a74f11a6b Mon Sep 17 00:00:00 2001 From: phenix3443 Date: Wed, 20 Sep 2023 03:20:18 +0800 Subject: [PATCH 30/98] cmd/utils: fix typo in comment (#28159) --- cmd/utils/flags.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index f5f131951a..1f0877cc6d 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -998,7 +998,7 @@ func MakeDataDir(ctx *cli.Context) string { // setNodeKey creates a node key from set command line flags, either loading it // from a file or as a specified hex value. If neither flags were provided, this -// method returns nil and an emphemeral key is to be generated. +// method returns nil and an ephemeral key is to be generated. func setNodeKey(ctx *cli.Context, cfg *p2p.Config) { var ( hex = ctx.String(NodeKeyHexFlag.Name) @@ -2121,7 +2121,7 @@ func DialRPCWithHeaders(endpoint string, headers []string) (*rpc.Client, error) } var opts []rpc.ClientOption if len(headers) > 0 { - var customHeaders = make(http.Header) + customHeaders := make(http.Header) for _, h := range headers { kv := strings.Split(h, ":") if len(kv) != 2 { From 5b9cbe30f8ca2487c8991e50e9c939d5e6ec3cc2 Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 20 Sep 2023 18:39:46 +0800 Subject: [PATCH 31/98] cmd/clef: suppress fsnotify error if keydir not exists (#28160) Since the keydir is created automatically once an account is created, no error message is printed if the watcher fails to start. --- accounts/keystore/watch.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/accounts/keystore/watch.go b/accounts/keystore/watch.go index 3f64b89c58..a9f87e7c32 100644 --- a/accounts/keystore/watch.go +++ b/accounts/keystore/watch.go @@ -20,6 +20,7 @@ package keystore import ( + "os" "time" "github.com/ethereum/go-ethereum/log" @@ -77,7 +78,9 @@ func (w *watcher) loop() { } defer watcher.Close() if err := watcher.Add(w.ac.keydir); err != nil { - logger.Warn("Failed to watch keystore folder", "err", err) + if !os.IsNotExist(err) { + logger.Warn("Failed to watch keystore folder", "err", err) + } return } From 545f4c5547178bc8bde6af08b3ccaf68ca27f2c0 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 21 Sep 2023 16:05:55 +0800 Subject: [PATCH 32/98] core/rawdb: no need to run truncateFile for readonly mode (#28145) Avoid truncating files if ancients are opened in readonly mode.
With this change, we return an error instead of trying (and failing) to repair --- core/rawdb/freezer_table.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index fc6316c953..cb32d61ae8 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -212,6 +212,9 @@ func (t *freezerTable) repair() error { } // Ensure the index is a multiple of indexEntrySize bytes if overflow := stat.Size() % indexEntrySize; overflow != 0 { + if t.readonly { + return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize) + } truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path } // Retrieve the file sizes and prepare for truncation @@ -270,6 +273,9 @@ func (t *freezerTable) repair() error { // Keep truncating both files until they come in sync contentExp = int64(lastIndex.offset) for contentExp != contentSize { + if t.readonly { + return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum) + } verbose = true // Truncate the head file to the last offset pointer if contentExp < contentSize { From 4773dcbc81aac9d330df29446283361f5a7062c7 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Fri, 22 Sep 2023 14:31:10 +0800 Subject: [PATCH 33/98] trie: remove internal nodes between shortNode and child in path mode (#28163) * trie: remove internal nodes between shortNode and child in path mode * trie: address comments * core/rawdb, trie: address comments * core/rawdb: delete unused func * trie: change comments * trie: add missing tests * trie: fix lint --- core/rawdb/accessors_trie.go | 20 +++++ trie/sync.go | 95 ++++++++++++++++---- trie/sync_test.go | 163 +++++++++++++++++++++++++++++------ 3 files changed, 238 insertions(+), 40 deletions(-) diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index f5c2f8899a..ea437b8114 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -89,6 +89,16 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) return h.hash(data) == hash } +// ExistsAccountTrieNode checks the presence of the account trie node with the +// specified node path, regardless of the node hash. +func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool { + has, err := db.Has(accountTrieNodeKey(path)) + if err != nil { + return false + } + return has +} + // WriteAccountTrieNode writes the provided account trie node into database. func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) { if err := db.Put(accountTrieNodeKey(path), node); err != nil { @@ -127,6 +137,16 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [ return h.hash(data) == hash } +// ExistsStorageTrieNode checks the presence of the storage trie node with the +// specified account hash and node path, regardless of the node hash. +func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool { + has, err := db.Has(storageTrieNodeKey(accountHash, path)) + if err != nil { + return false + } + return has +} + // WriteStorageTrieNode writes the provided storage trie node into database.
func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) { if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil { diff --git a/trie/sync.go b/trie/sync.go index 4f55845991..9da0706075 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" ) // ErrNotRequested is returned by the trie sync when it's requested to process a @@ -42,6 +43,16 @@ var ErrAlreadyProcessed = errors.New("already processed") // memory if the node was configured with a significant number of peers. const maxFetchesPerDepth = 16384 +var ( + // deletionGauge is the metric to track how many trie node deletions + // are performed in total during the sync process. + deletionGauge = metrics.NewRegisteredGauge("trie/sync/delete", nil) + + // lookupGauge is the metric to track how many trie node lookups are + // performed to determine if node needs to be deleted. + lookupGauge = metrics.NewRegisteredGauge("trie/sync/lookup", nil) +) + // SyncPath is a path tuple identifying a particular trie node either in a single // trie (account) or a layered trie (account -> storage). // @@ -93,9 +104,10 @@ type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Ha // nodeRequest represents a scheduled or already in-flight trie node retrieval request. type nodeRequest struct { - hash common.Hash // Hash of the trie node to retrieve - path []byte // Merkle path leading to this node for prioritization - data []byte // Data content of the node, cached until all subtrees complete + hash common.Hash // Hash of the trie node to retrieve + path []byte // Merkle path leading to this node for prioritization + data []byte // Data content of the node, cached until all subtrees complete + deletes [][]byte // List of internal path segments for trie nodes to delete parent *nodeRequest // Parent state node referencing this entry deps int // Number of dependencies before allowed to commit this node @@ -125,18 +137,20 @@ type CodeSyncResult struct { // syncMemBatch is an in-memory buffer of successfully downloaded but not yet // persisted data items. type syncMemBatch struct { - nodes map[string][]byte // In-memory membatch of recently completed nodes - hashes map[string]common.Hash // Hashes of recently completed nodes - codes map[common.Hash][]byte // In-memory membatch of recently completed codes - size uint64 // Estimated batch-size of in-memory data. + nodes map[string][]byte // In-memory membatch of recently completed nodes + hashes map[string]common.Hash // Hashes of recently completed nodes + deletes map[string]struct{} // List of paths for trie node to delete + codes map[common.Hash][]byte // In-memory membatch of recently completed codes + size uint64 // Estimated batch-size of in-memory data. } // newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes. 
func newSyncMemBatch() *syncMemBatch { return &syncMemBatch{ - nodes: make(map[string][]byte), - hashes: make(map[string]common.Hash), - codes: make(map[common.Hash][]byte), + nodes: make(map[string][]byte), + hashes: make(map[string]common.Hash), + deletes: make(map[string]struct{}), + codes: make(map[common.Hash][]byte), } } @@ -347,16 +361,23 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error { // Commit flushes the data stored in the internal membatch out to persistent // storage, returning any occurred error. func (s *Sync) Commit(dbw ethdb.Batch) error { - // Dump the membatch into a database dbw + // Flush the pending node writes into database batch. for path, value := range s.membatch.nodes { owner, inner := ResolvePath([]byte(path)) rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme) } + // Flush the pending node deletes into the database batch. + // Please note that each written and deleted node has a + // unique path, ensuring no duplication occurs. + for path := range s.membatch.deletes { + owner, inner := ResolvePath([]byte(path)) + rawdb.DeleteTrieNode(dbw, owner, inner, common.Hash{} /* unused */, s.scheme) + } + // Flush the pending code writes into database batch. for hash, value := range s.membatch.codes { rawdb.WriteCode(dbw, hash, value) } - // Drop the membatch data and return - s.membatch = newSyncMemBatch() + s.membatch = newSyncMemBatch() // reset the batch return nil } @@ -425,6 +446,39 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) { node: node.Val, path: append(append([]byte(nil), req.path...), key...), }} + // Mark all internal nodes between shortNode and its **in disk** + // child as invalid. This is essential in the case of path mode + // scheme; otherwise, state healing might overwrite existing child + // nodes silently while leaving a dangling parent node within the + // range of this internal path on disk. This would break the + // guarantee for state healing. + // + // While it's possible for this shortNode to overwrite a previously + // existing full node, the other branches of the fullNode can be + // retained as they remain untouched and complete. + // + // This step is only necessary for path mode, as there is no deletion + // in hash mode at all. + if _, ok := node.Val.(hashNode); ok && s.scheme == rawdb.PathScheme { + owner, inner := ResolvePath(req.path) + for i := 1; i < len(key); i++ { + // While checking for a non-existent item in Pebble can be less efficient + // without a bloom filter, the relatively low frequency of lookups makes + // the performance impact negligible. + var exists bool + if owner == (common.Hash{}) { + exists = rawdb.ExistsAccountTrieNode(s.database, append(inner, key[:i]...)) + } else { + exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...)) + } + if exists { + req.deletes = append(req.deletes, key[:i]) + deletionGauge.Inc(1) + log.Debug("Detected dangling node", "owner", owner, "path", append(inner, key[:i]...)) + } + } + lookupGauge.Inc(int64(len(key) - 1)) + } case *fullNode: for i := 0; i < 17; i++ { if node.Children[i] != nil { @@ -509,10 +563,19 @@ func (s *Sync) commitNodeRequest(req *nodeRequest) error { // Write the node content to the membatch s.membatch.nodes[string(req.path)] = req.data s.membatch.hashes[string(req.path)] = req.hash + // The size tracking refers to the db-batch, not the in-memory data. 
- // Therefore, we ignore the req.path, and account only for the hash+data - // which eventually is written to db. - s.membatch.size += common.HashLength + uint64(len(req.data)) + if s.scheme == rawdb.PathScheme { + s.membatch.size += uint64(len(req.path) + len(req.data)) + } else { + s.membatch.size += common.HashLength + uint64(len(req.data)) + } + // Delete the internal nodes which are marked as invalid + for _, segment := range req.deletes { + path := append(req.path, segment...) + s.membatch.deletes[string(path)] = struct{}{} + s.membatch.size += uint64(len(path)) + } delete(s.nodeReqs, string(req.path)) s.fetches[len(req.path)]-- diff --git a/trie/sync_test.go b/trie/sync_test.go index dd3506559d..3b7986ef67 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -70,31 +70,53 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str // checkTrieContents cross references a reconstructed trie with an expected data // content map. -func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte) { +func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte, rawTrie bool) { // Check root availability and trie contents ndb := newTestDatabase(db, scheme) - trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb) - if err != nil { - t.Fatalf("failed to create trie at %x: %v", root, err) - } - if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil { + if err := checkTrieConsistency(db, scheme, common.BytesToHash(root), rawTrie); err != nil { t.Fatalf("inconsistent trie at %x: %v", root, err) } + type reader interface { + MustGet(key []byte) []byte + } + var r reader + if rawTrie { + trie, err := New(TrieID(common.BytesToHash(root)), ndb) + if err != nil { + t.Fatalf("failed to create trie at %x: %v", root, err) + } + r = trie + } else { + trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb) + if err != nil { + t.Fatalf("failed to create trie at %x: %v", root, err) + } + r = trie + } for key, val := range content { - if have := trie.MustGet([]byte(key)); !bytes.Equal(have, val) { + if have := r.MustGet([]byte(key)); !bytes.Equal(have, val) { t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val) } } } // checkTrieConsistency checks that all nodes in a trie are indeed present. 
-func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error { +func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash, rawTrie bool) error { ndb := newTestDatabase(db, scheme) - trie, err := NewStateTrie(TrieID(root), ndb) - if err != nil { - return nil // Consider a non existent state consistent + var it NodeIterator + if rawTrie { + trie, err := New(TrieID(root), ndb) + if err != nil { + return nil // Consider a non existent state consistent + } + it = trie.MustNodeIterator(nil) + } else { + trie, err := NewStateTrie(TrieID(root), ndb) + if err != nil { + return nil // Consider a non existent state consistent + } + it = trie.MustNodeIterator(nil) } - it := trie.MustNodeIterator(nil) for it.Next(true) { } return it.Error() @@ -205,7 +227,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that the trie scheduler can correctly reconstruct the state even if only @@ -271,7 +293,7 @@ func testIterativeDelayedSync(t *testing.T, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that given a root hash, a trie can sync iteratively on a single thread, @@ -341,7 +363,7 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that the trie scheduler can correctly reconstruct the state even if only @@ -413,7 +435,7 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that a trie sync will not request nodes multiple times, even if they @@ -484,7 +506,7 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that at any point in time during a sync, only complete sub-tries are in @@ -569,7 +591,7 @@ func testIncompleteSync(t *testing.T, scheme string) { nodeHash := addedHashes[i] value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme) rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme) - if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil { + if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root, false); err == nil { t.Fatalf("trie inconsistency not caught, missing: %x", path) } rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme) @@ -643,7 +665,7 @@ func testSyncOrdering(t *testing.T, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) // Check that the trie 
nodes have been requested path-ordered for i := 0; i < len(reqs)-1; i++ { @@ -664,7 +686,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database // The code requests are ignored here since there is no code // at the testing trie. - paths, nodes, _ := sched.Missing(1) + paths, nodes, _ := sched.Missing(0) var elements []trieElement for i := 0; i < len(paths); i++ { elements = append(elements, trieElement{ @@ -698,7 +720,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database } batch.Write() - paths, nodes, _ = sched.Missing(1) + paths, nodes, _ = sched.Missing(0) elements = elements[:0] for i := 0; i < len(paths); i++ { elements = append(elements, trieElement{ @@ -724,7 +746,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) { // Create a destination trie and sync with the scheduler diskdb := rawdb.NewMemoryDatabase() syncWith(t, srcTrie.Hash(), diskdb, srcDb) - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) // Push more modifications into the src trie, to see if dest trie can still // sync with it(overwrite stale states) @@ -748,7 +770,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) { srcTrie, _ = NewStateTrie(TrieID(root), srcDb) syncWith(t, srcTrie.Hash(), diskdb, srcDb) - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff, false) // Revert added modifications from the src trie, to see if dest trie can still // sync with it(overwrite reverted states) @@ -772,5 +794,98 @@ func testSyncMovingTarget(t *testing.T, scheme string) { srcTrie, _ = NewStateTrie(TrieID(root), srcDb) syncWith(t, srcTrie.Hash(), diskdb, srcDb) - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted, false) +} + +// Tests if state syncer can correctly catch up the pivot move. 
+func TestPivotMove(t *testing.T) { + testPivotMove(t, rawdb.HashScheme, true) + testPivotMove(t, rawdb.HashScheme, false) + testPivotMove(t, rawdb.PathScheme, true) + testPivotMove(t, rawdb.PathScheme, false) +} + +func testPivotMove(t *testing.T, scheme string, tiny bool) { + var ( + srcDisk = rawdb.NewMemoryDatabase() + srcTrieDB = newTestDatabase(srcDisk, scheme) + srcTrie, _ = New(TrieID(types.EmptyRootHash), srcTrieDB) + + deleteFn = func(key []byte, tr *Trie, states map[string][]byte) { + tr.Delete(key) + delete(states, string(key)) + } + writeFn = func(key []byte, val []byte, tr *Trie, states map[string][]byte) { + if val == nil { + if tiny { + val = randBytes(4) + } else { + val = randBytes(32) + } + } + tr.Update(key, val) + states[string(key)] = common.CopyBytes(val) + } + copyStates = func(states map[string][]byte) map[string][]byte { + cpy := make(map[string][]byte) + for k, v := range states { + cpy[k] = v + } + return cpy + } + ) + stateA := make(map[string][]byte) + writeFn([]byte{0x01, 0x23}, nil, srcTrie, stateA) + writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateA) + writeFn([]byte{0x12, 0x33}, nil, srcTrie, stateA) + writeFn([]byte{0x12, 0x34}, nil, srcTrie, stateA) + writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateA) + writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA) + + rootA, nodesA, _ := srcTrie.Commit(false) + if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil { + panic(err) + } + if err := srcTrieDB.Commit(rootA, false); err != nil { + panic(err) + } + // Create a destination trie and sync with the scheduler + destDisk := rawdb.NewMemoryDatabase() + syncWith(t, rootA, destDisk, srcTrieDB) + checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateA, true) + + // Delete element to collapse trie + stateB := copyStates(stateA) + srcTrie, _ = New(TrieID(rootA), srcTrieDB) + deleteFn([]byte{0x02, 0x34}, srcTrie, stateB) + deleteFn([]byte{0x13, 0x44}, srcTrie, stateB) + writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateB) + + rootB, nodesB, _ := srcTrie.Commit(false) + if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil { + panic(err) + } + if err := srcTrieDB.Commit(rootB, false); err != nil { + panic(err) + } + syncWith(t, rootB, destDisk, srcTrieDB) + checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateB, true) + + // Add elements to expand trie + stateC := copyStates(stateB) + srcTrie, _ = New(TrieID(rootB), srcTrieDB) + + writeFn([]byte{0x01, 0x24}, stateA[string([]byte{0x01, 0x24})], srcTrie, stateC) + writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateC) + writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateC) + + rootC, nodesC, _ := srcTrie.Commit(false) + if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil { + panic(err) + } + if err := srcTrieDB.Commit(rootC, false); err != nil { + panic(err) + } + syncWith(t, rootC, destDisk, srcTrieDB) + checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateC, true) } From 03c2176a1d9802d705874c818ec2c83949b6c56f Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Fri, 22 Sep 2023 14:33:17 +0800 Subject: [PATCH 34/98] trie/triedb/pathdb: improve error log (#28177) --- trie/triedb/pathdb/difflayer.go | 2 +- trie/triedb/pathdb/disklayer.go | 2 +- trie/triedb/pathdb/errors.go | 9 +++++++-- trie/triedb/pathdb/nodebuffer.go | 2 +- 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/trie/triedb/pathdb/difflayer.go b/trie/triedb/pathdb/difflayer.go index 
d25ac1c601..10567715d2 100644 --- a/trie/triedb/pathdb/difflayer.go +++ b/trie/triedb/pathdb/difflayer.go @@ -114,7 +114,7 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, dept if n.Hash != hash { dirtyFalseMeter.Mark(1) log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) - return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path) + return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path, n.Blob) } dirtyHitMeter.Mark(1) dirtyNodeHitDepthHist.Update(int64(depth)) diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go index 87718290f9..d3b6419cc5 100644 --- a/trie/triedb/pathdb/disklayer.go +++ b/trie/triedb/pathdb/disklayer.go @@ -150,7 +150,7 @@ func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]b if nHash != hash { diskFalseMeter.Mark(1) log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash) - return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path) + return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path, nBlob) } if dl.cleans != nil && len(nBlob) > 0 { dl.cleans.Set(key, nBlob) diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go index f503a9c49d..f6ac0ec4a0 100644 --- a/trie/triedb/pathdb/errors.go +++ b/trie/triedb/pathdb/errors.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" ) var ( @@ -46,6 +47,10 @@ var ( errUnexpectedNode = errors.New("unexpected node") ) -func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error { - return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash) +func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte, blob []byte) error { + blobHex := "nil" + if len(blob) > 0 { + blobHex = hexutil.Encode(blob) + } + return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x, blob: %s", errUnexpectedNode, loc, owner, path, expHash, gotHash, blobHex) } diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go index 67de225b04..4a7d328b9a 100644 --- a/trie/triedb/pathdb/nodebuffer.go +++ b/trie/triedb/pathdb/nodebuffer.go @@ -71,7 +71,7 @@ func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*tr if n.Hash != hash { dirtyFalseMeter.Mark(1) log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) - return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path) + return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path, n.Blob) } return n, nil } From 83f3fc2e809207505ecf55b595c862b370289f36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 22 Sep 2023 10:27:58 +0300 Subject: [PATCH 35/98] core/state/snapshot: be very noisy if the generator hits a trie error (#28178) --- core/state/snapshot/generate.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index 40264b092c..f54debebed 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -446,6 +446,10 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi internal += time.Since(istart) } if iter.Err != nil { + // Trie errors should never happen. 
Still, in case of a bug, expose the + error here, as the outer code will presume errors are interrupts, not + some deeper issues. + log.Error("State snapshotter failed to iterate trie", "err", iter.Err) return false, nil, iter.Err } // Delete all stale snapshot states remaining From d135bafdcb1d023c3ef74d11f3b8d4ebd06f253c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 22 Sep 2023 11:07:20 +0300 Subject: [PATCH 36/98] cmd/geth: print progress logs when iterating large contracts too (#28179) --- cmd/geth/snapshot.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 5e1c784730..b1db0c9d5a 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -332,6 +332,11 @@ func traverseState(ctx *cli.Context) error { storageIter := trie.NewIterator(storageIt) for storageIter.Next() { slots += 1 + + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } } if storageIter.Err != nil { log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Err) @@ -486,6 +491,10 @@ func traverseRawState(ctx *cli.Context) error { if storageIter.Leaf() { slots += 1 } + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } } if storageIter.Error() != nil { log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Error()) From f1b2ec0833df47e3d3a781d7097ff99e5ffb5378 Mon Sep 17 00:00:00 2001 From: Delweng Date: Fri, 22 Sep 2023 18:10:50 +0800 Subject: [PATCH 37/98] core/rawdb: use readonly file lock in readonly mode (#28180) This allows using the freezer from multiple processes at once in read-only mode. Co-authored-by: Martin Holst Swende --- core/rawdb/freezer.go | 6 ++++- core/rawdb/freezer_test.go | 51 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index a9fe234321..b7824ddc0d 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -108,7 +108,11 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui // Leveldb uses LOCK as the filelock filename. To prevent the // name collision, we use FLOCK as the lock name.
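The hunk continuing below swaps the exclusive TryLock for a shared TryRLock when the freezer is opened read-only. A minimal standalone sketch of the lock semantics this relies on, assuming the github.com/gofrs/flock package used by the freezer and a placeholder lock path: any number of shared holders may coexist, while an exclusive request fails as long as a shared lock is held.

package main

import (
	"fmt"

	"github.com/gofrs/flock"
)

func main() {
	// Two read-only openers take shared locks on the same file: both succeed.
	readerA := flock.New("/tmp/FLOCK") // placeholder lock path
	readerB := flock.New("/tmp/FLOCK")
	lockedA, _ := readerA.TryRLock()
	lockedB, _ := readerB.TryRLock()
	fmt.Println(lockedA, lockedB) // true true

	// An exclusive lock request is refused while any shared holder remains.
	writer := flock.New("/tmp/FLOCK")
	lockedW, _ := writer.TryLock()
	fmt.Println(lockedW) // false
}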
lock := flock.New(flockFile) - if locked, err := lock.TryLock(); err != nil { + tryLock := lock.TryLock + if readonly { + tryLock = lock.TryRLock + } + if locked, err := tryLock(); err != nil { return nil, err } else if !locked { return nil, errors.New("locking failed") diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index 96d24cc947..b4bd6a382a 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -283,6 +283,57 @@ func TestFreezerReadonlyValidate(t *testing.T) { } } +func TestFreezerConcurrentReadonly(t *testing.T) { + t.Parallel() + + tables := map[string]bool{"a": true} + dir := t.TempDir() + + f, err := NewFreezer(dir, "", false, 2049, tables) + if err != nil { + t.Fatal("can't open freezer", err) + } + var item = make([]byte, 1024) + batch := f.tables["a"].newBatch() + items := uint64(10) + for i := uint64(0); i < items; i++ { + require.NoError(t, batch.AppendRaw(i, item)) + } + require.NoError(t, batch.commit()) + if loaded := f.tables["a"].items.Load(); loaded != items { + t.Fatalf("unexpected number of items in table, want: %d, have: %d", items, loaded) + } + require.NoError(t, f.Close()) + + var ( + wg sync.WaitGroup + fs = make([]*Freezer, 5) + errs = make([]error, 5) + ) + for i := 0; i < 5; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + f, err := NewFreezer(dir, "", true, 2049, tables) + if err == nil { + fs[i] = f + } else { + errs[i] = err + } + }(i) + } + + wg.Wait() + + for i := range fs { + if err := errs[i]; err != nil { + t.Fatal("failed to open freezer", err) + } + require.NoError(t, fs[i].Close()) + } +} + func newFreezerForTesting(t *testing.T, tables map[string]bool) (*Freezer, string) { t.Helper() From 82ec555d709e5a3a2e0d22430f2ac70ebe814e88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 22 Sep 2023 14:56:48 +0300 Subject: [PATCH 38/98] cmd: add state.scheme to the database flag group for local flag handling (#28107) --- cmd/geth/chaincmd.go | 16 ++++++---------- cmd/geth/dbcmd.go | 27 +++++++++++++-------------- cmd/geth/main.go | 3 +-- cmd/geth/snapshot.go | 21 +++++++-------------- cmd/geth/verkle.go | 4 ++-- cmd/utils/flags.go | 7 ++++--- 6 files changed, 33 insertions(+), 45 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index fad2c71e68..204565af57 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -50,8 +50,7 @@ var ( ArgsUsage: "", Flags: flags.Merge([]cli.Flag{ utils.CachePreimagesFlag, - utils.StateSchemeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` The init command initializes a new genesis block and definition for the network. This is a destructive action and changes the network in which you will be @@ -97,9 +96,8 @@ if one is set. Otherwise it prints the genesis from the datadir.`, utils.MetricsInfluxDBOrganizationFlag, utils.TxLookupLimitFlag, utils.TransactionHistoryFlag, - utils.StateSchemeFlag, utils.StateHistoryFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` The import command imports blocks from an RLP-encoded form. The form can be one file with several RLP-encoded blocks, or several files can be used. @@ -115,8 +113,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`, Flags: flags.Merge([]cli.Flag{ utils.CacheFlag, utils.SyncModeFlag, - utils.StateSchemeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` Requires a first argument of the file to write to. 
Optional second and third arguments control the first and @@ -132,7 +129,7 @@ be gzipped.`, Flags: flags.Merge([]cli.Flag{ utils.CacheFlag, utils.SyncModeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` The import-preimages command imports hash preimages from an RLP encoded stream. It's deprecated, please use "geth db import" instead. @@ -146,7 +143,7 @@ It's deprecated, please use "geth db import" instead. Flags: flags.Merge([]cli.Flag{ utils.CacheFlag, utils.SyncModeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` The export-preimages command exports hash preimages to an RLP encoded stream. It's deprecated, please use "geth db export" instead. @@ -165,8 +162,7 @@ It's deprecated, please use "geth db export" instead. utils.IncludeIncompletesFlag, utils.StartKeyFlag, utils.DumpLimitFlag, - utils.StateSchemeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` This command dumps out the state for a given block (or latest, if none provided). `, diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index a1868eb8c3..5ba9acc1cf 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -48,7 +48,7 @@ var ( Name: "removedb", Usage: "Remove blockchain and state databases", ArgsUsage: "", - Flags: utils.DatabasePathFlags, + Flags: utils.DatabaseFlags, Description: ` Remove blockchain and state databases`, } @@ -77,7 +77,7 @@ Remove blockchain and state databases`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Usage: "Inspect the storage size for each type of data in the database", Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, } @@ -85,7 +85,7 @@ Remove blockchain and state databases`, Action: checkStateContent, Name: "check-state-content", ArgsUsage: "", - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Usage: "Verify that state data is cryptographically correct", Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes. For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates @@ -97,7 +97,7 @@ a data corruption.`, Usage: "Print leveldb statistics", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), } dbCompactCmd = &cli.Command{ Action: dbCompact, @@ -107,7 +107,7 @@ a data corruption.`, utils.SyncModeFlag, utils.CacheFlag, utils.CacheDatabaseFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: `This command performs a database compaction. 
WARNING: This operation may take a very long time to finish, and may cause database corruption if it is aborted during execution'!`, @@ -119,7 +119,7 @@ corruption if it is aborted during execution'!`, ArgsUsage: "", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "This command looks up the specified database key from the database.", } dbDeleteCmd = &cli.Command{ @@ -129,7 +129,7 @@ corruption if it is aborted during execution'!`, ArgsUsage: "", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: `This command deletes the specified database key from the database. WARNING: This is a low-level operation which may cause database corruption!`, } @@ -140,7 +140,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: `This command sets a given database key to the given value. WARNING: This is a low-level operation which may cause database corruption!`, } @@ -151,8 +151,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "This command looks up the specified database key from the database.", } dbDumpFreezerIndex = &cli.Command{ @@ -162,7 +161,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "This command displays information about the freezer index.", } dbImportCmd = &cli.Command{ @@ -172,7 +171,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.", } dbMetadataCmd = &cli.Command{ @@ -191,7 +190,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, Usage: "Shows metadata about the chain status.", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "Shows metadata about the chain status.", } ) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index f6fa47ad2e..4b26de05a9 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -89,7 +89,6 @@ var ( utils.SnapshotFlag, utils.TxLookupLimitFlag, utils.TransactionHistoryFlag, - utils.StateSchemeFlag, utils.StateHistoryFlag, utils.LightServeFlag, utils.LightIngressFlag, @@ -145,7 +144,7 @@ var ( utils.GpoMaxGasPriceFlag, utils.GpoIgnoreGasPriceFlag, configFileFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags) + }, utils.NetworkFlags, utils.DatabaseFlags) rpcFlags = []cli.Flag{ utils.HTTPEnabledFlag, diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index b1db0c9d5a..4c8dfa84b6 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -51,7 +51,7 @@ var ( 
Action: pruneState, Flags: flags.Merge([]cli.Flag{ utils.BloomFilterSizeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot prune-state will prune historical state data with the help of the state snapshot. @@ -69,9 +69,7 @@ WARNING: it's only supported in hash mode(--state.scheme=hash)". Usage: "Recalculate state hash based on the snapshot for verification", ArgsUsage: "", Action: verifyState, - Flags: flags.Merge([]cli.Flag{ - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot verify-state will traverse the whole accounts and storages set based on the specified @@ -84,7 +82,7 @@ In other words, this command does the snapshot to trie conversion. Usage: "Check that there is no 'dangling' snap storage", ArgsUsage: "", Action: checkDanglingStorage, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot check-dangling-storage traverses the snap storage data, and verifies that all snapshot storage data has a corresponding account. @@ -95,7 +93,7 @@ data, and verifies that all snapshot storage data has a corresponding account. Usage: "Check all snapshot layers for the a specific account", ArgsUsage: "
", Action: checkAccount, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot inspect-account
checks all snapshot layers and prints out information about the specified address. @@ -106,9 +104,7 @@ information about the specified address. Usage: "Traverse the state with given root hash and perform quick verification", ArgsUsage: "", Action: traverseState, - Flags: flags.Merge([]cli.Flag{ - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot traverse-state will traverse the whole state from the given state root and will abort if any @@ -123,9 +119,7 @@ It's also usable without snapshot enabled. Usage: "Traverse the state with given root hash and perform detailed verification", ArgsUsage: "", Action: traverseRawState, - Flags: flags.Merge([]cli.Flag{ - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot traverse-rawstate will traverse the whole state from the given root and will abort if any referenced @@ -146,8 +140,7 @@ It's also usable without snapshot enabled. utils.ExcludeStorageFlag, utils.StartKeyFlag, utils.DumpLimitFlag, - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: ` This command is semantically equivalent to 'geth dump', but uses the snapshots as the backend data source, making this command a lot faster. diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index 9ba2b41671..88f60276ea 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -45,7 +45,7 @@ var ( Usage: "verify the conversion of a MPT into a verkle tree", ArgsUsage: "", Action: verifyVerkle, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth verkle verify This command takes a root commitment and attempts to rebuild the tree. @@ -56,7 +56,7 @@ This command takes a root commitment and attempts to rebuild the tree. Usage: "Dump a verkle tree to a DOT file", ArgsUsage: " [ ...]", Action: expandVerkle, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth verkle dump [ ...] This command will produce a dot file representing the tree, rooted at . diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 1f0877cc6d..653ae4d9eb 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -961,18 +961,19 @@ var ( // NetworkFlags is the flag group of all built-in supported networks. NetworkFlags = append([]cli.Flag{MainnetFlag}, TestnetFlags...) - // DatabasePathFlags is the flag group of all database path flags. - DatabasePathFlags = []cli.Flag{ + // DatabaseFlags is the flag group of all database flags. 
+ DatabaseFlags = []cli.Flag{ DataDirFlag, AncientFlag, RemoteDBFlag, + StateSchemeFlag, HttpHeaderFlag, } ) func init() { if rawdb.PebbleEnabled { - DatabasePathFlags = append(DatabasePathFlags, DBEngineFlag) + DatabaseFlags = append(DatabaseFlags, DBEngineFlag) } } From 323542af505191c35d999a28e6b6c76ac6abfdcf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 25 Sep 2023 16:10:23 +0300 Subject: [PATCH 39/98] core, params: update Holesky testnet to relaunched spec (#28191) --- core/genesis.go | 3 +-- params/config.go | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index 86a3e42a62..baace3f991 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -587,10 +587,9 @@ func DefaultHoleskyGenesisBlock() *Genesis { return &Genesis{ Config: params.HoleskyChainConfig, Nonce: 0x1234, - ExtraData: hexutil.MustDecode("0x686f77206d7563682069732074686520666973683f"), GasLimit: 0x17d7840, Difficulty: big.NewInt(0x01), - Timestamp: 1694786100, + Timestamp: 1695902100, Alloc: decodePrealloc(holeskyAllocData), } } diff --git a/params/config.go b/params/config.go index f503862422..bfcd5fe072 100644 --- a/params/config.go +++ b/params/config.go @@ -80,8 +80,7 @@ var ( TerminalTotalDifficulty: big.NewInt(0), TerminalTotalDifficultyPassed: true, MergeNetsplitBlock: nil, - ShanghaiTime: newUint64(1694790240), - CancunTime: newUint64(2000000000), + ShanghaiTime: newUint64(1696000704), Ethash: new(EthashConfig), } // SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network. From d051ea5e89a8f803b421bc32f0c7043f9d5d9aa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 25 Sep 2023 16:13:56 +0300 Subject: [PATCH 40/98] params: update hash for Holesky relaunch (#28192) --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index bfcd5fe072..ac55d3771a 100644 --- a/params/config.go +++ b/params/config.go @@ -26,7 +26,7 @@ import ( // Genesis hashes to enforce below configs on. var ( MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") - HoleskyGenesisHash = common.HexToHash("0xff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d") + HoleskyGenesisHash = common.HexToHash("0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4") SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") ) From c2cfe35f121cb88650b4d90c958bcc4214d0ce7f Mon Sep 17 00:00:00 2001 From: tokikuch Date: Mon, 25 Sep 2023 06:35:24 -0700 Subject: [PATCH 41/98] core/bloombits: fix deadlock when matcher session hits an error (#28184) When MatcherSession encounters an error, it attempts to close the session. Closing waits for all goroutines to finish, including the 'distributor'. However, the distributor will not exit until all requests have returned. This patch fixes the issue by delivering the (empty) result to the distributor before calling Close(). 
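A minimal, self-contained sketch of the ordering the fix establishes (illustrative names, not the real bloombits types): the distributor goroutine only exits once its outstanding request has been answered, and shutdown waits for the distributor, so the answer must be delivered before shutdown begins.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		wg      sync.WaitGroup
		answers = make(chan error) // unbuffered: the distributor blocks on it
	)
	wg.Add(1)
	go func() { // distributor: exits only after its request has returned
		defer wg.Done()
		fmt.Println("distributor got:", <-answers)
	}()
	// Deliver the (possibly empty) result first...
	answers <- fmt.Errorf("retrieval failed")
	// ...then wait for shutdown. Reversing the two steps deadlocks, since
	// the distributor would still be blocked on the receive above.
	wg.Wait()
}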
--- core/bloombits/matcher.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go index d8f932041b..cf799c8324 100644 --- a/core/bloombits/matcher.go +++ b/core/bloombits/matcher.go @@ -630,13 +630,16 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx} result := <-request + + // Deliver a result before s.Close() to avoid a deadlock + s.deliverSections(result.Bit, result.Sections, result.Bitsets) + if result.Error != nil { s.errLock.Lock() s.err = result.Error s.errLock.Unlock() s.Close() } - s.deliverSections(result.Bit, result.Sections, result.Bitsets) } } } From 1fa3362ea7166f04896138201f21a038b2d18ad1 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Mon, 25 Sep 2023 16:02:19 +0200 Subject: [PATCH 42/98] core/forkid: add forkid test for holesky (#28193) --- core/forkid/forkid_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 3d49b2eced..db634bc14b 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -107,6 +107,16 @@ func TestCreation(t *testing.T) { {1735372, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 0}}, // First Shanghai block }, }, + // Holesky test cases + { + params.HoleskyChainConfig, + core.DefaultHoleskyGenesisBlock().ToBlock(), + []testcase{ + {0, 0, ID{Hash: checksumToBytes(0xc61a6098), Next: 1696000704}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin, London, Paris block + {123, 0, ID{Hash: checksumToBytes(0xc61a6098), Next: 1696000704}}, // First MergeNetsplit block + {123, 1696000704, ID{Hash: checksumToBytes(0xfd4f016b), Next: 0}}, // Last MergeNetsplit block + }, + }, } for i, tt := range tests { for j, ttt := range tt.cases { From c3742a9ae0822e220868a04c62ecb93305e9ed57 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 25 Sep 2023 16:02:44 +0200 Subject: [PATCH 43/98] internal/debug: add --log.rotate to the logging category (#28190) --- internal/debug/flags.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 52a6342452..736fede943 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -87,8 +87,9 @@ var ( Category: flags.LoggingCategory, } logRotateFlag = &cli.BoolFlag{ - Name: "log.rotate", - Usage: "Enables log file rotation", + Name: "log.rotate", + Usage: "Enables log file rotation", + Category: flags.LoggingCategory, } logMaxSizeMBsFlag = &cli.IntFlag{ Name: "log.maxsize", From 3d297fc2d7b309af2e6edc1aee7638389f957f23 Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 26 Sep 2023 00:28:20 +0800 Subject: [PATCH 44/98] cmd/geth: ensure db is closed before exit (#28150) --- cmd/geth/chaincmd.go | 7 ++++++- cmd/geth/dbcmd.go | 4 ++++ cmd/geth/snapshot.go | 4 +++- cmd/geth/verkle.go | 2 ++ 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 204565af57..aebcc29eb8 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -336,7 +336,8 @@ func exportChain(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, _ := utils.MakeChain(ctx, stack, true) + chain, db := utils.MakeChain(ctx, stack, true) + defer db.Close() start := time.Now() var err error @@ -376,6 +377,7 @@ func importPreimages(ctx *cli.Context) error { 
defer stack.Close() db := utils.MakeChainDatabase(ctx, stack, false) + defer db.Close() start := time.Now() if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil { @@ -394,6 +396,7 @@ func exportPreimages(ctx *cli.Context) error { defer stack.Close() db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() start := time.Now() if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil { @@ -405,6 +408,8 @@ func exportPreimages(ctx *cli.Context) error { func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) { db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + var header *types.Header if ctx.NArg() > 1 { return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg()) diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 5ba9acc1cf..6f802716c5 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -594,6 +594,7 @@ func importLDBdata(ctx *cli.Context) error { close(stop) }() db := utils.MakeChainDatabase(ctx, stack, false) + defer db.Close() return utils.ImportLDBData(db, fName, int64(start), stop) } @@ -690,6 +691,7 @@ func exportChaindata(ctx *cli.Context) error { close(stop) }() db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop) } @@ -697,6 +699,8 @@ func showMetaData(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + ancients, err := db.Ancients() if err != nil { fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err) diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 4c8dfa84b6..6413482511 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -245,7 +245,9 @@ func checkDanglingStorage(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - return snapshot.CheckDanglingStorage(utils.MakeChainDatabase(ctx, stack, true)) + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + return snapshot.CheckDanglingStorage(db) } // traverseState is a helper function used for pruning verification. diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index 88f60276ea..aa79889e8c 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -115,6 +115,7 @@ func verifyVerkle(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true) + defer chaindb.Close() headBlock := rawdb.ReadHeadBlock(chaindb) if headBlock == nil { log.Error("Failed to load head block") @@ -163,6 +164,7 @@ func expandVerkle(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true) + defer chaindb.Close() var ( rootC common.Hash keylist [][]byte From f6f64cc43d1ae7bfc633452f36c086941abecbd8 Mon Sep 17 00:00:00 2001 From: buddho Date: Tue, 26 Sep 2023 01:17:39 +0800 Subject: [PATCH 45/98] cmd/utils: fix bootnodes config priority (#28095) This fixes an issue where the --bootnodes flag was overridden by the config file. 
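A minimal sketch of the precedence rule the patch establishes (resolveBootnodes is a hypothetical helper, not the real cmd/utils code): an explicit flag always wins, a config-file value beats the network presets, and the presets beat the mainnet default.

package main

import "fmt"

// resolveBootnodes applies the priority order documented in the diff below:
// 1. --bootnodes flag, 2. config file, 3. network preset, 4. mainnet default.
func resolveBootnodes(flagURLs, configURLs, presetURLs, mainnetURLs []string) []string {
	switch {
	case len(flagURLs) > 0:
		return flagURLs
	case len(configURLs) > 0:
		return configURLs
	case len(presetURLs) > 0:
		return presetURLs
	default:
		return mainnetURLs
	}
}

func main() {
	// The config-file value wins over a network preset, but would lose to
	// an explicit --bootnodes flag.
	got := resolveBootnodes(nil,
		[]string{"enode://cfg@1.2.3.4:30303"},
		[]string{"enode://preset@5.6.7.8:30303"},
		[]string{"enode://mainnet@9.9.9.9:30303"})
	fmt.Println(got)
}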
--------- Co-authored-by: NathanBSC Co-authored-by: Felix Lange --- cmd/utils/flags.go | 42 ++++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 653ae4d9eb..c172d269c5 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1032,35 +1032,45 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) { // setBootstrapNodes creates a list of bootstrap nodes from the command line // flags, reverting to pre-configured ones if none have been specified. +// Priority order for bootnodes configuration: +// +// 1. --bootnodes flag +// 2. Config file +// 3. Network preset flags (e.g. --goerli) +// 4. default to mainnet nodes func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { urls := params.MainnetBootnodes - switch { - case ctx.IsSet(BootnodesFlag.Name): + if ctx.IsSet(BootnodesFlag.Name) { urls = SplitAndTrim(ctx.String(BootnodesFlag.Name)) - case ctx.Bool(HoleskyFlag.Name): - urls = params.HoleskyBootnodes - case ctx.Bool(SepoliaFlag.Name): - urls = params.SepoliaBootnodes - case ctx.Bool(GoerliFlag.Name): - urls = params.GoerliBootnodes - } - - // don't apply defaults if BootstrapNodes is already set - if cfg.BootstrapNodes != nil { - return + } else { + if cfg.BootstrapNodes != nil { + return // Already set by config file, don't apply defaults. + } + switch { + case ctx.Bool(HoleskyFlag.Name): + urls = params.HoleskyBootnodes + case ctx.Bool(SepoliaFlag.Name): + urls = params.SepoliaBootnodes + case ctx.Bool(GoerliFlag.Name): + urls = params.GoerliBootnodes + } + } + cfg.BootstrapNodes = mustParseBootnodes(urls) +} - cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls)) +func mustParseBootnodes(urls []string) []*enode.Node { + nodes := make([]*enode.Node, 0, len(urls)) for _, url := range urls { if url != "" { node, err := enode.Parse(enode.ValidSchemes, url) if err != nil { log.Crit("Bootstrap URL invalid", "enode", url, "err", err) - continue + return nil } - cfg.BootstrapNodes = append(cfg.BootstrapNodes, node) + nodes = append(nodes, node) } } + return nodes } // setBootstrapNodesV5 creates a list of bootstrap nodes from the command line From 4985d83b8faa5d32238429c57ff72ec39ef20720 Mon Sep 17 00:00:00 2001 From: Andryanau Kanstantsin Date: Mon, 25 Sep 2023 23:24:20 +0200 Subject: [PATCH 46/98] ethclient: fix BlockReceipts parameter encoding (#28087) Co-authored-by: Felix Lange --- ethclient/ethclient.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index a21d8ff67a..af373b9938 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -108,10 +108,10 @@ func (ec *Client) PeerCount(ctx context.Context) (uint64, error) { return uint64(result), err } -// BlockReceipts returns the receipts of a given block number or hash +// BlockReceipts returns the receipts of a given block number or hash. func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { var r []*types.Receipt - err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash) + err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash.String()) if err == nil && r == nil { return nil, ethereum.NotFound } From 4de89e92e41d08a814d31534e9db34ae0d95e966 Mon Sep 17 00:00:00 2001 From: hzysvilla Date: Tue, 26 Sep 2023 16:58:01 +0800 Subject: [PATCH 47/98] core/vm: minor code formatting (#28199) Adding a space between functions opOrigin() and opCaller() in instructions.go.
Adding a space between functions opKeccak256() and opAddress() in instructions.go. --- core/vm/instructions.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 2105201fce..56ff350201 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -251,6 +251,7 @@ func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( size.SetBytes(interpreter.hasherBuf[:]) return nil, nil } + func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes())) return nil, nil @@ -267,6 +268,7 @@ func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes())) return nil, nil } + func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes())) return nil, nil From 40219109b01b553f4adccf8ff3b34dea293ec7ed Mon Sep 17 00:00:00 2001 From: phenix3443 Date: Tue, 26 Sep 2023 16:59:41 +0800 Subject: [PATCH 48/98] eth/downloader: typo in comment (#28196) --- eth/downloader/skeleton.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index 59df82bd9e..a07e1695f5 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -423,7 +423,7 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) { for _, peer := range s.peers.AllPeers() { s.idles[peer.id] = peer } - // Nofity any tester listening for startup events + // Notify any tester listening for startup events if s.syncStarting != nil { s.syncStarting() } From 2b7bc2c36b0d0efc83e62ba8e13723b943c4fa6e Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Tue, 26 Sep 2023 13:12:44 +0200 Subject: [PATCH 49/98] eth/fetcher: allow underpriced transactions in after timeout (#28097) This PR will allow a previously underpriced transaction back in after a timeout of 5 minutes. This will block most transaction spam but allow for transactions to be re-broadcast on networks with less transaction flow. --------- Co-authored-by: Felix Lange --- eth/fetcher/tx_fetcher.go | 38 ++++++++++++++++++++-------------- eth/fetcher/tx_fetcher_test.go | 4 ++-- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index 95fef0cdee..a11b5e2164 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -24,8 +24,8 @@ import ( "sort" "time" - mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" @@ -53,6 +53,9 @@ const ( // re-request them. maxTxUnderpricedSetSize = 32768 + // maxTxUnderpricedTimeout is the max time (in seconds) a transaction should be stuck in the underpriced set. + maxTxUnderpricedTimeout = int64(5 * time.Minute / time.Second) + // txArriveTimeout is the time allowance before an announced transaction is // explicitly requested.
txArriveTimeout = 500 * time.Millisecond @@ -148,7 +151,7 @@ type TxFetcher struct { drop chan *txDrop quit chan struct{} - underpriced mapset.Set[common.Hash] // Transactions discarded as too cheap (don't re-fetch) + underpriced *lru.Cache[common.Hash, int64] // Transactions discarded as too cheap (don't re-fetch) // Stage 1: Waiting lists for newly discovered transactions that might be // broadcast without needing explicit request/reply round trips. @@ -202,7 +205,7 @@ func NewTxFetcherForTests( fetching: make(map[common.Hash]string), requests: make(map[string]*txRequest), alternates: make(map[common.Hash]map[string]struct{}), - underpriced: mapset.NewSet[common.Hash](), + underpriced: lru.NewCache[common.Hash, int64](maxTxUnderpricedSetSize), hasTx: hasTx, addTxs: addTxs, fetchTxs: fetchTxs, @@ -223,17 +226,16 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { // still valuable to check here because it runs concurrent to the internal // loop, so anything caught here is time saved internally. var ( - unknowns = make([]common.Hash, 0, len(hashes)) - duplicate, underpriced int64 + unknowns = make([]common.Hash, 0, len(hashes)) + duplicate int64 + underpriced int64 ) for _, hash := range hashes { switch { case f.hasTx(hash): duplicate++ - - case f.underpriced.Contains(hash): + case f.isKnownUnderpriced(hash): underpriced++ - default: unknowns = append(unknowns, hash) } @@ -245,10 +247,7 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { if len(unknowns) == 0 { return nil } - announce := &txAnnounce{ - origin: peer, - hashes: unknowns, - } + announce := &txAnnounce{origin: peer, hashes: unknowns} select { case f.notify <- announce: return nil @@ -257,6 +256,16 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { } } +// isKnownUnderpriced reports whether a transaction hash was recently found to be underpriced. +func (f *TxFetcher) isKnownUnderpriced(hash common.Hash) bool { + prevTime, ok := f.underpriced.Peek(hash) + if ok && prevTime+maxTxUnderpricedTimeout < time.Now().Unix() { + f.underpriced.Remove(hash) + return false + } + return ok +} + // Enqueue imports a batch of received transaction into the transaction pool // and the fetcher. This method may be called by both transaction broadcasts and // direct request replies. The differentiation is important so the fetcher can @@ -300,10 +309,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) // Avoid re-request this transaction when we receive another // announcement. 
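The hunk continuing below records the Unix time at which each underpriced hash was seen. A minimal sketch of the lookup side, assuming the common/lru cache API used in this patch (Peek, Remove, Add): a hit older than the timeout is evicted and treated as unknown, so the transaction can be announced and fetched again.

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
)

// timeoutSecs mirrors maxTxUnderpricedTimeout, expressed in Unix seconds.
const timeoutSecs = int64(5 * time.Minute / time.Second)

func isKnownUnderpriced(cache *lru.Cache[common.Hash, int64], hash common.Hash) bool {
	seen, ok := cache.Peek(hash)
	if ok && seen+timeoutSecs < time.Now().Unix() {
		cache.Remove(hash) // stale entry: forget it and allow a re-fetch
		return false
	}
	return ok
}

func main() {
	cache := lru.NewCache[common.Hash, int64](16)
	hash := common.HexToHash("0x01")

	cache.Add(hash, time.Now().Unix()) // freshly rejected: still blocked
	fmt.Println(isKnownUnderpriced(cache, hash)) // true

	cache.Add(hash, time.Now().Unix()-2*timeoutSecs) // rejected long ago
	fmt.Println(isKnownUnderpriced(cache, hash)) // false
}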
if errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) { - for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize { - f.underpriced.Pop() - } - f.underpriced.Add(batch[j].Hash()) + f.underpriced.Add(batch[j].Hash(), batch[j].Time().Unix()) } // Track a few interesting failure types switch { diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index 1715def99c..980c1a6c26 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -1509,8 +1509,8 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { } case isUnderpriced: - if fetcher.underpriced.Cardinality() != int(step) { - t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Cardinality(), step) + if fetcher.underpriced.Len() != int(step) { + t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Len(), step) } default: From adb9b319c9c61f092755000bf0fc4b3349f5cbbc Mon Sep 17 00:00:00 2001 From: Sina Mahmoodi <1591639+s1na@users.noreply.github.com> Date: Tue, 26 Sep 2023 14:22:11 +0200 Subject: [PATCH 50/98] internal/ethapi: eth_call block parameter is optional (#28165) So apparently in the spec the base block parameter of eth_call is optional. I agree that "latest" is a sane default for this that most people would use. --- internal/ethapi/api.go | 8 ++++++-- internal/ethapi/api_test.go | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 733e671e0a..e2911c6b16 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1158,8 +1158,12 @@ func (e *revertError) ErrorData() interface{} { // // Note, this function doesn't make and changes in the state/blockchain and is // useful to execute and retrieve values. 
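With the signature change below, the block parameter becomes optional on the wire and defaults to "latest". A minimal client-side sketch, assuming a node at the default local HTTP endpoint; the target address is a placeholder:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // assumed local node
	if err != nil {
		panic(err)
	}
	defer client.Close()

	call := map[string]any{
		"to":   "0x00000000000000000000000000000000000000ff", // placeholder
		"data": "0x",
	}
	var result hexutil.Bytes
	// Only the call object is passed: the omitted block parameter is now
	// filled in as "latest" by the server instead of causing an error.
	if err := client.CallContext(context.Background(), &result, "eth_call", call); err != nil {
		panic(err)
	}
	fmt.Println(result)
}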
-func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) { - result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) +func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) { + if blockNrOrHash == nil { + latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) + blockNrOrHash = &latest + } + result, err := DoCall(ctx, s.b, args, *blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) if err != nil { return nil, err } diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index fc135c3779..846a4347a7 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -846,7 +846,7 @@ func TestCall(t *testing.T) { }, } for i, tc := range testSuite { - result, err := api.Call(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides) + result, err := api.Call(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides) if tc.expectErr != nil { if err == nil { t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) From b85c183ea7417e973dbbccd27b3fb7d7097b87dd Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 26 Sep 2023 21:29:13 +0800 Subject: [PATCH 51/98] eth/downloader: remove header rollback mechanism (#28147) * eth/downloader: remove rollback mechanism in downloader * eth/downloader: remove the tests --- eth/downloader/downloader.go | 55 +-------------------- eth/downloader/downloader_test.go | 80 ------------------------------- 2 files changed, 2 insertions(+), 133 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 1e4f35ccd1..732e79f8ba 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -1280,41 +1280,13 @@ func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { // keeps processing and scheduling them into the header chain and downloader's // queue until the stream ends or a failure occurs. 
func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { - // Keep a count of uncertain headers to roll back var ( - rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis) - rollbackErr error - mode = d.getMode() + mode = d.getMode() + gotHeaders = false // Wait for batches of headers to process ) - defer func() { - if rollback > 0 { - lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 - if mode != LightSync { - lastFastBlock = d.blockchain.CurrentSnapBlock().Number - lastBlock = d.blockchain.CurrentBlock().Number - } - if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block - // We're already unwinding the stack, only print the error to make it more visible - log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err) - } - curFastBlock, curBlock := common.Big0, common.Big0 - if mode != LightSync { - curFastBlock = d.blockchain.CurrentSnapBlock().Number - curBlock = d.blockchain.CurrentBlock().Number - } - log.Warn("Rolled back chain segment", - "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), - "snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), - "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr) - } - }() - // Wait for batches of headers to process - gotHeaders := false - for { select { case <-d.cancelCh: - rollbackErr = errCanceled return errCanceled case task := <-d.headerProcCh: @@ -1363,8 +1335,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } } } - // Disable any rollback and return - rollback = 0 return nil } // Otherwise split the chunk of headers into batches and process them @@ -1375,7 +1345,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode // Terminate if something failed in between processing chunks select { case <-d.cancelCh: - rollbackErr = errCanceled return errCanceled default: } @@ -1422,29 +1391,11 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } if len(chunkHeaders) > 0 { if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil { - rollbackErr = err - - // If some headers were inserted, track them as uncertain - if mode == SnapSync && n > 0 && rollback == 0 { - rollback = chunkHeaders[0].Number.Uint64() - } log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) return fmt.Errorf("%w: %v", errInvalidChain, err) } - // All verifications passed, track all headers within the allowed limits - if mode == SnapSync { - head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64() - if head-rollback > uint64(fsHeaderSafetyNet) { - rollback = head - uint64(fsHeaderSafetyNet) - } else { - rollback = 1 - } - } } if len(rejected) != 0 { - // Merge threshold reached, stop importing, but don't roll back - rollback = 0 - log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) return ErrMergeTransition } @@ -1455,7 +1406,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { select { case <-d.cancelCh: - rollbackErr = errCanceled return errCanceled case <-time.After(time.Second): } @@ -1463,7 +1413,6 @@ func (d *Downloader) 
processHeaders(origin uint64, td, ttd *big.Int, beaconMode // Otherwise insert the headers for content retrieval inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) if len(inserts) != len(chunkHeaders) { - rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders)) return fmt.Errorf("%w: stale headers", errBadPeer) } } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 06c22afff0..ffe445ea88 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -878,86 +878,6 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { assertOwnChain(t, tester, len(chain.blocks)) } -// Tests that upon detecting an invalid header, the recent ones are rolled back -// for various failure scenarios. Afterwards a full sync is attempted to make -// sure no state was corrupted. -func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) } -func TestInvalidHeaderRollback67Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH67, SnapSync) } - -func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a small enough block chain to download - targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks - chain := testChainBase.shorten(targetBlocks) - - // Attempt to sync with an attacker that feeds junk during the fast sync phase. - // This should result in the last fsHeaderSafetyNet headers being rolled back. - missing := fsHeaderSafetyNet + MaxHeaderFetch + 1 - - fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:]) - fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} - - if err := tester.sync("fast-attack", nil, mode); err == nil { - t.Fatalf("succeeded fast attacker synchronisation") - } - if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) - } - // Attempt to sync with an attacker that feeds junk during the block import phase. - // This should result in both the last fsHeaderSafetyNet number of headers being - // rolled back, and also the pivot point being reverted to a non-block status. - missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 - - blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:]) - fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in - blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} - - if err := tester.sync("block-attack", nil, mode); err == nil { - t.Fatalf("succeeded block attacker synchronisation") - } - if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) - } - if mode == SnapSync { - if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 { - t.Errorf("fast sync pivot block #%d not rolled back", head) - } - } - // Attempt to sync with an attacker that withholds promised blocks after the - // fast sync pivot point. This could be a trial to leave the node with a bad - // but already imported pivot block. 
- withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:]) - - tester.downloader.syncInitHook = func(uint64, uint64) { - for i := missing; i < len(chain.blocks); i++ { - withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} - } - tester.downloader.syncInitHook = nil - } - if err := tester.sync("withhold-attack", nil, mode); err == nil { - t.Fatalf("succeeded withholding attacker synchronisation") - } - if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) - } - if mode == SnapSync { - if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 { - t.Errorf("fast sync pivot block #%d not rolled back", head) - } - } - // Synchronise with the valid peer and make sure sync succeeds. Since the last rollback - // should also disable fast syncing for this process, verify that we did a fresh full - // sync. Note, we can't assert anything about the receipts since we won't purge the - // database of them, hence we can't use assertOwnChain. - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - // Tests that a peer advertising a high TD doesn't get to stall the downloader // afterwards by not sending any useful hashes. func TestHighTDStarvationAttack66Full(t *testing.T) { From 614804b33c340cd60cbb6087d898b2968b0da320 Mon Sep 17 00:00:00 2001 From: bnovil Date: Wed, 27 Sep 2023 11:08:53 +0800 Subject: [PATCH 52/98] core/txpool: fix typos (#28208) core/txpool:fix typos --- core/txpool/legacypool/legacypool.go | 6 +++--- core/txpool/validation.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 00e326c4b8..57b71bf4e0 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -406,7 +406,7 @@ func (pool *LegacyPool) Close() error { } // Reset implements txpool.SubPool, allowing the legacy pool's internal state to be -// kept in sync with the main transacion pool's internal state. +// kept in sync with the main transaction pool's internal state. func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) { wait := pool.requestReset(oldHead, newHead) <-wait @@ -637,7 +637,7 @@ func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error { // pending or queued one, it overwrites the previous transaction if its price is higher. // // If a newly added transaction is marked as local, its sending account will be -// be added to the allowlist, preventing any associated transaction from being dropped +// added to the allowlist, preventing any associated transaction from being dropped // out of the pool due to pricing constraints. func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { // If the transaction is already known, discard it @@ -943,7 +943,7 @@ func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error { } // Add enqueues a batch of transactions into the pool if they are valid. Depending -// on the local flag, full pricing contraints will or will not be applied. +// on the local flag, full pricing constraints will or will not be applied. 
// // If sync is set, the method will block until all internal maintenance related // to the add is finished. Only use this during tests for determinism! diff --git a/core/txpool/validation.go b/core/txpool/validation.go index 630d5340cf..9de372a66e 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -182,7 +182,7 @@ type ValidationOptionsWithState struct { // be rejected once the number of remaining slots reaches zero. UsedAndLeftSlots func(addr common.Address) (int, int) - // ExistingExpenditure is a mandatory callback to retrieve the cummulative + // ExistingExpenditure is a mandatory callback to retrieve the cumulative // cost of the already pooled transactions to check for overdrafts. ExistingExpenditure func(addr common.Address) *big.Int @@ -237,7 +237,7 @@ func ValidateTransactionWithState(tx *types.Transaction, signer types.Signer, op return fmt.Errorf("%w: balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, spent, cost, new(big.Int).Sub(need, balance)) } // Transaction takes a new nonce value out of the pool. Ensure it doesn't - // overflow the number of permitted transactions from a single accoun + // overflow the number of permitted transactions from a single account // (i.e. max cancellable via out-of-bound transaction). if used, left := opts.UsedAndLeftSlots(from); left <= 0 { return fmt.Errorf("%w: pooled %d txs", ErrAccountLimitExceeded, used) From a0811300815f1d4e79881113a102e91fdfeecdb8 Mon Sep 17 00:00:00 2001 From: 0xbstn Date: Thu, 28 Sep 2023 03:48:14 +0200 Subject: [PATCH 53/98] core/txpool: fix typos (#28213) fix(core/txpool): fix typos --- core/txpool/blobpool/evictheap.go | 2 +- core/txpool/blobpool/limbo.go | 4 ++-- core/txpool/blobpool/priority.go | 2 +- core/txpool/errors.go | 2 +- core/txpool/legacypool/legacypool.go | 2 +- core/txpool/txpool.go | 4 ++-- core/txpool/validation.go | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/core/txpool/blobpool/evictheap.go b/core/txpool/blobpool/evictheap.go index 7607a911c1..df594099f7 100644 --- a/core/txpool/blobpool/evictheap.go +++ b/core/txpool/blobpool/evictheap.go @@ -44,7 +44,7 @@ type evictHeap struct { index map[common.Address]int // Indices into the heap for replacements } -// newPriceHeap creates a new heap of cheapets accounts in the blob pool to evict +// newPriceHeap creates a new heap of cheapest accounts in the blob pool to evict // from in case of over saturation. func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap { heap := &evictHeap{ diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go index 2d62593de6..d1fe9c7394 100644 --- a/core/txpool/blobpool/limbo.go +++ b/core/txpool/blobpool/limbo.go @@ -143,7 +143,7 @@ func (l *limbo) push(tx *types.Transaction, block uint64) error { return errors.New("already tracked blob transaction") } if err := l.setAndIndex(tx, block); err != nil { - log.Error("Failed to set and index liboed blobs", "tx", tx, "err", err) + log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err) return err } return nil @@ -191,7 +191,7 @@ func (l *limbo) update(txhash common.Hash, block uint64) { log.Trace("Blob transaction unchanged in limbo", "tx", txhash, "block", block) return } - // Retrieve the old blobs from the data store and write tehm back with a new + // Retrieve the old blobs from the data store and write them back with a new // block number. IF anything fails, there's not much to do, go on. 
item, err := l.getAndDrop(id) if err != nil { diff --git a/core/txpool/blobpool/priority.go b/core/txpool/blobpool/priority.go index 18e545c2a8..a8332bd9b0 100644 --- a/core/txpool/blobpool/priority.go +++ b/core/txpool/blobpool/priority.go @@ -27,7 +27,7 @@ import ( var log2_1_125 = math.Log2(1.125) // evictionPriority calculates the eviction priority based on the algorithm -// described in the BlobPool docs for a both fee components. +// described in the BlobPool docs for both fee components. // // This method takes about 8ns on a very recent laptop CPU, recalculating about // 125 million transaction priority values per second. diff --git a/core/txpool/errors.go b/core/txpool/errors.go index bc26550f78..61daa999ff 100644 --- a/core/txpool/errors.go +++ b/core/txpool/errors.go @@ -52,6 +52,6 @@ var ( ErrOversizedData = errors.New("oversized data") // ErrFutureReplacePending is returned if a future transaction replaces a pending - // transaction. Future transactions should only be able to replace other future transactions. + // one. Future transactions should only be able to replace other future transactions. ErrFutureReplacePending = errors.New("future transaction tries to replace pending") ) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 57b71bf4e0..f1b960510a 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -901,7 +901,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ } // addLocals enqueues a batch of transactions into the pool if they are valid, marking the -// senders as a local ones, ensuring they go around the local pricing constraints. +// senders as local ones, ensuring they go around the local pricing constraints. // // This method is used to add transactions from the RPC API and performs synchronous pool // reorganization and event propagation. diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index e40b414054..cacae7bc00 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -70,7 +70,7 @@ type TxPool struct { reservations map[common.Address]SubPool // Map with the account to pool reservations reserveLock sync.Mutex // Lock protecting the account reservations - subs event.SubscriptionScope // Subscription scope to unscubscribe all on shutdown + subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown quit chan chan error // Quit channel to tear down the head updater } @@ -404,7 +404,7 @@ func (p *TxPool) Locals() []common.Address { } // Status returns the known status (unknown/pending/queued) of a transaction -// identified by their hashes. +// identified by its hash. 
func (p *TxPool) Status(hash common.Hash) TxStatus { for _, subpool := range p.subpools { if status := subpool.Status(hash); status != TxStatusUnknown { diff --git a/core/txpool/validation.go b/core/txpool/validation.go index 9de372a66e..0df363d81d 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -114,7 +114,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types if sidecar == nil { return fmt.Errorf("missing sidecar in blob transaction") } - // Ensure the number of items in the blob transaction and vairous side + // Ensure the number of items in the blob transaction and various side // data match up before doing any expensive validations hashes := tx.BlobHashes() if len(hashes) == 0 { From 73f5bcb75b562fcb3c109dd9c51f21956bc1f1f4 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 28 Sep 2023 15:00:53 +0800 Subject: [PATCH 54/98] core, accounts, eth, trie: handle genesis state missing (#28171) * core, accounts, eth, trie: handle genesis state missing * core, eth, trie: polish * core: manage txpool subscription in mainpool * eth/backend: fix test * cmd, eth: fix test * core/rawdb, trie/triedb/pathdb: address comments * eth, trie: address comments * eth: inline the function * eth: use synced flag * core/txpool: revert changes in txpool * core, eth, trie: rename functions --- accounts/abi/bind/backends/simulated.go | 17 ++-- cmd/devp2p/internal/ethtest/suite_test.go | 1 + core/blockchain.go | 54 ++++-------- core/rawdb/accessors_sync.go | 22 +++++ core/rawdb/database.go | 2 +- core/rawdb/schema.go | 3 + core/txpool/blobpool/blobpool.go | 6 ++ core/txpool/legacypool/legacypool.go | 15 +++- eth/api_backend.go | 10 ++- eth/backend.go | 2 +- eth/downloader/downloader.go | 4 +- eth/handler.go | 50 +++++------ eth/handler_eth.go | 2 +- eth/handler_eth_test.go | 4 +- eth/sync.go | 23 ++--- miner/miner_test.go | 7 +- trie/database.go | 20 ++++- trie/triedb/pathdb/database.go | 101 +++++++++++++++------- trie/triedb/pathdb/database_test.go | 37 ++++---- trie/triedb/pathdb/errors.go | 10 ++- trie/triedb/pathdb/journal.go | 2 +- 21 files changed, 244 insertions(+), 148 deletions(-) diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 0c4342c494..dbdcd17823 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -199,7 +199,6 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, if err != nil { return nil, err } - return stateDB.GetCode(contract), nil } @@ -212,7 +211,6 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres if err != nil { return nil, err } - return stateDB.GetBalance(contract), nil } @@ -225,7 +223,6 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address, if err != nil { return 0, err } - return stateDB.GetNonce(contract), nil } @@ -238,7 +235,6 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres if err != nil { return nil, err } - val := stateDB.GetState(contract, key) return val[:], nil } @@ -700,8 +696,10 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa } block.AddTxWithChain(b.blockchain, tx) }) - stateDB, _ := b.blockchain.State() - + stateDB, err := b.blockchain.State() + if err != nil { + return err + } b.pendingBlock = blocks[0] b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) b.pendingReceipts = receipts[0] @@ -821,11 +819,12 @@ func (b 
*SimulatedBackend) AdjustTime(adjustment time.Duration) error {
 	blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
 		block.OffsetTime(int64(adjustment.Seconds()))
 	})
-	stateDB, _ := b.blockchain.State()
-
+	stateDB, err := b.blockchain.State()
+	if err != nil {
+		return err
+	}
 	b.pendingBlock = blocks[0]
 	b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
-
 	return nil
 }

diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go
index c5bcc3db1d..7890c31348 100644
--- a/cmd/devp2p/internal/ethtest/suite_test.go
+++ b/cmd/devp2p/internal/ethtest/suite_test.go
@@ -120,6 +120,7 @@ func setupGeth(stack *node.Node) error {
 	if err != nil {
 		return err
 	}
+	backend.SetSynced()

 	_, err = backend.BlockChain().InsertChain(chain.blocks[1:])
 	return err
diff --git a/core/blockchain.go b/core/blockchain.go
index e371e8d926..067f44d1f1 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -337,17 +337,17 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 	if err := bc.loadLastState(); err != nil {
 		return nil, err
 	}
-	// Make sure the state associated with the block is available
+	// Make sure the state associated with the block is available, or log out
+	// if there is no available state, waiting for state sync.
 	head := bc.CurrentBlock()
 	if !bc.HasState(head.Root) {
 		if head.Number.Uint64() == 0 {
 			// The genesis state is missing, which is only possible in the path-based
-			// scheme. This situation occurs when the state syncer overwrites it.
-			//
-			// The solution is to reset the state to the genesis state. Although it may not
-			// match the sync target, the state healer will later address and correct any
-			// inconsistencies.
-			bc.resetState()
+			// scheme. This situation occurs when the initial state sync is not finished
+			// yet, or the chain head is rewound below the pivot point. In both scenarios,
+			// there is no possible recovery approach except for rerunning a snap sync.
+			// Do nothing here until the state syncer picks it up.
+			log.Info("Genesis state is missing, wait state sync")
 		} else {
 			// Head state is missing, before the state recovery, find out the
 			// disk layer point of snapshot(if it's enabled). Make sure the
@@ -630,28 +630,6 @@ func (bc *BlockChain) SetSafe(header *types.Header) {
 	}
 }

-// resetState resets the persistent state to genesis state if it's not present.
-func (bc *BlockChain) resetState() {
-	// Short circuit if the genesis state is already present.
-	root := bc.genesisBlock.Root()
-	if bc.HasState(root) {
-		return
-	}
-	// Reset the state database to empty for committing genesis state.
-	// Note, it should only happen in path-based scheme and Reset function
-	// is also only call-able in this mode.
-	if bc.triedb.Scheme() == rawdb.PathScheme {
-		if err := bc.triedb.Reset(types.EmptyRootHash); err != nil {
-			log.Crit("Failed to clean state", "err", err) // Shouldn't happen
-		}
-	}
-	// Write genesis state into database.
-	if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
-		log.Crit("Failed to commit genesis state", "err", err)
-	}
-	log.Info("Reset state to genesis", "root", root)
-}
-
 // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
 // that the rewind must pass the specified state root.
This method is meant to be // used when rewinding with snapshots enabled to ensure that we go back further than @@ -687,7 +665,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha if newHeadBlock == nil { log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) newHeadBlock = bc.genesisBlock - bc.resetState() } else { // Block exists, keep rewinding until we find one with state, // keeping rewinding until we exceed the optional threshold @@ -715,16 +692,14 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha } } if beyondRoot || newHeadBlock.NumberU64() == 0 { - if newHeadBlock.NumberU64() == 0 { - bc.resetState() - } else if !bc.HasState(newHeadBlock.Root()) { + if !bc.HasState(newHeadBlock.Root()) && bc.stateRecoverable(newHeadBlock.Root()) { // Rewind to a block with recoverable state. If the state is // missing, run the state recovery here. if err := bc.triedb.Recover(newHeadBlock.Root()); err != nil { log.Crit("Failed to rollback state", "err", err) // Shouldn't happen } + log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) } - log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) break } log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root()) @@ -739,6 +714,15 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha // to low, so it's safe to update in-memory markers directly. bc.currentBlock.Store(newHeadBlock.Header()) headBlockGauge.Update(int64(newHeadBlock.NumberU64())) + + // The head state is missing, which is only possible in the path-based + // scheme. This situation occurs when the chain head is rewound below + // the pivot point. In this scenario, there is no possible recovery + // approach except for rerunning a snap sync. Do nothing here until the + // state syncer picks it up. + if !bc.HasState(newHeadBlock.Root()) { + log.Info("Chain is stateless, wait state sync", "number", newHeadBlock.Number(), "hash", newHeadBlock.Hash()) + } } // Rewind the snap block in a simpleton way to the target head if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() { @@ -838,7 +822,7 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { // Reset the trie database with the fresh snap synced state. root := block.Root() if bc.triedb.Scheme() == rawdb.PathScheme { - if err := bc.triedb.Reset(root); err != nil { + if err := bc.triedb.Enable(root); err != nil { return err } } diff --git a/core/rawdb/accessors_sync.go b/core/rawdb/accessors_sync.go index 7a7374e168..2dc08b3b72 100644 --- a/core/rawdb/accessors_sync.go +++ b/core/rawdb/accessors_sync.go @@ -76,3 +76,25 @@ func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) { log.Crit("Failed to delete skeleton header", "err", err) } } + +const ( + StateSyncUnknown = uint8(0) // flags the state snap sync is unknown + StateSyncRunning = uint8(1) // flags the state snap sync is not completed yet + StateSyncFinished = uint8(2) // flags the state snap sync is completed +) + +// ReadSnapSyncStatusFlag retrieves the state snap sync status flag. 
+func ReadSnapSyncStatusFlag(db ethdb.KeyValueReader) uint8 {
+	blob, err := db.Get(snapSyncStatusFlagKey)
+	if err != nil || len(blob) != 1 {
+		return StateSyncUnknown
+	}
+	return blob[0]
+}
+
+// WriteSnapSyncStatusFlag stores the state snap sync status flag into database.
+func WriteSnapSyncStatusFlag(db ethdb.KeyValueWriter, flag uint8) {
+	if err := db.Put(snapSyncStatusFlagKey, []byte{flag}); err != nil {
+		log.Crit("Failed to store sync status flag", "err", err)
+	}
+}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 3839e949ed..e97eeb2aa3 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -555,7 +555,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey,
 		snapshotJournalKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey,
 		fastTxLookupLimitKey, uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
-		persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey,
+		persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
 	} {
 		if bytes.Equal(key, meta) {
 			metadata.Add(size)
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 7269fe5d56..8e82459e82 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -91,6 +91,9 @@ var (
 	// transitionStatusKey tracks the eth2 transition status.
 	transitionStatusKey = []byte("eth2-transition")

+	// snapSyncStatusFlagKey flags the status of snap sync.
+	snapSyncStatusFlagKey = []byte("SnapSyncStatus")
+
 	// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
 	headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
 	headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index 042ff3be20..36916c3f0b 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -355,7 +355,13 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
 			return err
 		}
 	}
+	// Initialize the state with head block, or fall back to the empty one in
+	// case the head state is not available (might occur when the node is not
+	// fully synced).
 	state, err := p.chain.StateAt(head.Root)
+	if err != nil {
+		state, err = p.chain.StateAt(types.EmptyRootHash)
+	}
 	if err != nil {
 		return err
 	}
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index f1b960510a..2430028f9d 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -298,7 +298,20 @@ func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool
 	// Set the basic pool parameters
 	pool.gasTip.Store(gasTip)
-	pool.reset(nil, head)
+
+	// Initialize the state with head block, or fall back to the empty one in
+	// case the head state is not available (might occur when the node is not
+	// fully synced).
+	statedb, err := pool.chain.StateAt(head.Root)
+	if err != nil {
+		statedb, err = pool.chain.StateAt(types.EmptyRootHash)
+	}
+	if err != nil {
+		return err
+	}
+	pool.currentHead.Store(head)
+	pool.currentState = statedb
+	pool.pendingNonces = newNoncer(statedb)

 	// Start the reorg loop early, so it can handle requests generated during
 	// journal loading.
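The two pool `Init` hunks above share one fallback shape: try the head state, retry with the empty root, and only then fail. A minimal, self-contained sketch of that control flow follows; the `chain` interface and string-based roots are simplified stand-ins, not go-ethereum's actual types:

```go
package main

import (
	"errors"
	"fmt"
)

// emptyRoot stands in for types.EmptyRootHash.
const emptyRoot = "emptyRoot"

// chain is a simplified stand-in for the pool's view of the blockchain.
type chain interface {
	StateAt(root string) (string, error) // returns an opaque state handle
}

// initPoolState mirrors the fallback pattern: prefer the head state, retry
// with the empty root if it is unavailable, and fail only if both are missing.
func initPoolState(c chain, headRoot string) (string, error) {
	state, err := c.StateAt(headRoot)
	if err != nil {
		state, err = c.StateAt(emptyRoot)
	}
	if err != nil {
		return "", err
	}
	return state, nil
}

type fakeChain struct{ hasHead bool }

func (c fakeChain) StateAt(root string) (string, error) {
	if root == emptyRoot || c.hasHead {
		return "state@" + root, nil
	}
	return "", errors.New("state not found")
}

func main() {
	// A node that has not finished snap sync: the head state is missing,
	// so initialization falls back to the empty state instead of erroring out.
	state, err := initPoolState(fakeChain{hasHead: false}, "0xhead")
	fmt.Println(state, err)
}
```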
diff --git a/eth/api_backend.go b/eth/api_backend.go index dea745382e..a0c14f1338 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -204,7 +204,10 @@ func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.B return nil, nil, errors.New("header not found") } stateDb, err := b.eth.BlockChain().StateAt(header.Root) - return stateDb, header, err + if err != nil { + return nil, nil, err + } + return stateDb, header, nil } func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) { @@ -223,7 +226,10 @@ func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockN return nil, nil, errors.New("hash is not currently canonical") } stateDb, err := b.eth.BlockChain().StateAt(header.Root) - return stateDb, header, err + if err != nil { + return nil, nil, err + } + return stateDb, header, nil } return nil, nil, errors.New("invalid arguments; neither block nor hash specified") } diff --git a/eth/backend.go b/eth/backend.go index b99ae7655b..af03517792 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -474,7 +474,7 @@ func (s *Ethereum) Engine() consensus.Engine { return s.engine } func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb } func (s *Ethereum) IsListening() bool { return true } // Always listening func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader } -func (s *Ethereum) Synced() bool { return s.handler.acceptTxs.Load() } +func (s *Ethereum) Synced() bool { return s.handler.synced.Load() } func (s *Ethereum) SetSynced() { s.handler.enableSyncedFeatures() } func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning } func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 732e79f8ba..7fed48bdb2 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -403,7 +403,9 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, // subsequent state reads, explicitly disable the trie database and state // syncer is responsible to address and correct any state missing. if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme { - d.blockchain.TrieDB().Reset(types.EmptyRootHash) + if err := d.blockchain.TrieDB().Disable(); err != nil { + return err + } } // Snap sync uses the snapshot namespace to store potentially flaky data until // sync completely heals and finishes. 
Pause snapshot maintenance in the mean- diff --git a/eth/handler.go b/eth/handler.go index a629ec5ee9..59040442e4 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -100,8 +100,8 @@ type handler struct { networkID uint64 forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node - snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks) - acceptTxs atomic.Bool // Flag whether we're considered synchronised (enables transaction processing) + snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks) + synced atomic.Bool // Flag whether we're considered synchronised (enables transaction processing) database ethdb.Database txpool txPool @@ -163,32 +163,24 @@ func newHandler(config *handlerConfig) (*handler, error) { fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock() if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 { h.snapSync.Store(true) - log.Warn("Switch sync mode from full sync to snap sync") + log.Warn("Switch sync mode from full sync to snap sync", "reason", "snap sync incomplete") + } else if !h.chain.HasState(fullBlock.Root) { + h.snapSync.Store(true) + log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing") } } else { - if h.chain.CurrentBlock().Number.Uint64() > 0 { + head := h.chain.CurrentBlock() + if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) { // Print warning log if database is not empty to run snap sync. - log.Warn("Switch sync mode from snap sync to full sync") + log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete") } else { // If snap sync was requested and our database is empty, grant it h.snapSync.Store(true) + log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash()) } } - // If sync succeeds, pass a callback to potentially disable snap sync mode - // and enable transaction propagation. - success := func() { - // If we were running snap sync and it finished, disable doing another - // round on next sync cycle - if h.snapSync.Load() { - log.Info("Snap sync complete, auto disabling") - h.snapSync.Store(false) - } - // If we've successfully finished a sync cycle, accept transactions from - // the network - h.enableSyncedFeatures() - } // Construct the downloader (long sync) - h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, success) + h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, h.enableSyncedFeatures) if ttd := h.chain.Config().TerminalTotalDifficulty; ttd != nil { if h.chain.Config().TerminalTotalDifficultyPassed { log.Info("Chain post-merge, sync via beacon client") @@ -245,8 +237,8 @@ func newHandler(config *handlerConfig) (*handler, error) { // accept each others' blocks until a restart. Unfortunately we haven't figured // out a way yet where nodes can decide unilaterally whether the network is new // or not. This should be fixed if we figure out a solution. 
- if h.snapSync.Load() { - log.Warn("Snap syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) + if !h.synced.Load() { + log.Warn("Syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) return 0, nil } if h.merger.TDDReached() { @@ -272,11 +264,7 @@ func newHandler(config *handlerConfig) (*handler, error) { } return 0, nil } - n, err := h.chain.InsertChain(blocks) - if err == nil { - h.enableSyncedFeatures() // Mark initial sync done on any fetcher import - } - return n, err + return h.chain.InsertChain(blocks) } h.blockFetcher = fetcher.NewBlockFetcher(false, nil, h.chain.GetBlockByHash, validator, h.BroadcastBlock, heighter, nil, inserter, h.removePeer) @@ -680,7 +668,15 @@ func (h *handler) txBroadcastLoop() { // enableSyncedFeatures enables the post-sync functionalities when the initial // sync is finished. func (h *handler) enableSyncedFeatures() { - h.acceptTxs.Store(true) + // Mark the local node as synced. + h.synced.Store(true) + + // If we were running snap sync and it finished, disable doing another + // round on next sync cycle + if h.snapSync.Load() { + log.Info("Snap sync complete, auto disabling") + h.snapSync.Store(false) + } if h.chain.TrieDB().Scheme() == rawdb.PathScheme { h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize) } diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 3a5e6608bb..2aba16f928 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -51,7 +51,7 @@ func (h *ethHandler) PeerInfo(id enode.ID) interface{} { // AcceptTxs retrieves whether transaction processing is enabled on the node // or if inbound transactions should simply be dropped. func (h *ethHandler) AcceptTxs() bool { - return h.acceptTxs.Load() + return h.synced.Load() } // Handle is invoked from a peer's message handler when it receives a new remote diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 41619fe300..a16abc5ed6 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -248,7 +248,7 @@ func testRecvTransactions(t *testing.T, protocol uint) { handler := newTestHandler() defer handler.close() - handler.handler.acceptTxs.Store(true) // mark synced to accept transactions + handler.handler.synced.Store(true) // mark synced to accept transactions txs := make(chan core.NewTxsEvent) sub := handler.txpool.SubscribeNewTxsEvent(txs) @@ -401,7 +401,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { sinks[i] = newTestHandler() defer sinks[i].close() - sinks[i].handler.acceptTxs.Store(true) // mark synced to accept transactions + sinks[i].handler.synced.Store(true) // mark synced to accept transactions } // Interconnect all the sink handlers with the source handler for i, sink := range sinks { diff --git a/eth/sync.go b/eth/sync.go index ba7a7427a5..c7ba7c93d6 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -197,16 +197,25 @@ func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { return downloader.SnapSync, td } // We are probably in full sync, but we might have rewound to before the - // snap sync pivot, check if we should reenable + // snap sync pivot, check if we should re-enable snap sync. 
+ head := cs.handler.chain.CurrentBlock() if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil { - if head := cs.handler.chain.CurrentBlock(); head.Number.Uint64() < *pivot { + if head.Number.Uint64() < *pivot { block := cs.handler.chain.CurrentSnapBlock() td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64()) return downloader.SnapSync, td } } + // We are in a full sync, but the associated head state is missing. To complete + // the head state, forcefully rerun the snap sync. Note it doesn't mean the + // persistent state is corrupted, just mismatch with the head block. + if !cs.handler.chain.HasState(head.Root) { + block := cs.handler.chain.CurrentSnapBlock() + td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64()) + log.Info("Reenabled snap sync as chain is stateless") + return downloader.SnapSync, td + } // Nope, we're really full syncing - head := cs.handler.chain.CurrentBlock() td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64()) return downloader.FullSync, td } @@ -242,13 +251,7 @@ func (h *handler) doSync(op *chainSyncOp) error { if err != nil { return err } - if h.snapSync.Load() { - log.Info("Snap sync complete, auto disabling") - h.snapSync.Store(false) - } - // If we've successfully finished a sync cycle, enable accepting transactions - // from the network. - h.acceptTxs.Store(true) + h.enableSyncedFeatures() head := h.chain.CurrentBlock() if head.Number.Uint64() > 0 { diff --git a/miner/miner_test.go b/miner/miner_test.go index 489bc46a91..36d5166c6d 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -64,6 +64,7 @@ func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *stat } type testBlockChain struct { + root common.Hash config *params.ChainConfig statedb *state.StateDB gasLimit uint64 @@ -89,6 +90,10 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { return bc.statedb, nil } +func (bc *testBlockChain) HasState(root common.Hash) bool { + return bc.root == root +} + func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { return bc.chainHeadFeed.Subscribe(ch) } @@ -302,7 +307,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { t.Fatalf("can't create new chain %v", err) } statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil) - blockchain := &testBlockChain{chainConfig, statedb, 10000000, new(event.Feed)} + blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)} pool := legacypool.New(testTxPoolConfig, blockchain) txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain, []txpool.SubPool{pool}) diff --git a/trie/database.go b/trie/database.go index 535ad87d72..1e59f0908f 100644 --- a/trie/database.go +++ b/trie/database.go @@ -273,15 +273,27 @@ func (db *Database) Recoverable(root common.Hash) (bool, error) { return pdb.Recoverable(root), nil } -// Reset wipes all available journal from the persistent database and discard -// all caches and diff layers. Using the given root to create a new disk layer. +// Disable deactivates the database and invalidates all available state layers +// as stale to prevent access to the persistent state, which is in the syncing +// stage. +// // It's only supported by path-based database and will return an error for others. 
-func (db *Database) Reset(root common.Hash) error { +func (db *Database) Disable() error { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return errors.New("not supported") + } + return pdb.Disable() +} + +// Enable activates database and resets the state tree with the provided persistent +// state root once the state sync is finished. +func (db *Database) Enable(root common.Hash) error { pdb, ok := db.backend.(*pathdb.Database) if !ok { return errors.New("not supported") } - return pdb.Reset(root) + return pdb.Enable(root) } // Journal commits an entire diff hierarchy to disk into a single journal entry. diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go index 18cc36ffc3..dc64414e9b 100644 --- a/trie/triedb/pathdb/database.go +++ b/trie/triedb/pathdb/database.go @@ -128,7 +128,8 @@ type Database struct { // readOnly is the flag whether the mutation is allowed to be applied. // It will be set automatically when the database is journaled during // the shutdown to reject all following unexpected mutations. - readOnly bool // Indicator if database is opened in read only mode + readOnly bool // Flag if database is opened in read only mode + waitSync bool // Flag if database is deactivated due to initial state sync bufferSize int // Memory allowance (in bytes) for caching dirty nodes config *Config // Configuration for database diskdb ethdb.Database // Persistent storage for matured trie nodes @@ -179,6 +180,12 @@ func New(diskdb ethdb.Database, config *Config) *Database { log.Warn("Truncated extra state histories", "number", pruned) } } + // Disable database in case node is still in the initial state sync stage. + if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly { + if err := db.Disable(); err != nil { + log.Crit("Failed to disable database", "err", err) // impossible to happen + } + } log.Warn("Path-based state scheme is an experimental feature") return db } @@ -204,9 +211,9 @@ func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint6 db.lock.Lock() defer db.lock.Unlock() - // Short circuit if the database is in read only mode. - if db.readOnly { - return errSnapshotReadOnly + // Short circuit if the mutation is not allowed. + if err := db.modifyAllowed(); err != nil { + return err } if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil { return err @@ -227,45 +234,59 @@ func (db *Database) Commit(root common.Hash, report bool) error { db.lock.Lock() defer db.lock.Unlock() - // Short circuit if the database is in read only mode. - if db.readOnly { - return errSnapshotReadOnly + // Short circuit if the mutation is not allowed. + if err := db.modifyAllowed(); err != nil { + return err } return db.tree.cap(root, 0) } -// Reset rebuilds the database with the specified state as the base. -// -// - if target state is empty, clear the stored state and all layers on top -// - if target state is non-empty, ensure the stored state matches with it -// and clear all other layers on top. -func (db *Database) Reset(root common.Hash) error { +// Disable deactivates the database and invalidates all available state layers +// as stale to prevent access to the persistent state, which is in the syncing +// stage. +func (db *Database) Disable() error { db.lock.Lock() defer db.lock.Unlock() // Short circuit if the database is in read only mode. 
if db.readOnly { - return errSnapshotReadOnly + return errDatabaseReadOnly } - batch := db.diskdb.NewBatch() - root = types.TrieRootHash(root) - if root == types.EmptyRootHash { - // Empty state is requested as the target, nuke out - // the root node and leave all others as dangling. - rawdb.DeleteAccountTrieNode(batch, nil) - } else { - // Ensure the requested state is existent before any - // action is applied. - _, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil) - if hash != root { - return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root) - } + // Prevent duplicated disable operation. + if db.waitSync { + log.Error("Reject duplicated disable operation") + return nil } - // Mark the disk layer as stale before applying any mutation. + db.waitSync = true + + // Mark the disk layer as stale to prevent access to persistent state. db.tree.bottom().markStale() + // Write the initial sync flag to persist it across restarts. + rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncRunning) + log.Info("Disabled trie database due to state sync") + return nil +} + +// Enable activates database and resets the state tree with the provided persistent +// state root once the state sync is finished. +func (db *Database) Enable(root common.Hash) error { + db.lock.Lock() + defer db.lock.Unlock() + + // Short circuit if the database is in read only mode. + if db.readOnly { + return errDatabaseReadOnly + } + // Ensure the provided state root matches the stored one. + root = types.TrieRootHash(root) + _, stored := rawdb.ReadAccountTrieNode(db.diskdb, nil) + if stored != root { + return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root) + } // Drop the stale state journal in persistent database and // reset the persistent state id back to zero. + batch := db.diskdb.NewBatch() rawdb.DeleteTrieJournal(batch) rawdb.WritePersistentStateID(batch, 0) if err := batch.Write(); err != nil { @@ -282,8 +303,11 @@ func (db *Database) Reset(root common.Hash) error { } // Re-construct a new disk layer backed by persistent state // with **empty clean cache and node buffer**. - dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0)) - db.tree.reset(dl) + db.tree.reset(newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0))) + + // Re-enable the database as the final step. + db.waitSync = false + rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncFinished) log.Info("Rebuilt trie database", "root", root) return nil } @@ -296,7 +320,10 @@ func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error defer db.lock.Unlock() // Short circuit if rollback operation is not supported. - if db.readOnly || db.freezer == nil { + if err := db.modifyAllowed(); err != nil { + return err + } + if db.freezer == nil { return errors.New("state rollback is non-supported") } // Short circuit if the target state is not recoverable. @@ -424,3 +451,15 @@ func (db *Database) SetBufferSize(size int) error { func (db *Database) Scheme() string { return rawdb.PathScheme } + +// modifyAllowed returns the indicator if mutation is allowed. This function +// assumes the db.lock is already held. 
+func (db *Database) modifyAllowed() error { + if db.readOnly { + return errDatabaseReadOnly + } + if db.waitSync { + return errDatabaseWaitSync + } + return nil +} diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go index 6d346d20ea..912364f7f4 100644 --- a/trie/triedb/pathdb/database_test.go +++ b/trie/triedb/pathdb/database_test.go @@ -439,38 +439,39 @@ func TestDatabaseRecoverable(t *testing.T) { } } -func TestReset(t *testing.T) { - var ( - tester = newTester(t) - index = tester.bottomIndex() - ) +func TestDisable(t *testing.T) { + tester := newTester(t) defer tester.release() - // Reset database to unknown target, should reject it - if err := tester.db.Reset(testutil.RandomHash()); err == nil { - t.Fatal("Failed to reject invalid reset") + _, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) + if err := tester.db.Disable(); err != nil { + t.Fatal("Failed to deactivate database") } - // Reset database to state persisted in the disk - if err := tester.db.Reset(types.EmptyRootHash); err != nil { - t.Fatalf("Failed to reset database %v", err) + if err := tester.db.Enable(types.EmptyRootHash); err == nil { + t.Fatalf("Invalid activation should be rejected") } + if err := tester.db.Enable(stored); err != nil { + t.Fatal("Failed to activate database") + } + // Ensure journal is deleted from disk if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 { t.Fatal("Failed to clean journal") } // Ensure all trie histories are removed - for i := 0; i <= index; i++ { - _, err := readHistory(tester.db.freezer, uint64(i+1)) - if err == nil { - t.Fatalf("Failed to clean state history, index %d", i+1) - } + n, err := tester.db.freezer.Ancients() + if err != nil { + t.Fatal("Failed to clean state history") + } + if n != 0 { + t.Fatal("Failed to clean state history") } // Verify layer tree structure, single disk layer is expected if tester.db.tree.len() != 1 { t.Fatalf("Extra layer kept %d", tester.db.tree.len()) } - if tester.db.tree.bottom().rootHash() != types.EmptyRootHash { - t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash()) + if tester.db.tree.bottom().rootHash() != stored { + t.Fatalf("Root hash is not matched exp %x got %x", stored, tester.db.tree.bottom().rootHash()) } } diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go index f6ac0ec4a0..78ee4459fe 100644 --- a/trie/triedb/pathdb/errors.go +++ b/trie/triedb/pathdb/errors.go @@ -25,9 +25,13 @@ import ( ) var ( - // errSnapshotReadOnly is returned if the database is opened in read only mode - // and mutation is requested. - errSnapshotReadOnly = errors.New("read only") + // errDatabaseReadOnly is returned if the database is opened in read only mode + // to prevent any mutation. + errDatabaseReadOnly = errors.New("read only") + + // errDatabaseWaitSync is returned if the initial state sync is not completed + // yet and database is disabled to prevent accessing state. + errDatabaseWaitSync = errors.New("waiting for sync") // errSnapshotStale is returned from data accessors if the underlying layer // layer had been invalidated due to the chain progressing forward far enough diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go index ea90207f29..ac770763e3 100644 --- a/trie/triedb/pathdb/journal.go +++ b/trie/triedb/pathdb/journal.go @@ -356,7 +356,7 @@ func (db *Database) Journal(root common.Hash) error { // Short circuit if the database is in read only mode. 
if db.readOnly { - return errSnapshotReadOnly + return errDatabaseReadOnly } // Firstly write out the metadata of journal journal := new(bytes.Buffer) From dc34fe8291bfcaefbce97f559e9610beffb2e470 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 28 Sep 2023 10:22:09 +0300 Subject: [PATCH 55/98] params: release Geth v1.13.2 --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 56d5a99a80..5941701b68 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 13 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 13 // Minor version component of the current release + VersionPatch = 2 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. From 3dc45a3e1dd50ce1812438a79226b470a839f89c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 28 Sep 2023 10:23:25 +0300 Subject: [PATCH 56/98] params: begin v1.13.3 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 5941701b68..bcffd292a5 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 13 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 13 // Minor version component of the current release + VersionPatch = 3 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From 37a2d919b023f8dcfaf81d00d7af1b3bff3d998d Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Thu, 28 Sep 2023 01:52:27 -0600 Subject: [PATCH 57/98] params: update 4788 beacon roots contract addr (#28205) This change contains the final (?) address for 4788 beacon root contract. 
The update to the EIP is being tracked here: https://github.com/ethereum/EIPs/pull/7672 --------- Co-authored-by: Martin Holst Swende --- cmd/evm/testdata/29/alloc.json | 2 +- cmd/evm/testdata/29/exp.json | 4 ++-- cmd/evm/testdata/29/readme.md | 22 +++++++++++----------- params/protocol_params.go | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/cmd/evm/testdata/29/alloc.json b/cmd/evm/testdata/29/alloc.json index 70d47862a0..d2c879a45c 100644 --- a/cmd/evm/testdata/29/alloc.json +++ b/cmd/evm/testdata/29/alloc.json @@ -6,7 +6,7 @@ "storage" : { } }, - "0xbEac00dDB15f3B6d645C48263dC93862413A222D" : { + "0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02" : { "balance" : "0x1", "code" : "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", "nonce" : "0x00", diff --git a/cmd/evm/testdata/29/exp.json b/cmd/evm/testdata/29/exp.json index 16a8817774..83e1db26f9 100644 --- a/cmd/evm/testdata/29/exp.json +++ b/cmd/evm/testdata/29/exp.json @@ -1,6 +1,6 @@ { "alloc": { - "0xbeac00ddb15f3b6d645c48263dc93862413a222d": { + "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": { "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", "storage": { "0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e", @@ -14,7 +14,7 @@ } }, "result": { - "stateRoot": "0x2db9f6bc233e8fd0af2d8023404493a19b37d9d69ace71f4e73158851fced574", + "stateRoot": "0x19a4f821a7c0a6f4c934f9acb0fe9ce5417b68086e12513ecbc3e3f57e01573c", "txRoot": "0x248074fabe112f7d93917f292b64932394f835bb98da91f21501574d58ec92ab", "receiptsRoot": "0xf78dfb743fbd92ade140711c8bbc542b5e307f0ab7984eff35d751969fe57efa", "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", diff --git a/cmd/evm/testdata/29/readme.md b/cmd/evm/testdata/29/readme.md index 4383e328ed..ab02ce9cf8 100644 --- a/cmd/evm/testdata/29/readme.md +++ b/cmd/evm/testdata/29/readme.md @@ -1,29 +1,29 @@ ## EIP 4788 This test contains testcases for EIP-4788. The 4788-contract is -located at address `0xbeac00ddb15f3b6d645c48263dc93862413a222d`, and this test executes a simple transaction. It also +located at address `0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02`, and this test executes a simple transaction. It also implicitly invokes the system tx, which sets calls the contract and sets the storage values + ``` $ dir=./testdata/29/ && go run . 
t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout -INFO [08-15|20:07:56.335] Trie dumping started root=ecde45..2af8a7 -INFO [08-15|20:07:56.335] Trie dumping complete accounts=2 elapsed="225.848µs" -INFO [08-15|20:07:56.335] Wrote file file=result.json +INFO [09-27|15:34:53.049] Trie dumping started root=19a4f8..01573c +INFO [09-27|15:34:53.049] Trie dumping complete accounts=2 elapsed="192.759µs" +INFO [09-27|15:34:53.050] Wrote file file=result.json { "alloc": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x16345785d871db8", - "nonce": "0x1" - }, - "0xbeac00541d49391ed88abf392bfc1f4dea8c4143": { + "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": { "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", "storage": { "0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e", "0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" }, - "balance": "0x + "balance": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x16345785d871db8", + "nonce": "0x1" } } } - ``` diff --git a/params/protocol_params.go b/params/protocol_params.go index 353ad1e03f..8a5c011849 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -186,7 +186,7 @@ var ( DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not. // BeaconRootsStorageAddress is the address where historical beacon roots are stored as per EIP-4788 - BeaconRootsStorageAddress = common.HexToAddress("0xbEac00dDB15f3B6d645C48263dC93862413A222D") + BeaconRootsStorageAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02") // SystemAddress is where the system-transaction is sent from as per EIP-4788 SystemAddress common.Address = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe") ) From 46c850a9411d7ff15c1a0342fe29f359e6c390ae Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 28 Sep 2023 04:04:45 -0500 Subject: [PATCH 58/98] internal/ethapi: compact db missing key starts with 0xff (#28207) Signed-off-by: jsvisa --- internal/ethapi/api.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index e2911c6b16..f7584cbf5a 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -2202,9 +2202,17 @@ func (api *DebugAPI) ChaindbProperty(property string) (string, error) { // ChaindbCompact flattens the entire key-value database into a single level, // removing all unused slots and merging all keys. 
func (api *DebugAPI) ChaindbCompact() error { - for b := byte(0); b < 255; b++ { - log.Info("Compacting chain database", "range", fmt.Sprintf("0x%0.2X-0x%0.2X", b, b+1)) - if err := api.b.ChainDb().Compact([]byte{b}, []byte{b + 1}); err != nil { + cstart := time.Now() + for b := 0; b <= 255; b++ { + var ( + start = []byte{byte(b)} + end = []byte{byte(b + 1)} + ) + if b == 255 { + end = nil + } + log.Info("Compacting database", "range", fmt.Sprintf("%#X-%#X", start, end), "elapsed", common.PrettyDuration(time.Since(cstart))) + if err := api.b.ChainDb().Compact(start, end); err != nil { log.Error("Database compaction failed", "err", err) return err } From b9450bfcca3bee4e3100cb11d3ad2091adb4d3af Mon Sep 17 00:00:00 2001 From: phenix3443 Date: Thu, 28 Sep 2023 18:15:50 +0800 Subject: [PATCH 59/98] core, eth: typos and some code formatting (#28201) * fix: typo * feat: revert symbol name --- core/blockchain.go | 10 +++++----- eth/catalyst/api.go | 4 ++-- eth/handler.go | 8 +++----- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 067f44d1f1..baf2f9f82f 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -576,7 +576,7 @@ func (bc *BlockChain) SetHead(head uint64) error { header := bc.CurrentBlock() block := bc.GetBlock(header.Hash(), header.Number.Uint64()) if block == nil { - // This should never happen. In practice, previsouly currentBlock + // This should never happen. In practice, previously currentBlock // contained the entire block whereas now only a "marker", so there // is an ever so slight chance for a race we should handle. log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash()) @@ -598,7 +598,7 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error { header := bc.CurrentBlock() block := bc.GetBlock(header.Hash(), header.Number.Uint64()) if block == nil { - // This should never happen. In practice, previsouly currentBlock + // This should never happen. In practice, previously currentBlock // contained the entire block whereas now only a "marker", so there // is an ever so slight chance for a race we should handle. log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash()) @@ -982,7 +982,7 @@ func (bc *BlockChain) stopWithoutSaving() { func (bc *BlockChain) Stop() { bc.stopWithoutSaving() - // Ensure that the entirety of the state snapshot is journalled to disk. + // Ensure that the entirety of the state snapshot is journaled to disk. var snapBase common.Hash if bc.snaps != nil { var err error @@ -1193,7 +1193,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ // a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients) // range. In this case, all tx indices of newly imported blocks should be // generated. 
- var batch = bc.db.NewBatch() + batch := bc.db.NewBatch() for i, block := range blockChain { if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit { rawdb.WriteTxLookupEntriesByBlock(batch, block) @@ -2585,7 +2585,7 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) { bc.flushInterval.Store(int64(interval)) } -// GetTrieFlushInterval gets the in-memroy tries flush interval +// GetTrieFlushInterval gets the in-memory tries flush interval func (bc *BlockChain) GetTrieFlushInterval() time.Duration { return time.Duration(bc.flushInterval.Load()) } diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 08cce0558b..1e5fb3ccb3 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -560,7 +560,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe log.Warn("Invalid timestamp", "parent", block.Time(), "block", block.Time()) return api.invalid(errors.New("invalid timestamp"), parent.Header()), nil } - // Another cornercase: if the node is in snap sync mode, but the CL client + // Another corner case: if the node is in snap sync mode, but the CL client // tries to make it import a block. That should be denied as pushing something // into the database directly will conflict with the assumptions of snap sync // that it has an empty db that it can fill itself. @@ -776,7 +776,7 @@ func (api *ConsensusAPI) ExchangeCapabilities([]string) []string { // GetPayloadBodiesByHashV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list // of block bodies by the engine api. func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBodyV1 { - var bodies = make([]*engine.ExecutionPayloadBodyV1, len(hashes)) + bodies := make([]*engine.ExecutionPayloadBodyV1, len(hashes)) for i, hash := range hashes { block := api.eth.BlockChain().GetBlockByHash(hash) bodies[i] = getBody(block) diff --git a/eth/handler.go b/eth/handler.go index 59040442e4..33b9683740 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -55,9 +55,7 @@ const ( txMaxBroadcastSize = 4096 ) -var ( - syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge -) +var syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge // txPool defines the methods needed from a transaction pool implementation to // support all the operations needed by the Ethereum chain protocols. 
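Besides typo fixes, this commit normalizes declaration style: single-variable `var x = ...` forms inside functions become short declarations, and one-entry `var ( ... )` blocks are flattened, as in the `syncChallengeTimeout` hunk above. A tiny standalone illustration of the idiom, with purely illustrative names:

```go
package main

import "fmt"

// One package-level variable needs no var ( ... ) grouping.
var syncTimeoutSeconds = 15

func main() {
	// Inside a function, a short variable declaration is preferred over
	// the equivalent `var batch = make([]byte, 0, 8)` spelling.
	batch := make([]byte, 0, 8)
	batch = append(batch, 0x01)
	fmt.Println(syncTimeoutSeconds, batch)
}
```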
@@ -89,7 +87,7 @@ type handlerConfig struct {
 	Chain      *core.BlockChain    // Blockchain to serve data from
 	TxPool     txPool              // Transaction pool to propagate from
 	Merger     *consensus.Merger   // The manager for eth1/2 transition
-	Network    uint64              // Network identifier to adfvertise
+	Network    uint64              // Network identifier to advertise
 	Sync       downloader.SyncMode // Whether to snap or full sync
 	BloomCache uint64              // Megabytes to alloc for snap sync bloom
 	EventMux   *event.TypeMux      // Legacy event mux, deprecate for `feed`
@@ -255,7 +253,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
 		}
 		td := new(big.Int).Add(ptd, block.Difficulty())
 		if !h.chain.Config().IsTerminalPoWBlock(ptd, td) {
-			log.Info("Filtered out non-termimal pow block", "number", block.NumberU64(), "hash", block.Hash())
+			log.Info("Filtered out non-terminal pow block", "number", block.NumberU64(), "hash", block.Hash())
 			return 0, nil
 		}
 		if err := h.chain.InsertBlockWithoutSetHead(block); err != nil {

From f988b2332e5974a5d9f123ab056232bc5890c7ef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Thu, 28 Sep 2023 22:27:21 +0300
Subject: [PATCH 60/98] ethdb, internal/ethapi: support exposing Pebble stats
 too, beside LevelDB (#28224)

ethdb, internal/ethapi: support exposing Pebble stats too, beside LevelDB
---
 ethdb/leveldb/leveldb.go | 6 ++++++
 ethdb/pebble/pebble.go   | 7 +++++--
 internal/ethapi/api.go   | 5 -----
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index c0e0eb250a..e58efbddbe 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -22,6 +22,7 @@ package leveldb

 import (
 	"fmt"
+	"strings"
 	"sync"
 	"time"

@@ -245,6 +246,11 @@ func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {

 // Stat returns a particular internal stat of the database.
 func (db *Database) Stat(property string) (string, error) {
+	if property == "" {
+		property = "leveldb.stats"
+	} else if !strings.HasPrefix(property, "leveldb.") {
+		property = "leveldb." + property
+	}
 	return db.db.GetProperty(property)
 }

diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 12a84cc91a..c35a154cac 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -379,9 +379,12 @@ func upperBound(prefix []byte) (limit []byte) {
 	return limit
 }

-// Stat returns a particular internal stat of the database.
+// Stat returns the internal metrics of Pebble in a text format. It's a developer
+// method to read everything there is to read independent of Pebble version.
+//
+// The property is unused in Pebble as there's only one thing to retrieve.
 func (d *Database) Stat(property string) (string, error) {
-	return "", nil
+	return d.db.Metrics().String(), nil
 }

 // Compact flattens the underlying data store for the given key range. In essence,
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index f7584cbf5a..d22424502c 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -2191,11 +2191,6 @@ func (api *DebugAPI) PrintBlock(ctx context.Context, number uint64) (string, err

 // ChaindbProperty returns leveldb properties of the key-value database.
 func (api *DebugAPI) ChaindbProperty(property string) (string, error) {
-	if property == "" {
-		property = "leveldb.stats"
-	} else if !strings.HasPrefix(property, "leveldb.") {
-		property = "leveldb."
+ property - } return api.b.ChainDb().Stat(property) } From 1f6e63900dfbd2befcd08f647d9d49a5645bd3cc Mon Sep 17 00:00:00 2001 From: 0xbstn Date: Fri, 29 Sep 2023 09:52:22 +0200 Subject: [PATCH 61/98] core: fix typos (#28218) * fix(core/txpool): fix typos * core/asm: fix typos * core/bloombits: fix typos * core/rawdb: fix typos --- core/asm/asm.go | 2 +- core/asm/compiler.go | 2 +- core/asm/lex_test.go | 4 ++-- core/bloombits/matcher.go | 4 ++-- core/bloombits/matcher_test.go | 4 ++-- core/rawdb/chain_freezer.go | 2 +- core/rawdb/database.go | 10 +++++----- core/rawdb/databases_64bit.go | 2 +- core/rawdb/table.go | 2 +- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/core/asm/asm.go b/core/asm/asm.go index 7c1e14ec01..294eb6ffaa 100644 --- a/core/asm/asm.go +++ b/core/asm/asm.go @@ -34,7 +34,7 @@ type instructionIterator struct { started bool } -// NewInstructionIterator create a new instruction iterator. +// NewInstructionIterator creates a new instruction iterator. func NewInstructionIterator(code []byte) *instructionIterator { it := new(instructionIterator) it.code = code diff --git a/core/asm/compiler.go b/core/asm/compiler.go index 75bf726c96..02c589b2c1 100644 --- a/core/asm/compiler.go +++ b/core/asm/compiler.go @@ -49,7 +49,7 @@ func NewCompiler(debug bool) *Compiler { } } -// Feed feeds tokens in to ch and are interpreted by +// Feed feeds tokens into ch and are interpreted by // the compiler. // // feed is the first pass in the compile stage as it collects the used labels in the diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go index 173031521f..1e62d776d4 100644 --- a/core/asm/lex_test.go +++ b/core/asm/lex_test.go @@ -72,12 +72,12 @@ func TestLexer(t *testing.T) { input: "@label123", tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, }, - // comment after label + // Comment after label { input: "@label123 ;; comment", tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, }, - // comment after instruction + // Comment after instruction { input: "push 3 ;; comment\nadd", tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}}, diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go index cf799c8324..6a4cfb23db 100644 --- a/core/bloombits/matcher.go +++ b/core/bloombits/matcher.go @@ -58,7 +58,7 @@ type partialMatches struct { // bit with the given number of fetch elements, or a response for such a request. // It can also have the actual results set to be used as a delivery data struct. // -// The contest and error fields are used by the light client to terminate matching +// The context and error fields are used by the light client to terminate matching // early if an error is encountered on some path of the pipeline. type Retrieval struct { Bit uint @@ -389,7 +389,7 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) { shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests ) - // assign is a helper method fo try to assign a pending bit an actively + // assign is a helper method to try to assign a pending bit an actively // listening servicer, or schedule it up for later when one arrives. 
assign := func(bit uint) { select { diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go index 36764c3f17..7f3d5f279c 100644 --- a/core/bloombits/matcher_test.go +++ b/core/bloombits/matcher_test.go @@ -85,7 +85,7 @@ func TestMatcherRandom(t *testing.T) { } // Tests that the matcher can properly find matches if the starting block is -// shifter from a multiple of 8. This is needed to cover an optimisation with +// shifted from a multiple of 8. This is needed to cover an optimisation with // bitset matching https://github.com/ethereum/go-ethereum/issues/15309. func TestMatcherShifted(t *testing.T) { t.Parallel() @@ -106,7 +106,7 @@ func TestWildcardMatcher(t *testing.T) { testMatcherBothModes(t, nil, 0, 10000, 0) } -// makeRandomIndexes generates a random filter system, composed on multiple filter +// makeRandomIndexes generates a random filter system, composed of multiple filter // criteria, each having one bloom list component for the address and arbitrarily // many topic bloom list components. func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes { diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index 22dbda4a21..cbfaf5b9e4 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -200,7 +200,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { } batch.Reset() - // Step into the future and delete and dangling side chains + // Step into the future and delete any dangling side chains if frozen > 0 { tip := frozen for len(dangling) > 0 { diff --git a/core/rawdb/database.go b/core/rawdb/database.go index e97eeb2aa3..0c7cf9f11b 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -34,7 +34,7 @@ import ( "github.com/olekukonko/tablewriter" ) -// freezerdb is a database wrapper that enabled freezer data retrievals. +// freezerdb is a database wrapper that enables freezer data retrievals. type freezerdb struct { ancientRoot string ethdb.KeyValueStore @@ -141,7 +141,7 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) // Unlike other ancient-related methods, this method does not return // errNotSupported when invoked. // The reason for this is that the caller might want to do several things: - // 1. Check if something is in freezer, + // 1. Check if something is in the freezer, // 2. If not, check leveldb. // // This will work, since the ancient-checks inside 'fn' will return errors, @@ -209,7 +209,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st // of the freezer and database. Ensure that we don't shoot ourselves in the foot // by serving up conflicting data, leading to both datastores getting corrupted. // - // - If both the freezer and key-value store is empty (no genesis), we just + // - If both the freezer and key-value store are empty (no genesis), we just // initialized a new empty freezer, so everything's fine. // - If the key-value store is empty, but the freezer is not, we need to make // sure the user's genesis matches the freezer. That will be checked in the @@ -218,7 +218,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st // - If neither the key-value store nor the freezer is empty, cross validate // the genesis hashes to make sure they are compatible. If they are, also // ensure that there's no gap between the freezer and subsequently leveldb. 
- // - If the key-value store is not empty, but the freezer is we might just be + // - If the key-value store is not empty, but the freezer is, we might just be // upgrading to the freezer release, or we might have had a small chain and // not frozen anything yet. Ensure that no blocks are missing yet from the // key-value store, since that would mean we already had an old freezer. @@ -634,7 +634,7 @@ func printChainMetadata(db ethdb.KeyValueStore) { fmt.Fprintf(os.Stderr, "\n\n") } -// ReadChainMetadata returns a set of key/value pairs that contains informatin +// ReadChainMetadata returns a set of key/value pairs that contains information // about the database chain status. This can be used for diagnostic purposes // when investigating the state of the node. func ReadChainMetadata(db ethdb.KeyValueStore) [][]string { diff --git a/core/rawdb/databases_64bit.go b/core/rawdb/databases_64bit.go index 1593e89bfe..e9f9332ad0 100644 --- a/core/rawdb/databases_64bit.go +++ b/core/rawdb/databases_64bit.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb/pebble" ) -// Pebble is unsuported on 32bit architecture +// Pebble is unsupported on 32bit architecture const PebbleEnabled = true // NewPebbleDBDatabase creates a persistent key-value database without a freezer diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 1895f61da2..19e4ed5b5c 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -219,7 +219,7 @@ func (b *tableBatch) Put(key, value []byte) error { return b.batch.Put(append([]byte(b.prefix), key...), value) } -// Delete inserts the a key removal into the batch for later committing. +// Delete inserts a key removal into the batch for later committing. func (b *tableBatch) Delete(key []byte) error { return b.batch.Delete(append([]byte(b.prefix), key...)) } From 0ded110b805bcdd0ca45f1c2308b5fb9194f418c Mon Sep 17 00:00:00 2001 From: Sina Mahmoodi <1591639+s1na@users.noreply.github.com> Date: Fri, 29 Sep 2023 10:44:28 +0200 Subject: [PATCH 62/98] core: infer blobGasUsed in chain maker (#28212) The same way that the gasUsed in the header is updated when a tx is added, we should update the blob gas used instead of requiring the caller to set it manually. --- core/chain_makers.go | 8 +++----- internal/ethapi/api_test.go | 3 --- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/core/chain_makers.go b/core/chain_makers.go index c9c880dd69..3608329a13 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -88,11 +88,6 @@ func (b *BlockGen) SetPoS() { b.header.Difficulty = new(big.Int) } -// SetBlobGas sets the data gas used by the blob in the generated block. -func (b *BlockGen) SetBlobGas(blobGasUsed uint64) { - b.header.BlobGasUsed = &blobGasUsed -} - // addTx adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. // @@ -111,6 +106,9 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti } b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) + if b.header.BlobGasUsed != nil { + *b.header.BlobGasUsed += receipt.BlobGasUsed + } } // AddTx adds a transaction to the generated block.
If no coinbase has diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 846a4347a7..59882cd6bb 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -1448,9 +1448,6 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha b.AddTx(tx) txHashes[i] = tx.Hash() } - if i == 5 { - b.SetBlobGas(params.BlobTxBlobGasPerBlob) - } b.SetPoS() }) return backend, txHashes From c5ff839fb2e92657c61f4c3bb8777837bd93fa25 Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Fri, 29 Sep 2023 10:46:23 -0400 Subject: [PATCH 63/98] core/state: small trie prefetcher nits (#28183) Small trie prefetcher nits --- core/state/trie_prefetcher.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 4e8fd1e10f..772c698dd0 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -37,7 +37,7 @@ var ( type triePrefetcher struct { db Database // Database to fetch trie nodes through root common.Hash // Root hash of the account trie for metrics - fetches map[string]Trie // Partially or fully fetcher tries + fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies. fetchers map[string]*subfetcher // Subfetchers for each trie deliveryMissMeter metrics.Meter @@ -197,7 +197,10 @@ func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte // trieID returns an unique trie identifier consists the trie owner and root hash. func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { - return string(append(owner.Bytes(), root.Bytes()...)) + trieID := make([]byte, common.HashLength*2) + copy(trieID, owner.Bytes()) + copy(trieID[common.HashLength:], root.Bytes()) + return string(trieID) } // subfetcher is a trie fetcher goroutine responsible for pulling entries for a From 1f9d672df185d31e87609e912c84932123823c05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 29 Sep 2023 18:14:26 +0300 Subject: [PATCH 64/98] common: remove address.Hash footgun (#28228) --- common/types.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/common/types.go b/common/types.go index bf74e43716..7184b2b112 100644 --- a/common/types.go +++ b/common/types.go @@ -239,9 +239,6 @@ func (a Address) Cmp(other Address) int { // Bytes gets the string representation of the underlying address. func (a Address) Bytes() []byte { return a[:] } -// Hash converts an address to a hash by left-padding it with zeros. -func (a Address) Hash() Hash { return BytesToHash(a[:]) } - // Big converts an address to a big integer. 
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) } From 22dcb7a77bc809506b84f4a45a448e9bedd822ff Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Sat, 30 Sep 2023 01:45:38 +0800 Subject: [PATCH 65/98] ethdb/pebble: upgrade pebble to master (aa077af62593) (#28070) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ethdb/pebble: upgrade pebble * ethdb/pebble, go.mod: update pebble to master (aa077af62593) --------- Co-authored-by: Péter Szilágyi --- ethdb/pebble/pebble.go | 25 +++++++++++++++------ go.mod | 27 ++++++++++++----------- go.sum | 49 +++++++++++++++++++++++------------------- 3 files changed, 59 insertions(+), 42 deletions(-) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index c35a154cac..e6a8f8134c 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -27,6 +27,7 @@ import ( "sync/atomic" "time" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/bloom" "github.com/ethereum/go-ethereum/common" @@ -118,6 +119,18 @@ func (d *Database) onWriteStallEnd() { d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime))) } +// panicLogger is just a noop logger to disable Pebble's internal logger. +// +// TODO(karalabe): Remove when Pebble sets this as teh default. +type panicLogger struct{} + +func (l panicLogger) Infof(format string, args ...interface{}) { +} + +func (l panicLogger) Fatalf(format string, args ...interface{}) { + panic(errors.Errorf("fatal: "+format, args...)) +} + // New returns a wrapped pebble DB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) { @@ -158,7 +171,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e // The size of memory table(as well as the write buffer). // Note, there may have more than two memory tables in the system. - MemTableSize: memTableSize, + MemTableSize: uint64(memTableSize), // MemTableStopWritesThreshold places a hard limit on the size // of the existent MemTables(including the frozen one). @@ -189,6 +202,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e WriteStallBegin: db.onWriteStallBegin, WriteStallEnd: db.onWriteStallEnd, }, + Logger: panicLogger{}, // TODO(karalabe): Delete when this is upstreamed in Pebble } // Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130 // for more details. @@ -305,12 +319,9 @@ func (d *Database) NewBatch() ethdb.Batch { } // NewBatchWithSize creates a write-only database batch with pre-allocated buffer. -// It's not supported by pebble, but pebble has better memory allocation strategy -// which turns out a lot faster than leveldb. It's performant enough to construct -// batch object without any pre-allocated space. -func (d *Database) NewBatchWithSize(_ int) ethdb.Batch { +func (d *Database) NewBatchWithSize(size int) ethdb.Batch { return &batch{ - b: d.db.NewBatch(), + b: d.db.NewBatchWithSize(size), db: d, } } @@ -582,7 +593,7 @@ type pebbleIterator struct { // of database content with a particular key prefix, starting at a particular // initial key (or after, if it does not exist). 
func (d *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - iter := d.db.NewIter(&pebble.IterOptions{ + iter, _ := d.db.NewIter(&pebble.IterOptions{ LowerBound: append(prefix, start...), UpperBound: upperBound(prefix), }) diff --git a/go.mod b/go.mod index a43b1d3f8b..15832e80c9 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,8 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.2.0 github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.14.0 - github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 + github.com/cockroachdb/errors v1.8.1 + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/consensys/gnark-crypto v0.10.0 github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 @@ -30,7 +31,7 @@ require ( github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa github.com/google/uuid v1.3.0 @@ -49,7 +50,7 @@ require ( github.com/karalabe/usb v0.0.2 github.com/kylelemons/godebug v1.1.0 github.com/mattn/go-colorable v0.1.13 - github.com/mattn/go-isatty v0.0.16 + github.com/mattn/go-isatty v0.0.17 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 @@ -57,19 +58,19 @@ require ( github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/status-im/keycard-go v0.2.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tyler-smith/go-bip39 v1.1.0 github.com/urfave/cli/v2 v2.25.7 go.uber.org/automaxprocs v1.5.2 - golang.org/x/crypto v0.12.0 - golang.org/x/exp v0.0.0-20230810033253-352e893a4cad + golang.org/x/crypto v0.13.0 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.11.0 - golang.org/x/text v0.12.0 + golang.org/x/sys v0.12.0 + golang.org/x/text v0.13.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.9.1 + golang.org/x/tools v0.13.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -87,10 +88,10 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.5.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.8.1 // indirect github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect github.com/cockroachdb/redact v1.0.8 // indirect github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 // indirect @@ -126,10 +127,10 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/net v0.10.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.15.0 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gotest.tools/v3 v3.5.0 // indirect + 
gotest.tools/v3 v3.5.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index ca5617c2cc..dfbe5420bc 100644 --- a/go.sum +++ b/go.sum @@ -109,18 +109,20 @@ github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= -github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y= github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= github.com/cockroachdb/redact v1.0.8 h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw= github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM= github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= @@ -262,8 +264,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy 
v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= @@ -409,8 +412,9 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -536,8 +540,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -592,8 +596,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -604,8 +608,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU= -golang.org/x/exp v0.0.0-20230810033253-352e893a4cad/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -627,8 +631,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -672,8 +676,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -756,8 +760,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 
h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -771,8 +776,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -827,8 +832,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -941,8 +946,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 966e50bddb502383b795320b0a241baf36910ee4 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 29 Sep 2023 20:52:55 +0300 Subject: [PATCH 66/98] ethdb/pebble: luv you linter --- ethdb/pebble/pebble.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index e6a8f8134c..5aa00aad4e 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -121,7 +121,7 @@ func (d *Database) onWriteStallEnd() { // panicLogger is just a noop logger to disable Pebble's internal logger. // -// TODO(karalabe): Remove when Pebble sets this as teh default. +// TODO(karalabe): Remove when Pebble sets this as the default. type panicLogger struct{} func (l panicLogger) Infof(format string, args ...interface{}) { From a408e37fa1165e4e9b65031dc3fa318724812454 Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Fri, 29 Sep 2023 12:27:30 -0600 Subject: [PATCH 67/98] eth/catalyst: add validation error in new payload hash mismatch (#28226) * eth/catalyst: add validation error in new payload hash mismatch * eth/catalyst/api: refactor api.invalid(..) to return nil latest valid hash if none provided --- eth/catalyst/api.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 1e5fb3ccb3..d1e1991414 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -513,7 +513,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot) if err != nil { log.Warn("Invalid NewPayload params", "params", params, "error", err) - return engine.PayloadStatusV1{Status: engine.INVALID}, nil + return api.invalid(err, nil), nil } // Stash away the last update to warn the user if the beacon client goes offline api.lastNewPayloadLock.Lock() @@ -694,20 +694,21 @@ func (api *ConsensusAPI) checkInvalidAncestor(check common.Hash, head common.Has } } -// invalid returns a response "INVALID" with the latest valid hash supplied by latest or to the current head -// if no latestValid block was provided. +// invalid returns a response "INVALID" with the latest valid hash supplied by latest.
func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.PayloadStatusV1 { - currentHash := api.eth.BlockChain().CurrentBlock().Hash() + var currentHash *common.Hash if latestValid != nil { - // Set latest valid hash to 0x0 if parent is PoW block - currentHash = common.Hash{} - if latestValid.Difficulty.BitLen() == 0 { + if latestValid.Difficulty.BitLen() != 0 { + // Set latest valid hash to 0x0 if parent is PoW block + currentHash = &common.Hash{} + } else { // Otherwise set latest valid hash to parent hash - currentHash = latestValid.Hash() + h := latestValid.Hash() + currentHash = &h } } errorMsg := err.Error() - return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: &currentHash, ValidationError: &errorMsg} + return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: currentHash, ValidationError: &errorMsg} } // heartbeat loops indefinitely, and checks if there have been beacon client updates From 7b6ff527d543a4478b4079d2b69b0a24fac75fa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 29 Sep 2023 22:11:15 +0300 Subject: [PATCH 68/98] cmd, eth: switch the dev synctarget to hash from block (#28209) * cmd, eth: switch the dev synctarget to hash from block * cmd/utils, eth/catalyst: terminate node when synctarget reached --- cmd/geth/config.go | 13 ++++-- cmd/utils/flags.go | 30 ++++-------- eth/catalyst/tester.go | 54 +++++++++++----------- eth/downloader/beacondevsync.go | 81 +++++++++++++++++++++++++++++++++ eth/downloader/downloader.go | 10 ---- eth/downloader/peer.go | 27 +---------- 6 files changed, 126 insertions(+), 89 deletions(-) create mode 100644 eth/downloader/beacondevsync.go diff --git a/cmd/geth/config.go b/cmd/geth/config.go index a5d628d8af..027dac7bd6 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -32,6 +32,8 @@ import ( "github.com/ethereum/go-ethereum/accounts/scwallet" "github.com/ethereum/go-ethereum/accounts/usbwallet" "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" @@ -199,17 +201,18 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { if ctx.IsSet(utils.GraphQLEnabledFlag.Name) { utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node) } - // Add the Ethereum Stats daemon if requested. if cfg.Ethstats.URL != "" { utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) } - // Configure full-sync tester service if requested - if ctx.IsSet(utils.SyncTargetFlag.Name) && cfg.Eth.SyncMode == downloader.FullSync { - utils.RegisterFullSyncTester(stack, eth, ctx.Path(utils.SyncTargetFlag.Name)) + if ctx.IsSet(utils.SyncTargetFlag.Name) { + hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name)) + if len(hex) != common.HashLength { + utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength) + } + utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex)) } - // Start the dev mode if requested, or launch the engine API for // interacting with external consensus client.
if ctx.IsSet(utils.DeveloperFlag.Name) { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c172d269c5..9743a7b9ca 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -18,7 +18,6 @@ package utils import ( - "bytes" "context" "crypto/ecdsa" "encoding/hex" @@ -39,11 +38,9 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/fdlimit" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool/legacypool" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -72,7 +69,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" @@ -595,9 +591,9 @@ var ( } // MISC settings - SyncTargetFlag = &cli.PathFlag{ + SyncTargetFlag = &cli.StringFlag{ Name: "synctarget", - Usage: `File for containing the hex-encoded block-rlp as sync target(dev feature)`, + Usage: `Hash of the block to full sync to (dev testing feature)`, TakesFile: true, Category: flags.MiscCategory, } @@ -1691,7 +1687,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) godebug.SetGCPercent(int(gogc)) - if ctx.IsSet(SyncModeFlag.Name) { + if ctx.IsSet(SyncTargetFlag.Name) { + cfg.SyncMode = downloader.FullSync // dev sync target forces full sync + } else if ctx.IsSet(SyncModeFlag.Name) { cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) } if ctx.IsSet(NetworkIdFlag.Name) { @@ -1976,21 +1974,9 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf } // RegisterFullSyncTester adds the full-sync tester service into node. 
-func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, path string) { - blob, err := os.ReadFile(path) - if err != nil { - Fatalf("Failed to read block file: %v", err) - } - rlpBlob, err := hexutil.Decode(string(bytes.TrimRight(blob, "\r\n"))) - if err != nil { - Fatalf("Failed to decode block blob: %v", err) - } - var block types.Block - if err := rlp.DecodeBytes(rlpBlob, &block); err != nil { - Fatalf("Failed to decode block: %v", err) - } - catalyst.RegisterFullSyncTester(stack, eth, &block) - log.Info("Registered full-sync tester", "number", block.NumberU64(), "hash", block.Hash()) +func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash) { + catalyst.RegisterFullSyncTester(stack, eth, target) + log.Info("Registered full-sync tester", "hash", target) } func SetupMetrics(ctx *cli.Context) { diff --git a/eth/catalyst/tester.go b/eth/catalyst/tester.go index 3e9159a175..0922ac0ba6 100644 --- a/eth/catalyst/tester.go +++ b/eth/catalyst/tester.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/log" @@ -28,23 +28,27 @@ import ( ) // FullSyncTester is an auxiliary service that allows Geth to perform full sync -// alone without consensus-layer attached. Users must specify a valid block as -// the sync target. This tester can be applied to different networks, no matter -// it's pre-merge or post-merge, but only for full-sync. +// alone without consensus-layer attached. Users must specify a valid block hash +// as the sync target. +// +// This tester can be applied to different networks, no matter it's pre-merge or +// post-merge, but only for full-sync. type FullSyncTester struct { - api *ConsensusAPI - block *types.Block - closed chan struct{} - wg sync.WaitGroup + stack *node.Node + backend *eth.Ethereum + target common.Hash + closed chan struct{} + wg sync.WaitGroup } // RegisterFullSyncTester registers the full-sync tester service into the node // stack for launching and stopping the service controlled by node. -func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, block *types.Block) (*FullSyncTester, error) { +func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, target common.Hash) (*FullSyncTester, error) { cl := &FullSyncTester{ - api: newConsensusAPIWithoutHeartbeat(backend), - block: block, - closed: make(chan struct{}), + stack: stack, + backend: backend, + target: target, + closed: make(chan struct{}), } stack.RegisterLifecycle(cl) return cl, nil @@ -56,29 +60,25 @@ func (tester *FullSyncTester) Start() error { go func() { defer tester.wg.Done() + // Trigger beacon sync with the provided block hash as trusted + // chain head. + err := tester.backend.Downloader().BeaconDevSync(downloader.FullSync, tester.target, tester.closed) + if err != nil { + log.Info("Failed to trigger beacon sync", "err", err) + } + ticker := time.NewTicker(time.Second * 5) defer ticker.Stop() for { select { case <-ticker.C: - // Don't bother downloader in case it's already syncing. - if tester.api.eth.Downloader().Synchronising() { - continue - } - // Short circuit in case the target block is already stored - // locally. TODO(somehow terminate the node stack if target - // is reached). 
- if tester.api.eth.BlockChain().HasBlock(tester.block.Hash(), tester.block.NumberU64()) { - log.Info("Full-sync target reached", "number", tester.block.NumberU64(), "hash", tester.block.Hash()) + // Stop in case the target block is already stored locally. + if block := tester.backend.BlockChain().GetBlockByHash(tester.target); block != nil { + log.Info("Full-sync target reached", "number", block.NumberU64(), "hash", block.Hash()) + go tester.stack.Close() // async since we need to close ourselves return } - // Trigger beacon sync with the provided block header as - // trusted chain head. - err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header(), tester.block.Header()) - if err != nil { - log.Info("Failed to beacon sync", "err", err) - } case <-tester.closed: return diff --git a/eth/downloader/beacondevsync.go b/eth/downloader/beacondevsync.go new file mode 100644 index 0000000000..9a38fedd46 --- /dev/null +++ b/eth/downloader/beacondevsync.go @@ -0,0 +1,81 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package downloader + +import ( + "errors" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +// BeaconDevSync is a development helper to test synchronization by providing +// a block hash instead of header to run the beacon sync against. +// +// The method will reach out to the network to retrieve the header of the sync +// target instead of receiving it from the consensus node. +// +// Note, this must not be used in live code. If the forkchoice endpoint were +// to use this instead of giving us the payload first, then essentially nobody +// in the network would have the block yet that we'd attempt to retrieve.
+func (d *Downloader) BeaconDevSync(mode SyncMode, hash common.Hash, stop chan struct{}) error { + // Be very loud that this code should not be used in a live node + log.Warn("----------------------------------") + log.Warn("Beacon syncing with hash as target", "hash", hash) + log.Warn("This is unhealthy for a live node!") + log.Warn("----------------------------------") + + log.Info("Waiting for peers to retrieve sync target") + for { + // If the node is going down, unblock + select { + case <-stop: + return errors.New("stop requested") + default: + } + // Pick a random peer to sync from and keep retrying if none are yet + // available due to fresh startup + d.peers.lock.RLock() + var peer *peerConnection + for _, peer = range d.peers.peers { + break + } + d.peers.lock.RUnlock() + + if peer == nil { + time.Sleep(time.Second) + continue + } + // Found a peer, attempt to retrieve the header whilst blocking and + // retry if it fails for whatever reason + log.Info("Attempting to retrieve sync target", "peer", peer.id) + headers, metas, err := d.fetchHeadersByHash(peer, hash, 1, 0, false) + if err != nil || len(headers) != 1 { + log.Warn("Failed to fetch sync target", "headers", len(headers), "err", err) + time.Sleep(time.Second) + continue + } + // Head header retrieved, if the hash matches, start the actual sync + if metas[0] != hash { + log.Error("Received invalid sync target", "want", hash, "have", metas[0]) + time.Sleep(time.Second) + continue + } + return d.BeaconSync(mode, headers[0], headers[0]) + } +} diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 7fed48bdb2..2ca7e328c6 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -286,11 +286,6 @@ func (d *Downloader) Progress() ethereum.SyncProgress { } } -// Synchronising returns whether the downloader is currently retrieving blocks. -func (d *Downloader) Synchronising() bool { - return d.synchronising.Load() -} - // RegisterPeer injects a new download peer into the set of block source to be // used for fetching hashes and blocks from. func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { @@ -309,11 +304,6 @@ func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { return nil } -// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. -func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error { - return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) -} - // UnregisterPeer remove a peer from the known list, preventing any action from // the specified peer. An effort is also made to return any pending fetches into // the queue. diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 6b82694959..4c43af5270 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -55,39 +55,16 @@ type peerConnection struct { lock sync.RWMutex } -// LightPeer encapsulates the methods required to synchronise with a remote light peer. -type LightPeer interface { +// Peer encapsulates the methods required to synchronise with a remote full peer. +type Peer interface { Head() (common.Hash, *big.Int) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error) -} -// Peer encapsulates the methods required to synchronise with a remote full peer. 
-type Peer interface { - LightPeer RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) } -// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods. -type lightPeerWrapper struct { - peer LightPeer -} - -func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() } -func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { - return w.peer.RequestHeadersByHash(h, amount, skip, reverse, sink) -} -func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { - return w.peer.RequestHeadersByNumber(i, amount, skip, reverse, sink) -} -func (w *lightPeerWrapper) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) { - panic("RequestBodies not supported in light client mode sync") -} -func (w *lightPeerWrapper) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) { - panic("RequestReceipts not supported in light client mode sync") -} - // newPeerConnection creates a new downloader peer. func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection { return &peerConnection{ From c39cbc1a78aa275523c1b0ff9d21b16ba7bfa486 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Mon, 2 Oct 2023 11:49:29 +0200 Subject: [PATCH 69/98] core: implement BLOBBASEFEE opcode (0x4a) (#28098) Implements "EIP-7516: BLOBBASEFEE opcode" for Cancun, as per spec: https://eips.ethereum.org/EIPS/eip-7516 --- cmd/evm/internal/t8ntool/execution.go | 16 +++++++++------- cmd/evm/runner.go | 4 +++- core/evm.go | 27 ++++++++++++++++----------- core/state_processor.go | 3 +-- core/state_transition.go | 5 ++--- core/vm/eips.go | 20 ++++++++++++++++++-- core/vm/evm.go | 16 ++++++++-------- core/vm/jump_table.go | 3 ++- core/vm/opcodes.go | 3 +++ core/vm/runtime/env.go | 1 + core/vm/runtime/runtime.go | 4 ++++ internal/ethapi/api.go | 18 +++++++++++------- 12 files changed, 78 insertions(+), 42 deletions(-) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index bb14ac63ca..c522379387 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -163,17 +163,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, rnd := common.BigToHash(pre.Env.Random) vmContext.Random = &rnd } - // If excessBlobGas is defined, add it to the vmContext. + // Calculate the BlobBaseFee + var excessBlobGas uint64 if pre.Env.ExcessBlobGas != nil { - vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas + excessBlobGas = *pre.Env.ExcessBlobGas + vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) } else { // If it is not explicitly defined, but we have the parent values, we try // to calculate it ourselves. parentExcessBlobGas := pre.Env.ParentExcessBlobGas parentBlobGasUsed := pre.Env.ParentBlobGasUsed if parentExcessBlobGas != nil && parentBlobGasUsed != nil { - excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) - vmContext.ExcessBlobGas = &excessBlobGas + excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) + vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) } } // If DAO is supported/enabled, we need to handle it here.
In geth 'proper', it's @@ -189,7 +191,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } var blobGasUsed uint64 for i, tx := range txs { - if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil { + if tx.Type() == types.BlobTxType && vmContext.BlobBaseFee == nil { errMsg := "blob tx used but field env.ExcessBlobGas missing" log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg) rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg}) @@ -322,8 +324,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil)) execRs.WithdrawalsRoot = &h } - if vmContext.ExcessBlobGas != nil { - execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas) + if vmContext.BlobBaseFee != nil { + execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas) execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed) } // Re-create statedb instance with new root upon the updated database diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 017388efb5..45fc985351 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -123,7 +123,8 @@ func runCmd(ctx *cli.Context) error { sender = common.BytesToAddress([]byte("sender")) receiver = common.BytesToAddress([]byte("receiver")) preimages = ctx.Bool(DumpFlag.Name) - blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests + blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests + blobBaseFee = new(big.Int) // TODO (MariusVanDerWijden) implement blob fee in state tests ) if ctx.Bool(MachineFlag.Name) { tracer = logger.NewJSONLogger(logconfig, os.Stdout) @@ -221,6 +222,7 @@ func runCmd(ctx *cli.Context) error { Coinbase: genesisConfig.Coinbase, BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), BlobHashes: blobHashes, + BlobBaseFee: blobBaseFee, EVMConfig: vm.Config{ Tracer: tracer, }, diff --git a/core/evm.go b/core/evm.go index 104f2c09dc..46dcb31462 100644 --- a/core/evm.go +++ b/core/evm.go @@ -21,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" ) @@ -40,6 +41,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common var ( beneficiary common.Address baseFee *big.Int + blobBaseFee *big.Int random *common.Hash ) @@ -52,21 +54,24 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common if header.BaseFee != nil { baseFee = new(big.Int).Set(header.BaseFee) } + if header.ExcessBlobGas != nil { + blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas) + } if header.Difficulty.Cmp(common.Big0) == 0 { random = &header.MixDigest } return vm.BlockContext{ - CanTransfer: CanTransfer, - Transfer: Transfer, - GetHash: GetHashFn(header, chain), - Coinbase: beneficiary, - BlockNumber: new(big.Int).Set(header.Number), - Time: header.Time, - Difficulty: new(big.Int).Set(header.Difficulty), - BaseFee: baseFee, - GasLimit: header.GasLimit, - Random: random, - ExcessBlobGas: header.ExcessBlobGas, + CanTransfer: CanTransfer, + Transfer: Transfer, + GetHash: GetHashFn(header, chain), + Coinbase: beneficiary, + BlockNumber: new(big.Int).Set(header.Number), + Time: header.Time, + Difficulty: new(big.Int).Set(header.Difficulty), + BaseFee: baseFee, + 
BlobBaseFee: blobBaseFee, + GasLimit: header.GasLimit, + Random: random, } } diff --git a/core/state_processor.go b/core/state_processor.go index 6a208a1811..7dd81487d5 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -138,7 +137,7 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta if tx.Type() == types.BlobTxType { receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob) - receipt.BlobGasPrice = eip4844.CalcBlobFee(*evm.Context.ExcessBlobGas) + receipt.BlobGasPrice = evm.Context.BlobBaseFee } // If the transaction created a contract, store the creation address in the receipt. diff --git a/core/state_transition.go b/core/state_transition.go index f84757be78..fb03c48aab 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" @@ -248,7 +247,7 @@ func (st *StateTransition) buyGas() error { balanceCheck.Add(balanceCheck, blobBalanceCheck) // Pay for blobGasUsed * actual blob fee blobFee := new(big.Int).SetUint64(blobGas) - blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)) + blobFee.Mul(blobFee, st.evm.Context.BlobBaseFee) mgval.Add(mgval, blobFee) } } @@ -329,7 +328,7 @@ func (st *StateTransition) preCheck() error { if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { if st.blobGasUsed() > 0 { // Check that the user is paying at least the current blob fee - blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas) + blobFee := st.evm.Context.BlobBaseFee if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 { return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee) } diff --git a/core/vm/eips.go b/core/vm/eips.go index 704c1ce127..35f0a3f7c2 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -282,9 +282,15 @@ func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ return nil, nil } -// enable4844 applies EIP-4844 (DATAHASH opcode) +// opBlobBaseFee implements BLOBBASEFEE opcode +func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + blobBaseFee, _ := uint256.FromBig(interpreter.evm.Context.BlobBaseFee) + scope.Stack.push(blobBaseFee) + return nil, nil +} + +// enable4844 applies EIP-4844 (BLOBHASH opcode) func enable4844(jt *JumpTable) { - // New opcode jt[BLOBHASH] = &operation{ execute: opBlobHash, constantGas: GasFastestStep, @@ -293,6 +299,16 @@ func enable4844(jt *JumpTable) { } } +// enable7516 applies EIP-7516 (BLOBBASEFEE opcode) +func enable7516(jt *JumpTable) { + jt[BLOBBASEFEE] = &operation{ + execute: opBlobBaseFee, + constantGas: GasQuickStep, + minStack: minStack(0, 1), + maxStack: maxStack(0, 1), + } +} + // enable6780 applies EIP-6780 (deactivate SELFDESTRUCT) func enable6780(jt *JumpTable) { jt[SELFDESTRUCT] = &operation{ diff --git a/core/vm/evm.go 
b/core/vm/evm.go index 40e2f3554f..2c6cc7d484 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -67,14 +67,14 @@ type BlockContext struct { GetHash GetHashFunc // Block information - Coinbase common.Address // Provides information for COINBASE - GasLimit uint64 // Provides information for GASLIMIT - BlockNumber *big.Int // Provides information for NUMBER - Time uint64 // Provides information for TIME - Difficulty *big.Int // Provides information for DIFFICULTY - BaseFee *big.Int // Provides information for BASEFEE - Random *common.Hash // Provides information for PREVRANDAO - ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the data + Coinbase common.Address // Provides information for COINBASE + GasLimit uint64 // Provides information for GASLIMIT + BlockNumber *big.Int // Provides information for NUMBER + Time uint64 // Provides information for TIME + Difficulty *big.Int // Provides information for DIFFICULTY + BaseFee *big.Int // Provides information for BASEFEE + BlobBaseFee *big.Int // Provides information for BLOBBASEFEE + Random *common.Hash // Provides information for PREVRANDAO } // TxContext provides the EVM with information about a transaction. diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 702b186615..fb87258326 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -82,7 +82,8 @@ func validate(jt JumpTable) JumpTable { func newCancunInstructionSet() JumpTable { instructionSet := newShanghaiInstructionSet() - enable4844(&instructionSet) // EIP-4844 (DATAHASH opcode) + enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode) + enable7516(&instructionSet) // EIP-7516 (BLOBBASEFEE opcode) enable1153(&instructionSet) // EIP-1153 "Transient Storage" enable5656(&instructionSet) // EIP-5656 (MCOPY opcode) enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index 2929b8ce92..a11cf05a15 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -101,6 +101,7 @@ const ( SELFBALANCE OpCode = 0x47 BASEFEE OpCode = 0x48 BLOBHASH OpCode = 0x49 + BLOBBASEFEE OpCode = 0x4a ) // 0x50 range - 'storage' and execution. @@ -287,6 +288,7 @@ var opCodeToString = map[OpCode]string{ SELFBALANCE: "SELFBALANCE", BASEFEE: "BASEFEE", BLOBHASH: "BLOBHASH", + BLOBBASEFEE: "BLOBBASEFEE", // 0x50 range - 'storage' and execution. 
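// Aside (a sketch, not part of the patch): how the value pushed by BLOBBASEFEE
// is derived from the header's excessBlobGas. This mirrors the
// eip4844.CalcExcessBlobGas and eip4844.CalcBlobFee helpers invoked in the
// hunks above; the constants are the mainnet EIP-4844 parameters, named
// locally here for illustration.
package main

import (
	"fmt"
	"math/big"
)

const (
	minBlobBaseFee            = 1          // MIN_BLOB_BASE_FEE
	blobBaseFeeUpdateFraction = 3338477    // BLOB_BASE_FEE_UPDATE_FRACTION
	targetBlobGasPerBlock     = 3 * 131072 // three target blobs of 0x20000 gas each
)

// calcExcessBlobGas: blob gas consumed above the per-block target rolls over
// into the next block's excess, which in turn raises the blob base fee.
func calcExcessBlobGas(parentExcess, parentUsed uint64) uint64 {
	if parentExcess+parentUsed < targetBlobGasPerBlock {
		return 0
	}
	return parentExcess + parentUsed - targetBlobGasPerBlock
}

// fakeExponential approximates factor * e**(numerator/denominator) using the
// integer Taylor expansion given in the EIP-4844 specification.
func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
	var (
		output = new(big.Int)
		accum  = new(big.Int).Mul(factor, denominator)
	)
	for i := 1; accum.Sign() > 0; i++ {
		output.Add(output, accum)
		accum.Mul(accum, numerator)
		accum.Div(accum, denominator)
		accum.Div(accum, big.NewInt(int64(i)))
	}
	return output.Div(output, denominator)
}

func main() {
	// Parent block used six blobs against a target of three.
	excess := calcExcessBlobGas(0, 6*131072)
	fee := fakeExponential(big.NewInt(minBlobBaseFee),
		new(big.Int).SetUint64(excess), big.NewInt(blobBaseFeeUpdateFraction))
	fmt.Println("excess blob gas:", excess, "blob base fee:", fee)
}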
POP: "POP", @@ -444,6 +446,7 @@ var stringToOp = map[string]OpCode{ "CHAINID": CHAINID, "BASEFEE": BASEFEE, "BLOBHASH": BLOBHASH, + "BLOBBASEFEE": BLOBBASEFEE, "DELEGATECALL": DELEGATECALL, "STATICCALL": STATICCALL, "CODESIZE": CODESIZE, diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go index 7e330e0732..64aa550a25 100644 --- a/core/vm/runtime/env.go +++ b/core/vm/runtime/env.go @@ -37,6 +37,7 @@ func NewEnv(cfg *Config) *vm.EVM { Difficulty: cfg.Difficulty, GasLimit: cfg.GasLimit, BaseFee: cfg.BaseFee, + BlobBaseFee: cfg.BlobBaseFee, Random: cfg.Random, } diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 480e5cec67..cfd7e4dbc4 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -44,6 +44,7 @@ type Config struct { Debug bool EVMConfig vm.Config BaseFee *big.Int + BlobBaseFee *big.Int BlobHashes []common.Hash Random *common.Hash @@ -95,6 +96,9 @@ func setDefaults(cfg *Config) { if cfg.BaseFee == nil { cfg.BaseFee = big.NewInt(params.InitialBaseFee) } + if cfg.BlobBaseFee == nil { + cfg.BlobBaseFee = new(big.Int) + } } // Execute executes the code using the input as call data during the execution. diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index d22424502c..cf1960fcf6 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -991,13 +991,14 @@ func (diff *StateOverride) Apply(state *state.StateDB) error { // BlockOverrides is a set of header fields to override. type BlockOverrides struct { - Number *hexutil.Big - Difficulty *hexutil.Big - Time *hexutil.Uint64 - GasLimit *hexutil.Uint64 - Coinbase *common.Address - Random *common.Hash - BaseFee *hexutil.Big + Number *hexutil.Big + Difficulty *hexutil.Big + Time *hexutil.Uint64 + GasLimit *hexutil.Uint64 + Coinbase *common.Address + Random *common.Hash + BaseFee *hexutil.Big + BlobBaseFee *hexutil.Big } // Apply overrides the given header fields into the given block context. @@ -1026,6 +1027,9 @@ func (diff *BlockOverrides) Apply(blockCtx *vm.BlockContext) { if diff.BaseFee != nil { blockCtx.BaseFee = diff.BaseFee.ToInt() } + if diff.BlobBaseFee != nil { + blockCtx.BlobBaseFee = diff.BlobBaseFee.ToInt() + } } // ChainContextBackend provides methods required to implement ChainContext. From 705a51e566bc9215975d08f27d23ddab7baa9dd7 Mon Sep 17 00:00:00 2001 From: tylerni7 Date: Tue, 3 Oct 2023 00:23:19 -0700 Subject: [PATCH 70/98] eth, rpc: add configurable option for wsMessageSizeLimit (#27801) This change adds a configurable limit to websocket message. --------- Co-authored-by: Martin Holst Swende --- rpc/client_opt.go | 11 +++++++- rpc/server_test.go | 2 +- rpc/testservice_test.go | 4 +++ rpc/websocket.go | 14 ++++++---- rpc/websocket_test.go | 62 ++++++++++++++++++++++++++++++++++++++++- 5 files changed, 85 insertions(+), 8 deletions(-) diff --git a/rpc/client_opt.go b/rpc/client_opt.go index 5bef08cca8..3fa045a9b9 100644 --- a/rpc/client_opt.go +++ b/rpc/client_opt.go @@ -34,7 +34,8 @@ type clientConfig struct { httpAuth HTTPAuth // WebSocket options - wsDialer *websocket.Dialer + wsDialer *websocket.Dialer + wsMessageSizeLimit *int64 // wsMessageSizeLimit nil = default, 0 = no limit // RPC handler options idgen func() ID @@ -66,6 +67,14 @@ func WithWebsocketDialer(dialer websocket.Dialer) ClientOption { }) } +// WithWebsocketMessageSizeLimit configures the websocket message size limit used by the RPC +// client. Passing a limit of 0 means no limit. 
+func WithWebsocketMessageSizeLimit(messageSizeLimit int64) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.wsMessageSizeLimit = &messageSizeLimit + }) +} + // WithHeader configures HTTP headers set by the RPC client. Headers set using this option // will be used for both HTTP and WebSocket connections. func WithHeader(key, value string) ClientOption { diff --git a/rpc/server_test.go b/rpc/server_test.go index 5d3929dfdc..47a15b610a 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -45,7 +45,7 @@ func TestServerRegisterName(t *testing.T) { t.Fatalf("Expected service calc to be registered") } - wantCallbacks := 13 + wantCallbacks := 14 if len(svc.callbacks) != wantCallbacks { t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks)) } diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go index eab67f1dd5..7d873af667 100644 --- a/rpc/testservice_test.go +++ b/rpc/testservice_test.go @@ -90,6 +90,10 @@ func (s *testService) EchoWithCtx(ctx context.Context, str string, i int, args * return echoResult{str, i, args} } +func (s *testService) Repeat(msg string, i int) string { + return strings.Repeat(msg, i) +} + func (s *testService) PeerInfo(ctx context.Context) PeerInfo { return PeerInfoFromContext(ctx) } diff --git a/rpc/websocket.go b/rpc/websocket.go index 86cf50594c..538e53a31b 100644 --- a/rpc/websocket.go +++ b/rpc/websocket.go @@ -38,7 +38,7 @@ const ( wsPingInterval = 30 * time.Second wsPingWriteTimeout = 5 * time.Second wsPongTimeout = 30 * time.Second - wsMessageSizeLimit = 32 * 1024 * 1024 + wsDefaultReadLimit = 32 * 1024 * 1024 ) var wsBufferPool = new(sync.Pool) @@ -60,7 +60,7 @@ func (s *Server) WebsocketHandler(allowedOrigins []string) http.Handler { log.Debug("WebSocket upgrade failed", "err", err) return } - codec := newWebsocketCodec(conn, r.Host, r.Header) + codec := newWebsocketCodec(conn, r.Host, r.Header, wsDefaultReadLimit) s.ServeCodec(codec, 0) }) } @@ -251,7 +251,11 @@ func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, er } return nil, hErr } - return newWebsocketCodec(conn, dialURL, header), nil + messageSizeLimit := int64(wsDefaultReadLimit) + if cfg.wsMessageSizeLimit != nil && *cfg.wsMessageSizeLimit >= 0 { + messageSizeLimit = *cfg.wsMessageSizeLimit + } + return newWebsocketCodec(conn, dialURL, header, messageSizeLimit), nil } return connect, nil } @@ -283,8 +287,8 @@ type websocketCodec struct { pongReceived chan struct{} } -func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) ServerCodec { - conn.SetReadLimit(wsMessageSizeLimit) +func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header, readLimit int64) ServerCodec { + conn.SetReadLimit(readLimit) encode := func(v interface{}, isErrorResponse bool) error { return conn.WriteJSON(v) } diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go index fb9357605b..e4ac5c3fad 100644 --- a/rpc/websocket_test.go +++ b/rpc/websocket_test.go @@ -113,6 +113,66 @@ func TestWebsocketLargeCall(t *testing.T) { } } +// This test checks whether the wsMessageSizeLimit option is obeyed. 
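// Aside (a usage sketch, not part of the patch): configuring the new
// client-side read limit checked by the test that follows. Only DialOptions
// and WithWebsocketMessageSizeLimit come from the rpc package; the endpoint
// URL is a placeholder.
package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Raise the websocket read limit from the 32 MiB default to 128 MiB.
	client, err := rpc.DialOptions(context.Background(),
		"ws://127.0.0.1:8546", // placeholder endpoint
		rpc.WithWebsocketMessageSizeLimit(128*1024*1024))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var head map[string]any
	if err := client.Call(&head, "eth_getBlockByNumber", "latest", false); err != nil {
		log.Fatal(err)
	}
	log.Println("head block:", head["number"])
}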
+func TestWebsocketLargeRead(t *testing.T) { + t.Parallel() + + var ( + srv = newTestServer() + httpsrv = httptest.NewServer(srv.WebsocketHandler([]string{"*"})) + wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + ) + defer srv.Stop() + defer httpsrv.Close() + + testLimit := func(limit *int64) { + opts := []ClientOption{} + expLimit := int64(wsDefaultReadLimit) + if limit != nil && *limit >= 0 { + opts = append(opts, WithWebsocketMessageSizeLimit(*limit)) + if *limit > 0 { + expLimit = *limit // 0 means infinite + } + } + client, err := DialOptions(context.Background(), wsURL, opts...) + if err != nil { + t.Fatalf("can't dial: %v", err) + } + defer client.Close() + // Remove some bytes for json encoding overhead. + underLimit := int(expLimit - 128) + overLimit := expLimit + 1 + if expLimit == wsDefaultReadLimit { + // No point trying the full 32MB in tests. Just sanity-check that + // it's not obviously limited. + underLimit = 1024 + overLimit = -1 + } + var res string + // Check under limit + if err = client.Call(&res, "test_repeat", "A", underLimit); err != nil { + t.Fatalf("unexpected error with limit %d: %v", expLimit, err) + } + if len(res) != underLimit || strings.Count(res, "A") != underLimit { + t.Fatal("incorrect data") + } + // Check over limit + if overLimit > 0 { + err = client.Call(&res, "test_repeat", "A", expLimit+1) + if err == nil || err != websocket.ErrReadLimit { + t.Fatalf("wrong error with limit %d: %v expecting %v", expLimit, err, websocket.ErrReadLimit) + } + } + } + ptr := func(v int64) *int64 { return &v } + + testLimit(ptr(-1)) // Should be ignored (use default) + testLimit(ptr(0)) // Should be ignored (use default) + testLimit(nil) // Should be ignored (use default) + testLimit(ptr(200)) + testLimit(ptr(wsDefaultReadLimit * 2)) +} + func TestWebsocketPeerInfo(t *testing.T) { var ( s = newTestServer() @@ -206,7 +266,7 @@ func TestClientWebsocketLargeMessage(t *testing.T) { defer srv.Stop() defer httpsrv.Close() - respLength := wsMessageSizeLimit - 50 + respLength := wsDefaultReadLimit - 50 srv.RegisterName("test", largeRespService{respLength}) c, err := DialWebsocket(context.Background(), wsURL, "") From 07dec7a11c9e4e0edd052fffa3b47791e9fe889a Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Tue, 3 Oct 2023 01:26:33 -0600 Subject: [PATCH 71/98] cmd/evm: cancun-updates for b11r and t8n -tools (#28195) This change updates `evm b11r` (blockbuilder) and `evm t8n` (transition) tools to contain cancun updates (e.g. 
new header fields) --------- Co-authored-by: Mario Vega --- cmd/evm/internal/t8ntool/block.go | 88 ++++++++++++++------------ cmd/evm/internal/t8ntool/execution.go | 10 +-- cmd/evm/internal/t8ntool/gen_header.go | 86 +++++++++++++++---------- cmd/evm/internal/t8ntool/gen_stenv.go | 4 +- cmd/evm/internal/t8ntool/transition.go | 2 +- cmd/evm/testdata/28/env.json | 5 +- cmd/evm/testdata/28/exp.json | 4 +- cmd/evm/testdata/29/exp.json | 4 +- 8 files changed, 114 insertions(+), 89 deletions(-) diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index 09dca8984e..872e2f6b2a 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -37,33 +37,38 @@ import ( //go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go type header struct { - ParentHash common.Hash `json:"parentHash"` - OmmerHash *common.Hash `json:"sha3Uncles"` - Coinbase *common.Address `json:"miner"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot"` - ReceiptHash *common.Hash `json:"receiptsRoot"` - Bloom types.Bloom `json:"logsBloom"` - Difficulty *big.Int `json:"difficulty"` - Number *big.Int `json:"number" gencodec:"required"` - GasLimit uint64 `json:"gasLimit" gencodec:"required"` - GasUsed uint64 `json:"gasUsed"` - Time uint64 `json:"timestamp" gencodec:"required"` - Extra []byte `json:"extraData"` - MixDigest common.Hash `json:"mixHash"` - Nonce *types.BlockNonce `json:"nonce"` - BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *big.Int `json:"difficulty"` + Number *big.Int `json:"number" gencodec:"required"` + GasLimit uint64 `json:"gasLimit" gencodec:"required"` + GasUsed uint64 `json:"gasUsed"` + Time uint64 `json:"timestamp" gencodec:"required"` + Extra []byte `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } type headerMarshaling struct { - Difficulty *math.HexOrDecimal256 - Number *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - GasUsed math.HexOrDecimal64 - Time math.HexOrDecimal64 - Extra hexutil.Bytes - BaseFee *math.HexOrDecimal256 + Difficulty *math.HexOrDecimal256 + Number *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + GasUsed math.HexOrDecimal64 + Time math.HexOrDecimal64 + Extra hexutil.Bytes + BaseFee *math.HexOrDecimal256 + BlobGasUsed *math.HexOrDecimal64 + ExcessBlobGas *math.HexOrDecimal64 } type bbInput struct { @@ -113,22 +118,25 @@ func (c *cliqueInput) UnmarshalJSON(input []byte) error { // ToBlock converts i into a *types.Block func (i *bbInput) ToBlock() *types.Block { header := &types.Header{ - ParentHash: i.Header.ParentHash, - UncleHash: types.EmptyUncleHash, - Coinbase: common.Address{}, - Root: i.Header.Root, - TxHash: 
types.EmptyTxsHash, - ReceiptHash: types.EmptyReceiptsHash, - Bloom: i.Header.Bloom, - Difficulty: common.Big0, - Number: i.Header.Number, - GasLimit: i.Header.GasLimit, - GasUsed: i.Header.GasUsed, - Time: i.Header.Time, - Extra: i.Header.Extra, - MixDigest: i.Header.MixDigest, - BaseFee: i.Header.BaseFee, - WithdrawalsHash: i.Header.WithdrawalsHash, + ParentHash: i.Header.ParentHash, + UncleHash: types.EmptyUncleHash, + Coinbase: common.Address{}, + Root: i.Header.Root, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, + Bloom: i.Header.Bloom, + Difficulty: common.Big0, + Number: i.Header.Number, + GasLimit: i.Header.GasLimit, + GasUsed: i.Header.GasUsed, + Time: i.Header.Time, + Extra: i.Header.Extra, + MixDigest: i.Header.MixDigest, + BaseFee: i.Header.BaseFee, + WithdrawalsHash: i.Header.WithdrawalsHash, + BlobGasUsed: i.Header.BlobGasUsed, + ExcessBlobGas: i.Header.ExcessBlobGas, + ParentBeaconRoot: i.Header.ParentBeaconBlockRoot, } // Fill optional values. diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index c522379387..312f427d4c 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -59,7 +59,7 @@ type ExecutionResult struct { BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"` CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` - CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"` + CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"` } type ommer struct { @@ -85,7 +85,7 @@ type stEnv struct { Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` BaseFee *big.Int `json:"currentBaseFee,omitempty"` ParentUncleHash common.Hash `json:"parentUncleHash"` - ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"` + ExcessBlobGas *uint64 `json:"currentExcessBlobGas,omitempty"` ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` @@ -197,6 +197,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg}) continue } + if tx.Type() == types.BlobTxType { + blobGasUsed += uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes())) + } msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee) if err != nil { log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) @@ -226,9 +229,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, gaspool.SetGas(prevGas) continue } - if tx.Type() == types.BlobTxType { - blobGasUsed += params.BlobTxBlobGasPerBlob - } includedTxs = append(includedTxs, tx) if hashError != nil { return nil, nil, NewError(ErrorMissingBlockhash, hashError) diff --git a/cmd/evm/internal/t8ntool/gen_header.go b/cmd/evm/internal/t8ntool/gen_header.go index 76228394dc..a8c8668978 100644 --- a/cmd/evm/internal/t8ntool/gen_header.go +++ b/cmd/evm/internal/t8ntool/gen_header.go @@ -18,23 +18,26 @@ var _ = (*headerMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (h header) MarshalJSON() ([]byte, error) { type header struct { - ParentHash common.Hash `json:"parentHash"` - OmmerHash *common.Hash `json:"sha3Uncles"` - Coinbase *common.Address `json:"miner"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot"` - ReceiptHash *common.Hash `json:"receiptsRoot"` - Bloom types.Bloom `json:"logsBloom"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` - GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` - GasUsed math.HexOrDecimal64 `json:"gasUsed"` - Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` - Extra hexutil.Bytes `json:"extraData"` - MixDigest common.Hash `json:"mixHash"` - Nonce *types.BlockNonce `json:"nonce"` - BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } var enc header enc.ParentHash = h.ParentHash @@ -54,29 +57,35 @@ func (h header) MarshalJSON() ([]byte, error) { enc.Nonce = h.Nonce enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee) enc.WithdrawalsHash = h.WithdrawalsHash + enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed) + enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas) + enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
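// Aside (a sketch, not part of the patch): a hand-rolled sample of the three
// new Cancun fields the regenerated decoder below accepts. The hex values are
// taken from the testdata/28 fixtures touched further down; the standard
// library is used here so the sketch stays self-contained.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	blob := []byte(`{
		"blobGasUsed": "0x20000",
		"excessBlobGas": "0x0",
		"parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
	}`)
	var fields map[string]string
	if err := json.Unmarshal(blob, &fields); err != nil {
		panic(err)
	}
	fmt.Println(fields["blobGasUsed"], fields["excessBlobGas"], fields["parentBeaconBlockRoot"])
}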
func (h *header) UnmarshalJSON(input []byte) error { type header struct { - ParentHash *common.Hash `json:"parentHash"` - OmmerHash *common.Hash `json:"sha3Uncles"` - Coinbase *common.Address `json:"miner"` - Root *common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot"` - ReceiptHash *common.Hash `json:"receiptsRoot"` - Bloom *types.Bloom `json:"logsBloom"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` - GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` - GasUsed *math.HexOrDecimal64 `json:"gasUsed"` - Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` - Extra *hexutil.Bytes `json:"extraData"` - MixDigest *common.Hash `json:"mixHash"` - Nonce *types.BlockNonce `json:"nonce"` - BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + ParentHash *common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root *common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom *types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed *math.HexOrDecimal64 `json:"gasUsed"` + Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra *hexutil.Bytes `json:"extraData"` + MixDigest *common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } var dec header if err := json.Unmarshal(input, &dec); err != nil { @@ -137,5 +146,14 @@ func (h *header) UnmarshalJSON(input []byte) error { if dec.WithdrawalsHash != nil { h.WithdrawalsHash = dec.WithdrawalsHash } + if dec.BlobGasUsed != nil { + h.BlobGasUsed = (*uint64)(dec.BlobGasUsed) + } + if dec.ExcessBlobGas != nil { + h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) + } + if dec.ParentBeaconBlockRoot != nil { + h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot + } return nil } diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index bb195ef64b..d47db4a876 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -33,7 +33,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) { Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` ParentUncleHash common.Hash `json:"parentUncleHash"` - ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` + ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` @@ -81,7 +81,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { Withdrawals []*types.Withdrawal 
`json:"withdrawals,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` ParentUncleHash *common.Hash `json:"parentUncleHash"` - ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` + ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 396b341d2e..600bc460f7 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -334,7 +334,7 @@ func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *pa txsWithKeys = inputData.Txs } // We may have to sign the transactions. - signer := types.MakeSigner(chainConfig, big.NewInt(int64(env.Number)), env.Timestamp) + signer := types.LatestSignerForChainID(chainConfig.ChainID) return signUnsignedTransactions(txsWithKeys, signer) } diff --git a/cmd/evm/testdata/28/env.json b/cmd/evm/testdata/28/env.json index 5056fe29a4..82f22ac62f 100644 --- a/cmd/evm/testdata/28/env.json +++ b/cmd/evm/testdata/28/env.json @@ -9,8 +9,7 @@ "parentDifficulty" : "0x00", "parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "withdrawals" : [ - ], + "withdrawals" : [], "parentBaseFee" : "0x0a", "parentGasUsed" : "0x00", "parentGasLimit" : "0x7fffffffffffffff", @@ -20,4 +19,4 @@ "0" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6" }, "parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" -} \ No newline at end of file +} diff --git a/cmd/evm/testdata/28/exp.json b/cmd/evm/testdata/28/exp.json index a55ce0aec4..75c715e972 100644 --- a/cmd/evm/testdata/28/exp.json +++ b/cmd/evm/testdata/28/exp.json @@ -42,6 +42,6 @@ "currentBaseFee": "0x9", "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "currentExcessBlobGas": "0x0", - "currentBlobGasUsed": "0x20000" + "blobGasUsed": "0x20000" } -} \ No newline at end of file +} diff --git a/cmd/evm/testdata/29/exp.json b/cmd/evm/testdata/29/exp.json index 83e1db26f9..c4c001ec14 100644 --- a/cmd/evm/testdata/29/exp.json +++ b/cmd/evm/testdata/29/exp.json @@ -40,6 +40,6 @@ "currentBaseFee": "0x9", "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "currentExcessBlobGas": "0x0", - "currentBlobGasUsed": "0x0" + "blobGasUsed": "0x0" } -} \ No newline at end of file +} From 339a4cf056bb202851e4bb221928e4309d74e175 Mon Sep 17 00:00:00 2001 From: 0xbstn Date: Tue, 3 Oct 2023 13:44:01 +0200 Subject: [PATCH 72/98] core: fix typos (#28238) --- core/types/hashing.go | 2 +- core/types/state_account.go | 2 +- core/types/transaction.go | 6 +++--- core/types/transaction_signing.go | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/core/types/hashing.go b/core/types/hashing.go index 9a6a80ac52..224d7a87ea 100644 --- a/core/types/hashing.go +++ b/core/types/hashing.go @@ -95,7 +95,7 @@ type DerivableList interface { func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { buf.Reset() list.EncodeIndex(i, buf) - // It's really unfortunate that we need to do perform this copy. 
+ // It's really unfortunate that we need to perform this copy. // StackTrie holds onto the values until Hash is called, so the values // written to it must not alias. return common.CopyBytes(buf.Bytes()) diff --git a/core/types/state_account.go b/core/types/state_account.go index 314f4943ec..ad07ca3f3a 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -87,7 +87,7 @@ func SlimAccountRLP(account StateAccount) []byte { return data } -// FullAccount decodes the data on the 'slim RLP' format and return +// FullAccount decodes the data on the 'slim RLP' format and returns // the consensus format account. func FullAccount(data []byte) (*StateAccount, error) { var slim SlimAccount diff --git a/core/types/transaction.go b/core/types/transaction.go index 78a1b9ba64..6f83c21d8f 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -168,7 +168,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { } // UnmarshalBinary decodes the canonical encoding of transactions. -// It supports legacy RLP transactions and EIP2718 typed transactions. +// It supports legacy RLP transactions and EIP-2718 typed transactions. func (tx *Transaction) UnmarshalBinary(b []byte) error { if len(b) > 0 && b[0] > 0x7f { // It's a legacy transaction. @@ -180,7 +180,7 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error { tx.setDecoded(&data, uint64(len(b))) return nil } - // It's an EIP2718 typed transaction envelope. + // It's an EIP-2718 typed transaction envelope. inner, err := tx.decodeTyped(b) if err != nil { return err @@ -395,7 +395,7 @@ func (tx *Transaction) BlobGasFeeCap() *big.Int { return nil } -// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise. +// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise. func (tx *Transaction) BlobHashes() []common.Hash { if blobtx, ok := tx.inner.(*BlobTx); ok { return blobtx.BlobHashes diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index cd57effcb1..9e26642f75 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -57,7 +57,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint } // LatestSigner returns the 'most permissive' Signer available for the given chain -// configuration. Specifically, this enables support of all types of transacrions +// configuration. Specifically, this enables support of all types of transactions // when their respective forks are scheduled to occur at any block number (or time) // in the chain config. // From 2091ebdf5e77c641f225114add1dcc3c2d50b270 Mon Sep 17 00:00:00 2001 From: Chirag Garg <38765776+DeVil2O@users.noreply.github.com> Date: Tue, 3 Oct 2023 17:16:22 +0530 Subject: [PATCH 73/98] trie: fix benchmark by ensuring key immutability (#28221) This change fixes the bug in a benchmark, where the input to the trie is reused in a way which is not correct. 
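To make the aliasing pitfall concrete, here is a stand-alone sketch (not part of the change): a store that retains references is fed the same backing array for every entry, so every earlier value is silently rewritten by the next iteration, which is what passing the reused key buffer `k` as the value did in the benchmark.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	store := make(map[uint64][]byte) // stands in for a structure that keeps references
	k := make([]byte, 32)
	for i := uint64(0); i < 3; i++ {
		binary.LittleEndian.PutUint64(k, i)
		store[i] = k // bug: every entry aliases the same backing array
	}
	fmt.Println(binary.LittleEndian.Uint64(store[0])) // prints 2, not 0

	for i := uint64(0); i < 3; i++ {
		v := make([]byte, 32) // fix, as in the patch: a fresh value per insert
		binary.LittleEndian.PutUint64(v, i)
		store[i] = v
	}
	fmt.Println(binary.LittleEndian.Uint64(store[0])) // prints 0
}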
--------- Co-authored-by: Martin Holst Swende --- trie/trie_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/trie/trie_test.go b/trie/trie_test.go index 35ccc77201..8078770e7a 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -614,7 +614,9 @@ func benchGet(b *testing.B) { k := make([]byte, 32) for i := 0; i < benchElemCount; i++ { binary.LittleEndian.PutUint64(k, uint64(i)) - trie.MustUpdate(k, k) + v := make([]byte, 32) + binary.LittleEndian.PutUint64(v, uint64(i)) + trie.MustUpdate(k, v) } binary.LittleEndian.PutUint64(k, benchElemCount/2) @@ -630,8 +632,10 @@ func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie { k := make([]byte, 32) b.ReportAllocs() for i := 0; i < b.N; i++ { + v := make([]byte, 32) e.PutUint64(k, uint64(i)) - trie.MustUpdate(k, k) + e.PutUint64(v, uint64(i)) + trie.MustUpdate(k, v) } return trie } From 7963c4e808811048a20ebbe37b10b1d3aff14d7a Mon Sep 17 00:00:00 2001 From: hyunchel <3271191+hyunchel@users.noreply.github.com> Date: Tue, 3 Oct 2023 07:48:36 -0400 Subject: [PATCH 74/98] rpc: fix erroneous error-message in test (#28227) --- rpc/server_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/rpc/server_test.go b/rpc/server_test.go index 47a15b610a..9d1c7fb5f0 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -32,7 +32,8 @@ func TestServerRegisterName(t *testing.T) { server := NewServer() service := new(testService) - if err := server.RegisterName("test", service); err != nil { + svcName := "test" + if err := server.RegisterName(svcName, service); err != nil { t.Fatalf("%v", err) } @@ -40,9 +41,9 @@ func TestServerRegisterName(t *testing.T) { t.Fatalf("Expected 2 service entries, got %d", len(server.services.services)) } - svc, ok := server.services.services["test"] + svc, ok := server.services.services[svcName] if !ok { - t.Fatalf("Expected service calc to be registered") + t.Fatalf("Expected service %s to be registered", svcName) } wantCallbacks := 14 From bc6d184872889224480cf9df58b0539b210ffa9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 3 Oct 2023 15:03:19 +0300 Subject: [PATCH 75/98] cmd/devp2p, eth: drop eth/66 (#28239) * cmd/devp2p, eth: drop eth/66 * eth/protocols/eth: yes sir, linter --- cmd/devp2p/internal/ethtest/chain_test.go | 6 +- cmd/devp2p/internal/ethtest/helpers.go | 13 +- cmd/devp2p/internal/ethtest/suite.go | 38 ++-- cmd/devp2p/internal/ethtest/types.go | 26 +-- eth/downloader/downloader_test.go | 136 ++++++------ eth/downloader/fetchers.go | 8 +- eth/downloader/fetchers_concurrent_bodies.go | 2 +- eth/downloader/fetchers_concurrent_headers.go | 2 +- .../fetchers_concurrent_receipts.go | 2 +- eth/downloader/skeleton.go | 2 +- eth/downloader/skeleton_test.go | 6 +- eth/fetcher/block_fetcher.go | 4 +- eth/fetcher/block_fetcher_test.go | 4 +- eth/handler.go | 2 +- eth/handler_eth.go | 4 +- eth/handler_eth_test.go | 15 +- eth/protocols/eth/handler.go | 66 ++---- eth/protocols/eth/handler_test.go | 208 ++++-------------- eth/protocols/eth/handlers.go | 138 ++++-------- eth/protocols/eth/handshake_test.go | 3 +- eth/protocols/eth/peer.go | 98 +++------ eth/protocols/eth/protocol.go | 197 ++++++++--------- eth/protocols/eth/protocol_test.go | 102 ++++----- eth/sync_test.go | 2 +- 24 files changed, 401 insertions(+), 683 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go index 67221923a6..de6acfdcda 100644 --- a/cmd/devp2p/internal/ethtest/chain_test.go +++ 
b/cmd/devp2p/internal/ethtest/chain_test.go @@ -145,7 +145,7 @@ func TestChain_GetHeaders(t *testing.T) { }{ { req: GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Number: uint64(2)}, Amount: uint64(5), Skip: 1, @@ -162,7 +162,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)}, Amount: uint64(3), Skip: 0, @@ -177,7 +177,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Hash: chain.Head().Hash()}, Amount: uint64(1), Skip: 0, diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go index bc901bdeb0..a0339b88cb 100644 --- a/cmd/devp2p/internal/ethtest/helpers.go +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -62,7 +62,6 @@ func (s *Suite) dial() (*Conn, error) { } // set default p2p capabilities conn.caps = []p2p.Cap{ - {Name: "eth", Version: 66}, {Name: "eth", Version: 67}, {Name: "eth", Version: 68}, } @@ -237,8 +236,8 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message { return errorf("could not get headers for inbound header request: %v", err) } resp := &BlockHeaders{ - RequestId: msg.ReqID(), - BlockHeadersPacket: eth.BlockHeadersPacket(headers), + RequestId: msg.ReqID(), + BlockHeadersRequest: eth.BlockHeadersRequest(headers), } if err := c.Write(resp); err != nil { return errorf("could not write to connection: %v", err) @@ -267,7 +266,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint if !ok { return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg)) } - headers := []*types.Header(resp.BlockHeadersPacket) + headers := []*types.Header(resp.BlockHeadersRequest) return headers, nil } @@ -379,7 +378,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error { conn.SetReadDeadline(time.Now().Add(20 * time.Second)) // create request req := &GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Hash: block.Hash()}, Amount: 1, }, @@ -604,8 +603,8 @@ func (s *Suite) hashAnnounce() error { pretty.Sdump(blockHeaderReq)) } err = sendConn.Write(&BlockHeaders{ - RequestId: blockHeaderReq.ReqID(), - BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()}, + RequestId: blockHeaderReq.ReqID(), + BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()}, }) if err != nil { return fmt.Errorf("failed to write to connection: %v", err) diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 815353be72..0b56c8cf4b 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { } // write request req := &GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()}, Amount: 2, Skip: 1, @@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { // create two requests req1 := &GetBlockHeaders{ RequestId: uint64(111), - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + 
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { } req2 := &GetBlockHeaders{ RequestId: uint64(222), - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, @@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected headers for request 2: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersPacket) { + if !headersMatch(expected1, headers1.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersPacket) { + if !headersMatch(expected2, headers2.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) } } @@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { reqID := uint64(1234) request1 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Number: 1, }, @@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { } request2 := &GetBlockHeaders{ RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Number: 33, }, @@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) { if err != nil { t.Fatalf("failed to get expected block headers: %v", err) } - if !headersMatch(expected1, headers1.BlockHeadersPacket) { + if !headersMatch(expected1, headers1.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) } - if !headersMatch(expected2, headers2.BlockHeadersPacket) { + if !headersMatch(expected2, headers2.BlockHeadersRequest) { t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) } } @@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) { t.Fatalf("peering failed: %v", err) } req := &GetBlockHeaders{ - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + GetBlockHeadersRequest: ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{Number: 0}, Amount: 2, }, @@ -322,7 +322,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { // create block bodies request req := &GetBlockBodies{ RequestId: uint64(55), - GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ + GetBlockBodiesRequest: eth.GetBlockBodiesRequest{ s.chain.blocks[54].Hash(), s.chain.blocks[75].Hash(), }, @@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { if !ok { t.Fatalf("unexpected: %s", pretty.Sdump(msg)) } - bodies := resp.BlockBodiesPacket + bodies := resp.BlockBodiesResponse t.Logf("received %d block bodies", len(bodies)) - if len(bodies) != len(req.GetBlockBodiesPacket) { + if len(bodies) != len(req.GetBlockBodiesRequest) { t.Fatalf("wrong bodies in response: expected %d bodies, "+ - "got %d", len(req.GetBlockBodiesPacket), len(bodies)) + "got %d", len(req.GetBlockBodiesRequest), len(bodies)) } } @@ -481,8 +481,8 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { hashes = append(hashes, hash) } getTxReq := &GetPooledTransactions{ - RequestId: 1234, - GetPooledTransactionsPacket: hashes, + RequestId: 1234, + GetPooledTransactionsRequest: hashes, } if err = conn.Write(getTxReq); err != nil { t.Fatalf("could not write to conn: %v", err) @@ -490,7 +490,7 
@@ func (s *Suite) TestLargeTxRequest(t *utesting.T) { // check that all received transactions match those that were sent to node switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { case *PooledTransactions: - for _, gotTx := range msg.PooledTransactionsPacket { + for _, gotTx := range msg.PooledTransactionsResponse { if _, exists := hashMap[gotTx.Hash()]; !exists { t.Fatalf("unexpected tx received: %v", gotTx.Hash()) } @@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) { msg := conn.readAndServe(s.chain, timeout) switch msg := msg.(type) { case *GetPooledTransactions: - if len(msg.GetPooledTransactionsPacket) != len(hashes) { - t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket)) + if len(msg.GetPooledTransactionsRequest) != len(hashes) { + t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest)) } return diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index afa9a9c8c6..805d7a81b9 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 } func (msg Transactions) ReqID() uint64 { return 18 } // GetBlockHeaders represents a block header query. -type GetBlockHeaders eth.GetBlockHeadersPacket66 +type GetBlockHeaders eth.GetBlockHeadersPacket func (msg GetBlockHeaders) Code() int { return 19 } func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId } -type BlockHeaders eth.BlockHeadersPacket66 +type BlockHeaders eth.BlockHeadersPacket func (msg BlockHeaders) Code() int { return 20 } func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId } // GetBlockBodies represents a GetBlockBodies request -type GetBlockBodies eth.GetBlockBodiesPacket66 +type GetBlockBodies eth.GetBlockBodiesPacket func (msg GetBlockBodies) Code() int { return 21 } func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId } // BlockBodies is the network packet for block content distribution. -type BlockBodies eth.BlockBodiesPacket66 +type BlockBodies eth.BlockBodiesPacket func (msg BlockBodies) Code() int { return 22 } func (msg BlockBodies) ReqID() uint64 { return msg.RequestId } @@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 } func (msg NewBlock) ReqID() uint64 { return 0 } // NewPooledTransactionHashes66 is the network packet for the tx hash propagation message. 
-type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66 +type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67 func (msg NewPooledTransactionHashes66) Code() int { return 24 } func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 } @@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68 func (msg NewPooledTransactionHashes) Code() int { return 24 } func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 } -type GetPooledTransactions eth.GetPooledTransactionsPacket66 +type GetPooledTransactions eth.GetPooledTransactionsPacket func (msg GetPooledTransactions) Code() int { return 25 } func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId } -type PooledTransactions eth.PooledTransactionsPacket66 +type PooledTransactions eth.PooledTransactionsPacket func (msg PooledTransactions) Code() int { return 26 } func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId } @@ -180,25 +180,25 @@ func (c *Conn) Read() Message { case (Status{}).Code(): msg = new(Status) case (GetBlockHeaders{}).Code(): - ethMsg := new(eth.GetBlockHeadersPacket66) + ethMsg := new(eth.GetBlockHeadersPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockHeaders)(ethMsg) case (BlockHeaders{}).Code(): - ethMsg := new(eth.BlockHeadersPacket66) + ethMsg := new(eth.BlockHeadersPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*BlockHeaders)(ethMsg) case (GetBlockBodies{}).Code(): - ethMsg := new(eth.GetBlockBodiesPacket66) + ethMsg := new(eth.GetBlockBodiesPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetBlockBodies)(ethMsg) case (BlockBodies{}).Code(): - ethMsg := new(eth.BlockBodiesPacket66) + ethMsg := new(eth.BlockBodiesPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } @@ -217,13 +217,13 @@ func (c *Conn) Read() Message { } msg = new(NewPooledTransactionHashes66) case (GetPooledTransactions{}.Code()): - ethMsg := new(eth.GetPooledTransactionsPacket66) + ethMsg := new(eth.GetPooledTransactionsPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } return (*GetPooledTransactions)(ethMsg) case (PooledTransactions{}.Code()): - ethMsg := new(eth.PooledTransactionsPacket66) + ethMsg := new(eth.PooledTransactionsPacket) if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { return errorf("could not rlp decode message: %v", err) } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index ffe445ea88..e4875b959a 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -177,7 +177,7 @@ func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { // function can be used to retrieve batches of headers from the particular peer. 
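// Aside (a sketch, not part of the patch): the renames in this commit follow
// one pattern. Every request has carried a RequestId wrapper since eth/66, so
// the "66" suffix stopped meaning anything: the wrapper type is now plain
// GetBlockHeadersPacket and its payload is GetBlockHeadersRequest. Assuming
// the post-rename shape visible in this diff:
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	req := eth.GetBlockHeadersPacket{
		RequestId: 33,
		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
			Origin: eth.HashOrNumber{Number: 1},
			Amount: 2,
		},
	}
	fmt.Println(req.RequestId, req.Amount) // 33 2
}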
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Hash: origin, }, @@ -205,7 +205,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -221,7 +221,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i // function can be used to retrieve batches of headers from the particular peer. func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ Origin: eth.HashOrNumber{ Number: origin, }, @@ -249,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -286,7 +286,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesPacket)(&bodies), + Res: (*eth.BlockBodiesResponse)(&bodies), Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -317,7 +317,7 @@ func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan * } res := ð.Response{ Req: req, - Res: (*eth.ReceiptsPacket)(&receipts), + Res: (*eth.ReceiptsResponse)(&receipts), Meta: hashes, Time: 1, Done: make(chan error, 1), // Ignore the returned status @@ -437,9 +437,9 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { } } -func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } -func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) } -func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } +func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } +func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } +func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) } func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) } func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) } @@ -461,8 +461,8 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a large batch of blocks are being downloaded, it is throttled // until the cached blocks are retrieved. 
-func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } -func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) } +func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) } +func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) } func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) } func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) } @@ -543,9 +543,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Tests that simple synchronization against a forked chain works correctly. In // this test common ancestor lookup should *not* be short circuited, and a full // binary search should be executed. -func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } -func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) } -func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } +func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) } +func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) } +func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) } func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) } func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) } func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) } @@ -573,9 +573,9 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronising against a much shorter but much heavier fork works // currently and is not dropped. -func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } -func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } -func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } +func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) } +func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) } +func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) } func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) } func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) } func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) } @@ -605,9 +605,9 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head, ensuring that malicious peers cannot waste resources by feeding // long dead chains. 
-func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) } -func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) } -func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) } +func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) } +func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) } +func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) } func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) } func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) } func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) } @@ -636,14 +636,14 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head for short but heavy forks too. These are a bit special because they // take different ancestor lookup paths. -func TestBoundedHeavyForkedSync66Full(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, FullSync) +func TestBoundedHeavyForkedSync68Full(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH68, FullSync) } -func TestBoundedHeavyForkedSync66Snap(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) +func TestBoundedHeavyForkedSync68Snap(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync) } -func TestBoundedHeavyForkedSync66Light(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) +func TestBoundedHeavyForkedSync68Light(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH68, LightSync) } func TestBoundedHeavyForkedSync67Full(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH67, FullSync) @@ -678,9 +678,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { } // Tests that a canceled download wipes all previously accumulated state. -func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) } -func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) } -func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) } +func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } +func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } +func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) } func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) } func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) } @@ -708,9 +708,9 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { } // Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
-func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } -func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) } -func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } +func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) } +func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) } +func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) } func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) } func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) } func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) } @@ -735,9 +735,9 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. -func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } -func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) } -func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } +func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } +func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } +func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) } func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) } func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) } @@ -750,7 +750,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { chain := testChainBase.shorten(blockCacheMaxItems - 15) // Create peers of every type - tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) + tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:]) // Synchronise with the requested peer and make sure all blocks were retrieved @@ -760,7 +760,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { assertOwnChain(t, tester, len(chain.blocks)) // Check that no peers have been dropped off - for _, version := range []int{66, 67} { + for _, version := range []int{68, 67} { peer := fmt.Sprintf("peer %d", version) if _, ok := tester.peers[peer]; !ok { t.Errorf("%s dropped", peer) @@ -770,9 +770,9 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. 
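The tail of testMultiProtoSync above asserts that syncing via one protocol version never drops peers speaking the other. A self-contained toy of that check (hypothetical helper; the real test indexes the downloadTester's peer map directly):

package main

import "fmt"

// missingPeers reports which of the expected protocol versions no longer
// have a live connection.
func missingPeers(connected map[string]bool, versions ...int) []string {
    var missing []string
    for _, version := range versions {
        if id := fmt.Sprintf("peer %d", version); !connected[id] {
            missing = append(missing, id)
        }
    }
    return missing
}

func main() {
    connected := map[string]bool{"peer 68": true, "peer 67": true}
    fmt.Println(missingPeers(connected, 68, 67)) // []: nobody was dropped
}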
-func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } -func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) } -func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } +func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } +func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } +func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) } func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) } func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) } @@ -821,9 +821,9 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { // Tests that headers are enqueued continuously, preventing malicious nodes from // stalling the downloader by feeding gapped header chains. -func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } -func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) } -func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } +func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) } +func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) } +func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) } func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) } func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) } func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) } @@ -850,9 +850,9 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that if requested headers are shifted (i.e. first is missing), the queue // detects the invalid numbering. -func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } -func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) } -func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } +func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) } +func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) } +func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) } func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) } func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) } func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) } @@ -880,14 +880,14 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { // Tests that a peer advertising a high TD doesn't get to stall the downloader // afterwards by not sending any useful hashes. 
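Backing up to the missing and shifted header attacks above: both feed the queue a batch that is broken in a specific way, either a gap in the middle or a first header that is not the requested origin. A toy version of the numbering invariant such batches must fail:

package main

import "fmt"

// validBatch checks that a header batch starts at the requested origin and
// is contiguous; the two attacks violate one condition each.
func validBatch(origin uint64, numbers []uint64) error {
    for i, n := range numbers {
        if want := origin + uint64(i); n != want {
            return fmt.Errorf("invalid numbering at index %d: have %d, want %d", i, n, want)
        }
    }
    return nil
}

func main() {
    fmt.Println(validBatch(10, []uint64{10, 11, 12})) // <nil>
    fmt.Println(validBatch(10, []uint64{11, 12, 13})) // shifted start
    fmt.Println(validBatch(10, []uint64{10, 12, 13})) // gapped chain
}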
-func TestHighTDStarvationAttack66Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, FullSync) +func TestHighTDStarvationAttack68Full(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH68, FullSync) } -func TestHighTDStarvationAttack66Snap(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, SnapSync) +func TestHighTDStarvationAttack68Snap(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH68, SnapSync) } -func TestHighTDStarvationAttack66Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, LightSync) +func TestHighTDStarvationAttack68Light(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH68, LightSync) } func TestHighTDStarvationAttack67Full(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH67, FullSync) @@ -911,7 +911,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { } // Tests that misbehaving peers are disconnected, whilst behaving ones are not. -func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } +func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) } func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) } func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { @@ -960,9 +960,9 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. -func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } -func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) } -func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } +func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } +func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } +func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) } func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) } func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) } @@ -1040,9 +1040,9 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync // Tests that synchronisation progress (origin block number and highest block // number) is tracked and updated correctly in case of a fork (or manual head // revertal). 
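The progress tests above funnel their assertions through checkProgress, which compares ethereum.SyncProgress values. A minimal sketch of that comparison, assuming only the block-oriented fields matter for these tests (the real struct also carries snap-sync counters):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum"
)

// progressEqual pins the origin, current and highest block of a sync cycle.
func progressEqual(have, want ethereum.SyncProgress) bool {
    return have.StartingBlock == want.StartingBlock &&
        have.CurrentBlock == want.CurrentBlock &&
        have.HighestBlock == want.HighestBlock
}

func main() {
    have := ethereum.SyncProgress{StartingBlock: 0, CurrentBlock: 64, HighestBlock: 128}
    want := ethereum.SyncProgress{StartingBlock: 0, CurrentBlock: 64, HighestBlock: 128}
    fmt.Println(progressEqual(have, want)) // true
}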
-func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } -func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) } -func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } +func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) } +func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) } +func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) } func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) } func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) } func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) } @@ -1114,9 +1114,9 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if synchronisation is aborted due to some failure, then the progress // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. -func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } -func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) } -func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } +func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) } +func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) } +func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) } func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) } func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) } func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) } @@ -1183,9 +1183,9 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Tests that if an attacker fakes a chain height, after the attack is detected, // the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } -func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) } -func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } +func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) } +func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) } +func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) } func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) } func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) } func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) } @@ -1330,8 +1330,10 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. 
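The fetcher hunks that follow all sit on top of the downloader's request/response dispatcher, which is why every renamed wrapper keeps its RequestId field: a response is matched to an in-flight request by echoing that id back. A toy version of the matching step (names hypothetical):

package main

import "fmt"

// pending tracks one in-flight request; the dispatcher delivers the typed
// payload on its channel when a response with the matching id arrives.
type pending struct {
    deliver chan []string
}

func main() {
    inflight := map[uint64]pending{42: {deliver: make(chan []string, 1)}}

    // A response arrives, echoing back the id attached to the request.
    respID, headers := uint64(42), []string{"header-1", "header-2"}

    p, ok := inflight[respID]
    if !ok {
        fmt.Println("unsolicited response, dropped")
        return
    }
    delete(inflight, respID) // each id is answered at most once
    p.deliver <- headers

    fmt.Println(<-p.deliver) // [header-1 header-2]
}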
-func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) } -func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) } +func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) } +func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) } +func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) } +func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) } func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go index 021e8c4f9b..cc4279b0da 100644 --- a/eth/downloader/fetchers.go +++ b/eth/downloader/fetchers.go @@ -58,14 +58,14 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil } } @@ -103,13 +103,13 @@ func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amou case res := <-resCh: // Headers successfully retrieved, update the metrics headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) // Don't reject the packet even if it turns out to be bad, downloader will // disconnect the peer on its own terms. Simply delivery the headers to // be processed by the caller res.Done <- nil - return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil } } diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index 9440972c6d..5105fda66b 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -89,7 +89,7 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the body data and delivering it to the downloader's queue. 
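fetchHeadersByHash and fetchHeadersByNumber above consume a dispatcher response the same way: block on the channel, unwrap the renamed payload type, acknowledge via Done without rejecting (peer quality is policed elsewhere), and return the payload together with the hash metadata computed at decode time. The same flow in miniature, with toy types:

package main

import "fmt"

// Toy stand-ins for eth.Response and the renamed headers payload.
type BlockHeadersRequest []string // pretend []*types.Header

type Response struct {
    Res  interface{} // *BlockHeadersRequest on success
    Meta interface{} // header hashes, precomputed when the message was decoded
    Done chan error  // acknowledgement back to the dispatcher
}

func awaitHeaders(resCh chan *Response) ([]string, []string) {
    res := <-resCh
    headers := *res.Res.(*BlockHeadersRequest)
    hashes := res.Meta.([]string)
    res.Done <- nil // accept unconditionally; bad data is punished downstream
    return headers, hashes
}

func main() {
    resCh := make(chan *Response, 1)
    payload := BlockHeadersRequest{"h1", "h2"}
    resCh <- &Response{Res: &payload, Meta: []string{"0xaa", "0xbb"}, Done: make(chan error, 1)}
    headers, hashes := awaitHeaders(resCh)
    fmt.Println(headers, hashes)
}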
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack() + txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack() hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes} accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2]) diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go index 84c7f20986..8201f4ca74 100644 --- a/eth/downloader/fetchers_concurrent_headers.go +++ b/eth/downloader/fetchers_concurrent_headers.go @@ -81,7 +81,7 @@ func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh cha // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the header data and delivering it to the downloader's queue. func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - headers := *packet.Res.(*eth.BlockHeadersPacket) + headers := *packet.Res.(*eth.BlockHeadersRequest) hashes := packet.Meta.([]common.Hash) accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) diff --git a/eth/downloader/fetchers_concurrent_receipts.go b/eth/downloader/fetchers_concurrent_receipts.go index 1c853c2184..3169f030ba 100644 --- a/eth/downloader/fetchers_concurrent_receipts.go +++ b/eth/downloader/fetchers_concurrent_receipts.go @@ -88,7 +88,7 @@ func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh ch // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the receipt data and delivering it to the downloader's queue. 
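bodyQueue.deliver above leans on BlockBodiesResponse.Unpack to split each body into its transaction, uncle and withdrawal lists, while packet.Meta carries the matching hash sets in the same order ({txs hashes, uncle hashes, withdrawal hashes}). A toy Unpack with the same shape:

package main

import "fmt"

// body is a stand-in for an eth block body.
type body struct {
    Transactions []string
    Uncles       []string
    Withdrawals  []string
}

type BlockBodiesResponse []body

// Unpack splits the response into per-field slices, mirroring how deliver
// hands transactions, uncles and withdrawals to the queue.
func (r BlockBodiesResponse) Unpack() (txs, uncles, withdrawals [][]string) {
    for _, b := range r {
        txs = append(txs, b.Transactions)
        uncles = append(uncles, b.Uncles)
        withdrawals = append(withdrawals, b.Withdrawals)
    }
    return txs, uncles, withdrawals
}

func main() {
    res := BlockBodiesResponse{{Transactions: []string{"tx1"}, Withdrawals: []string{"w1"}}}
    txs, uncles, withdrawals := res.Unpack()
    fmt.Println(len(txs), len(uncles), len(withdrawals)) // 1 1 1
}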
func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - receipts := *packet.Res.(*eth.ReceiptsPacket) + receipts := *packet.Res.(*eth.ReceiptsResponse) hashes := packet.Meta.([]common.Hash) // {receipt hashes} accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes) diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index a07e1695f5..4f1f462048 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -794,7 +794,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) { case res := <-resCh: // Headers successfully retrieved, update the metrics - headers := *res.Res.(*eth.BlockHeadersPacket) + headers := *res.Res.(*eth.BlockHeadersRequest) headerReqTimer.Update(time.Since(start)) s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers)) diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 6a76d78ac8..c31007765a 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -173,7 +173,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Meta: hashes, Time: 1, Done: make(chan error), @@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { // Create a peer set to feed headers through peerset := newPeerSet() for _, peer := range tt.peers { - peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id))) + peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id))) } // Create a peer dropper to track malicious peers dropped := make(map[string]int) @@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { skeleton.Sync(tt.newHead, nil, true) } if tt.newPeer != nil { - if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { + if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { t.Errorf("test %d: failed to register new peer: %v", i, err) } } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 35608031d9..8751c4e3ea 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -483,7 +483,7 @@ func (f *BlockFetcher) loop() { select { case res := <-resCh: res.Done <- nil - f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time)) + f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersRequest), time.Now().Add(res.Time)) case <-timeout.C: // The peer didn't respond in time. The request @@ -541,7 +541,7 @@ func (f *BlockFetcher) loop() { case res := <-resCh: res.Done <- nil // Ignoring withdrawals here, since the block fetcher is not used post-merge. 
- txs, uncles, _ := res.Res.(*eth.BlockBodiesPacket).Unpack() + txs, uncles, _ := res.Res.(*eth.BlockBodiesResponse).Unpack() f.FilterBodies(peer, txs, uncles, time.Now()) case <-timeout.C: diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 7c490df3f7..6927300b1d 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -213,7 +213,7 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t } res := ð.Response{ Req: req, - Res: (*eth.BlockHeadersPacket)(&headers), + Res: (*eth.BlockHeadersRequest)(&headers), Time: drift, Done: make(chan error, 1), // Ignore the returned status } @@ -255,7 +255,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ } res := ð.Response{ Req: req, - Res: (*eth.BlockBodiesPacket)(&bodies), + Res: (*eth.BlockBodiesResponse)(&bodies), Time: drift, Done: make(chan error, 1), // Ignore the returned status } diff --git a/eth/handler.go b/eth/handler.go index 33b9683740..f731efe1b8 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -414,7 +414,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { select { case res := <-resCh: - headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) + headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest)) if len(headers) == 0 { // Required blocks are allowed to be missing if the remote // node is not yet synced diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 2aba16f928..3a0944640e 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -66,7 +66,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { case *eth.NewBlockPacket: return h.handleBlockBroadcast(peer, packet.Block, packet.TD) - case *eth.NewPooledTransactionHashesPacket66: + case *eth.NewPooledTransactionHashesPacket67: return h.txFetcher.Notify(peer.ID(), *packet) case *eth.NewPooledTransactionHashesPacket68: @@ -75,7 +75,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { case *eth.TransactionsPacket: return h.txFetcher.Enqueue(peer.ID(), *packet, false) - case *eth.PooledTransactionsPacket: + case *eth.PooledTransactionsResponse: return h.txFetcher.Enqueue(peer.ID(), *packet, true) default: diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index a16abc5ed6..4cdfdf47b8 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -58,7 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.blockBroadcasts.Send(packet.Block) return nil - case *eth.NewPooledTransactionHashesPacket66: + case *eth.NewPooledTransactionHashesPacket67: h.txAnnounces.Send(([]common.Hash)(*packet)) return nil @@ -70,7 +70,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil - case *eth.PooledTransactionsPacket: + case *eth.PooledTransactionsResponse: h.txBroadcasts.Send(([]*types.Transaction)(*packet)) return nil @@ -81,7 +81,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { // Tests that peers are correctly accepted (or rejected) based on the advertised // fork IDs in the protocol handshake. 
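The ethHandler.Handle switch above distinguishes the two announcement formats purely by Go type: eth/67 announces bare hashes, while the real NewPooledTransactionHashesPacket68 also carries a per-transaction type byte and size, letting peers decide what to fetch before downloading anything. A toy type switch in the same style:

package main

import "fmt"

// Toy announcement packets mirroring the eth/67 and eth/68 shapes.
type NewPooledTransactionHashesPacket67 []string

type NewPooledTransactionHashesPacket68 struct {
    Types  []byte
    Sizes  []uint32
    Hashes []string
}

func handle(packet interface{}) {
    switch p := packet.(type) {
    case *NewPooledTransactionHashesPacket67:
        fmt.Printf("eth/67 announce: %d hashes\n", len(*p))
    case *NewPooledTransactionHashesPacket68:
        fmt.Printf("eth/68 announce: %d hashes with types and sizes\n", len(p.Hashes))
    default:
        fmt.Println("unexpected packet type")
    }
}

func main() {
    handle(&NewPooledTransactionHashesPacket67{"0xaa"})
    handle(&NewPooledTransactionHashesPacket68{Types: []byte{2}, Sizes: []uint32{114}, Hashes: []string{"0xbb"}})
}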
-func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) } func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) } func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) } @@ -237,7 +236,6 @@ func testForkIDSplit(t *testing.T, protocol uint) { } // Tests that received transactions are added to the local pool. -func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) } func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) } func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) } @@ -296,7 +294,6 @@ func testRecvTransactions(t *testing.T, protocol uint) { } // This test checks that pending transactions are sent. -func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) } func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) } func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) } @@ -356,7 +353,7 @@ func testSendTransactions(t *testing.T, protocol uint) { seen := make(map[common.Hash]struct{}) for len(seen) < len(insert) { switch protocol { - case 66, 67, 68: + case 67, 68: select { case hashes := <-anns: for _, hash := range hashes { @@ -382,7 +379,6 @@ func testSendTransactions(t *testing.T, protocol uint) { // Tests that transactions get propagated to all attached peers, either via direct // broadcasts or via announcements/retrievals. -func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) } func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) } func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) } @@ -490,8 +486,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { defer sourcePipe.Close() defer sinkPipe.Close() - sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) - sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) + sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) + sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) defer sourcePeer.Close() defer sinkPeer.Close() @@ -543,7 +539,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { // Tests that a propagated malformed block (uncles or transactions don't match // with the hashes in the header) gets discarded and not broadcast forward. -func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) } func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) } func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) } diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index b2ce883bc5..a7d6ed25a9 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -23,7 +23,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" @@ -45,10 +44,6 @@ const ( // nowadays, the practical limit will always be softResponseLimit. maxBodiesServe = 1024 - // maxNodeDataServe is the maximum number of state trie nodes to serve. 
This - // number is there to limit the number of disk lookups. - maxNodeDataServe = 1024 - // maxReceiptsServe is the maximum number of block receipts to serve. This // number is mostly there to limit the number of disk lookups. With block // containing 200+ transactions nowadays, the practical limit will always @@ -100,10 +95,6 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2 for _, version := range ProtocolVersions { version := version // Closure - // Path scheme does not support GetNodeData, don't advertise eth66 on it - if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme { - continue - } protocols = append(protocols, p2p.Protocol{ Name: ProtocolName, Version: version, @@ -171,36 +162,19 @@ type Decoder interface { Time() time.Time } -var eth66 = map[uint64]msgHandler{ - NewBlockHashesMsg: handleNewBlockhashes, - NewBlockMsg: handleNewBlock, - TransactionsMsg: handleTransactions, - NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, - GetBlockHeadersMsg: handleGetBlockHeaders66, - BlockHeadersMsg: handleBlockHeaders66, - GetBlockBodiesMsg: handleGetBlockBodies66, - BlockBodiesMsg: handleBlockBodies66, - GetNodeDataMsg: handleGetNodeData66, - NodeDataMsg: handleNodeData66, - GetReceiptsMsg: handleGetReceipts66, - ReceiptsMsg: handleReceipts66, - GetPooledTransactionsMsg: handleGetPooledTransactions66, - PooledTransactionsMsg: handlePooledTransactions66, -} - var eth67 = map[uint64]msgHandler{ NewBlockHashesMsg: handleNewBlockhashes, NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, - NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, - GetBlockHeadersMsg: handleGetBlockHeaders66, - BlockHeadersMsg: handleBlockHeaders66, - GetBlockBodiesMsg: handleGetBlockBodies66, - BlockBodiesMsg: handleBlockBodies66, - GetReceiptsMsg: handleGetReceipts66, - ReceiptsMsg: handleReceipts66, - GetPooledTransactionsMsg: handleGetPooledTransactions66, - PooledTransactionsMsg: handlePooledTransactions66, + NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67, + GetBlockHeadersMsg: handleGetBlockHeaders, + BlockHeadersMsg: handleBlockHeaders, + GetBlockBodiesMsg: handleGetBlockBodies, + BlockBodiesMsg: handleBlockBodies, + GetReceiptsMsg: handleGetReceipts, + ReceiptsMsg: handleReceipts, + GetPooledTransactionsMsg: handleGetPooledTransactions, + PooledTransactionsMsg: handlePooledTransactions, } var eth68 = map[uint64]msgHandler{ @@ -208,14 +182,14 @@ var eth68 = map[uint64]msgHandler{ NewBlockMsg: handleNewBlock, TransactionsMsg: handleTransactions, NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68, - GetBlockHeadersMsg: handleGetBlockHeaders66, - BlockHeadersMsg: handleBlockHeaders66, - GetBlockBodiesMsg: handleGetBlockBodies66, - BlockBodiesMsg: handleBlockBodies66, - GetReceiptsMsg: handleGetReceipts66, - ReceiptsMsg: handleReceipts66, - GetPooledTransactionsMsg: handleGetPooledTransactions66, - PooledTransactionsMsg: handlePooledTransactions66, + GetBlockHeadersMsg: handleGetBlockHeaders, + BlockHeadersMsg: handleBlockHeaders, + GetBlockBodiesMsg: handleGetBlockBodies, + BlockBodiesMsg: handleBlockBodies, + GetReceiptsMsg: handleGetReceipts, + ReceiptsMsg: handleReceipts, + GetPooledTransactionsMsg: handleGetPooledTransactions, + PooledTransactionsMsg: handlePooledTransactions, } // handleMessage is invoked whenever an inbound message is received from a remote @@ -231,14 +205,10 @@ func handleMessage(backend Backend, peer *Peer) error { } defer msg.Discard() - var 
handlers = eth66 - if peer.Version() == ETH67 { - handlers = eth67 - } + var handlers = eth67 if peer.Version() >= ETH68 { handlers = eth68 } - // Track the amount of time it takes to serve the request and run the handler if metrics.Enabled { h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index bf2874721a..41e18bfb3e 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" @@ -151,7 +150,6 @@ func (b *testBackend) Handle(*Peer, Packet) error { } // Tests that block headers can be retrieved from a remote chain based on user queries. -func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) } func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) } func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) } @@ -178,29 +176,29 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { // Create a batch of tests for various scenarios limit := uint64(maxHeadersServe) tests := []struct { - query *GetBlockHeadersPacket // The query to execute for header retrieval - expect []common.Hash // The hashes of the block whose headers are expected + query *GetBlockHeadersRequest // The query to execute for header retrieval + expect []common.Hash // The hashes of the block whose headers are expected }{ // A single random block should be retrievable by hash { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // A single random block should be retrievable by number { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, // Multiple headers should be retrievable in both directions { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 1).Hash(), backend.chain.GetBlockByNumber(limit/2 + 2).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 1).Hash(), @@ -209,14 +207,14 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Multiple headers with skip lists should be retrievable { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 4).Hash(), 
backend.chain.GetBlockByNumber(limit/2 + 8).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit/2 - 4).Hash(), @@ -225,31 +223,31 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // The chain endpoints should be retrievable { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, { // If the peer requests a bit into the future, we deliver what we have - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, // Ensure protocol limits are honored { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit), }, // Check that requesting more than available is handled gracefully { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(0).Hash(), @@ -257,13 +255,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check that requesting more than available is handled gracefully, even if mid skip { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, []common.Hash{ backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(), }, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -271,7 +269,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a 
corner case where requesting more can iterate past the endpoints { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, []common.Hash{ backend.chain.GetBlockByNumber(2).Hash(), backend.chain.GetBlockByNumber(1).Hash(), @@ -280,24 +278,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { }, // Check a corner case where skipping overflow loops back into the chain start { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, []common.Hash{ backend.chain.GetBlockByNumber(3).Hash(), }, }, // Check a corner case where skipping overflow loops back to the same header { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, []common.Hash{ backend.chain.GetBlockByNumber(1).Hash(), }, }, // Check that non existing headers aren't returned { - &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, []common.Hash{}, }, { - &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, + &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, []common.Hash{}, }, } @@ -309,13 +307,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { headers = append(headers, backend.chain.GetBlockByHash(hash).Header()) } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: 123, - GetBlockHeadersPacket: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ + RequestId: 123, + GetBlockHeadersRequest: tt.query, }) - if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{ - RequestId: 123, - BlockHeadersPacket: headers, + if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{ + RequestId: 123, + BlockHeadersRequest: headers, }); err != nil { t.Errorf("test %d: headers mismatch: %v", i, err) } @@ -324,11 +322,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: 456, - GetBlockHeadersPacket: tt.query, + p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ + RequestId: 456, + GetBlockHeadersRequest: tt.query, }) - expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers} + expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers} if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil { t.Errorf("test %d by hash: headers mismatch: %v", i, err) } @@ -338,7 +336,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { } // Tests that block contents can be retrieved from a remote chain based on their hashes. 
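The overflow corner cases above exist because a skip query advances as origin + skip + 1 in uint64 arithmetic, which can wrap around; the server must detect the wrap instead of looping back over the chain. A minimal guard of the kind those queries probe:

package main

import (
    "fmt"
    "math"
)

// nextOrigin advances a skip-list header query; ok is false when the uint64
// addition wrapped, which is what Skip: math.MaxUint64 provokes above.
func nextOrigin(origin, skip uint64) (next uint64, ok bool) {
    next = origin + skip + 1
    if next <= origin {
        return 0, false // wrapped: stop serving rather than loop
    }
    return next, true
}

func main() {
    fmt.Println(nextOrigin(3, 0))              // 4 true
    fmt.Println(nextOrigin(1, math.MaxUint64)) // 0 false
}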
-func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) } func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) } func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) } @@ -420,139 +417,20 @@ func testGetBlockBodies(t *testing.T, protocol uint) { } // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{ - RequestId: 123, - GetBlockBodiesPacket: hashes, + p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{ + RequestId: 123, + GetBlockBodiesRequest: hashes, }) - if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{ - RequestId: 123, - BlockBodiesPacket: bodies, + if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{ + RequestId: 123, + BlockBodiesResponse: bodies, }); err != nil { t.Fatalf("test %d: bodies mismatch: %v", i, err) } } } -// Tests that the state trie nodes can be retrieved based on hashes. -func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) } -func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) } -func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) } - -func testGetNodeData(t *testing.T, protocol uint, drop bool) { - t.Parallel() - - // Define three accounts to simulate transactions with - acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) - - signer := types.HomesteadSigner{} - // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test) - generator := func(i int, block *core.BlockGen) { - switch i { - case 0: - // In block 1, the test bank sends account #1 some ether. - tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) - block.AddTx(tx) - case 1: - // In block 2, the test bank sends some more ether to account #1. - // acc1Addr passes it on to account #2. - tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) - tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key) - block.AddTx(tx1) - block.AddTx(tx2) - case 2: - // Block 3 is empty but was mined by account #2. - block.SetCoinbase(acc2Addr) - block.SetExtra([]byte("yeehaw")) - case 3: - // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). - b2 := block.PrevBlock(1).Header() - b2.Extra = []byte("foo") - block.AddUncle(b2) - b3 := block.PrevBlock(2).Header() - b3.Extra = []byte("foo") - block.AddUncle(b3) - } - } - // Assemble the test environment - backend := newTestBackendWithGenerator(4, false, generator) - defer backend.close() - - peer, _ := newTestPeer("peer", protocol, backend) - defer peer.close() - - // Collect all state tree hashes. - var hashes []common.Hash - it := backend.db.NewIterator(nil, nil) - for it.Next() { - if key := it.Key(); len(key) == common.HashLength { - hashes = append(hashes, common.BytesToHash(key)) - } - } - it.Release() - - // Request all hashes. 
- p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{ - RequestId: 123, - GetNodeDataPacket: hashes, - }) - msg, err := peer.app.ReadMsg() - if !drop { - if err != nil { - t.Fatalf("failed to read node data response: %v", err) - } - } else { - if err != nil { - return - } - t.Fatalf("succeeded to read node data response on non-supporting protocol: %v", msg) - } - if msg.Code != NodeDataMsg { - t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg) - } - var res NodeDataPacket66 - if err := msg.Decode(&res); err != nil { - t.Fatalf("failed to decode response node data: %v", err) - } - - // Verify that all hashes correspond to the requested data. - data := res.NodeDataPacket - for i, want := range hashes { - if hash := crypto.Keccak256Hash(data[i]); hash != want { - t.Errorf("data hash mismatch: have %x, want %x", hash, want) - } - } - - // Reconstruct state tree from the received data. - reconstructDB := rawdb.NewMemoryDatabase() - for i := 0; i < len(data); i++ { - rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i]) - } - - // Sanity check whether all state matches. - accounts := []common.Address{testAddr, acc1Addr, acc2Addr} - for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ { - root := backend.chain.GetBlockByNumber(i).Root() - reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil) - for j, acc := range accounts { - state, _ := backend.chain.StateAt(root) - bw := state.GetBalance(acc) - bh := reconstructed.GetBalance(acc) - - if (bw == nil) != (bh == nil) { - t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - if bw != nil && bh != nil && bw.Cmp(bh) != 0 { - t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - } - } -} - // Tests that the transaction receipts can be retrieved based on hashes. 
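Stepping back from the deleted GetNodeData machinery: with eth/66 gone, the rewritten handleMessage selects between exactly two dispatch tables, eth67 and eth68, then indexes the chosen table by message code. A toy of that selection (the message code is a placeholder, not the real constant):

package main

import (
    "errors"
    "fmt"
)

type msgHandler func(msg string) error

const (
    ETH67 uint = 67
    ETH68 uint = 68

    demoMsg uint64 = 0x03 // placeholder standing in for a real message code
)

var eth67 = map[uint64]msgHandler{
    demoMsg: func(m string) error { fmt.Println("eth/67 handled:", m); return nil },
}

var eth68 = map[uint64]msgHandler{
    demoMsg: func(m string) error { fmt.Println("eth/68 handled:", m); return nil },
}

func handleMessage(version uint, code uint64, msg string) error {
    handlers := eth67
    if version >= ETH68 {
        handlers = eth68
    }
    if handler, ok := handlers[code]; ok {
        return handler(msg)
    }
    return errors.New("invalid message code")
}

func main() {
    _ = handleMessage(ETH68, demoMsg, "GetBlockHeaders")
    _ = handleMessage(ETH67, demoMsg, "GetBlockHeaders")
}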
-func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) } func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) } func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) } @@ -613,13 +491,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) { receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash())) } // Send the hash request and verify the response - p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{ - RequestId: 123, - GetReceiptsPacket: hashes, + p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{ + RequestId: 123, + GetReceiptsRequest: hashes, }) - if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{ - RequestId: 123, - ReceiptsPacket: receipts, + if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{ + RequestId: 123, + ReceiptsResponse: receipts, }); err != nil { t.Errorf("receipts mismatch: %v", err) } diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index da741791bc..da4ffd327e 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -22,27 +22,25 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) -// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders -func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { // Decode the complex header query - var query GetBlockHeadersPacket66 + var query GetBlockHeadersPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer) + response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersRequest, peer) return peer.ReplyBlockHeadersRLP(query.RequestId, response) } // ServiceGetBlockHeadersQuery assembles the response to a header query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { +func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { if query.Skip == 0 { // The fast path: when the request is for a contiguous segment of headers. 
return serviceContiguousBlockHeaderQuery(chain, query) @@ -51,7 +49,7 @@ func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersP } } -func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { +func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue { hashMode := query.Origin.Hash != (common.Hash{}) first := true maxNonCanonical := uint64(100) @@ -140,7 +138,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc return headers } -func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue { +func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue { count := query.Amount if count > maxHeadersServe { count = maxHeadersServe @@ -203,19 +201,19 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe } } -func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error { // Decode the block body retrieval message - var query GetBlockBodiesPacket66 + var query GetBlockBodiesPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket) + response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesRequest) return peer.ReplyBlockBodiesRLP(query.RequestId, response) } // ServiceGetBlockBodiesQuery assembles the response to a body query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue { +func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue { // Gather blocks until the fetch or network limits is reached var ( bytes int @@ -234,60 +232,19 @@ func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPack return bodies } -func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { - // Decode the trie node data retrieval message - var query GetNodeDataPacket66 - if err := msg.Decode(&query); err != nil { - return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) - } - response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket) - return peer.ReplyNodeData(query.RequestId, response) -} - -// ServiceGetNodeDataQuery assembles the response to a node data query. It is -// exposed to allow external packages to test protocol behavior. -func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte { - // Request nodes by hash is not supported in path-based scheme. - if chain.TrieDB().Scheme() == rawdb.PathScheme { - return nil - } - // Gather state data until the fetch or network limits is reached - var ( - bytes int - nodes [][]byte - ) - for lookups, hash := range query { - if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe || - lookups >= 2*maxNodeDataServe { - break - } - // Retrieve the requested state entry - entry, err := chain.TrieDB().Node(hash) - if len(entry) == 0 || err != nil { - // Read the contract code with prefix only to save unnecessary lookups. 
- entry, err = chain.ContractCodeWithPrefix(hash) - } - if err == nil && len(entry) > 0 { - nodes = append(nodes, entry) - bytes += len(entry) - } - } - return nodes -} - -func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error { // Decode the block receipts retrieval message - var query GetReceiptsPacket66 + var query GetReceiptsPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket) + response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsRequest) return peer.ReplyReceiptsRLP(query.RequestId, response) } // ServiceGetReceiptsQuery assembles the response to a receipt query. It is // exposed to allow external packages to test protocol behavior. -func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue { +func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue { // Gather state data until the fetch or network limits is reached var ( bytes int @@ -356,15 +313,15 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, ann) } -func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { // A batch of headers arrived to one of our previous requests - res := new(BlockHeadersPacket66) + res := new(BlockHeadersPacket) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { - hashes := make([]common.Hash, len(res.BlockHeadersPacket)) - for i, header := range res.BlockHeadersPacket { + hashes := make([]common.Hash, len(res.BlockHeadersRequest)) + for i, header := range res.BlockHeadersRequest { hashes[i] = header.Hash() } return hashes @@ -372,24 +329,24 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockHeadersMsg, - Res: &res.BlockHeadersPacket, + Res: &res.BlockHeadersRequest, }, metadata) } -func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { +func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error { // A batch of block bodies arrived to one of our previous requests - res := new(BlockBodiesPacket66) + res := new(BlockBodiesPacket) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { var ( - txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) - uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket)) - withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + txsHashes = make([]common.Hash, len(res.BlockBodiesResponse)) + uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse)) + withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse)) ) hasher := trie.NewStackTrie(nil) - for i, body := range res.BlockBodiesPacket { + for i, body := range res.BlockBodiesResponse { txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) uncleHashes[i] = types.CalcUncleHash(body.Uncles) if body.Withdrawals != nil { @@ -401,33 +358,20 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: BlockBodiesMsg, - Res: 
&res.BlockBodiesPacket, + Res: &res.BlockBodiesResponse, }, metadata) } -func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error { - // A batch of node state data arrived to one of our previous requests - res := new(NodeDataPacket66) - if err := msg.Decode(res); err != nil { - return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) - } - return peer.dispatchResponse(&Response{ - id: res.RequestId, - code: NodeDataMsg, - Res: &res.NodeDataPacket, - }, nil) // No post-processing, we're not using this packet anymore -} - -func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { +func handleReceipts(backend Backend, msg Decoder, peer *Peer) error { // A batch of receipts arrived to one of our previous requests - res := new(ReceiptsPacket66) + res := new(ReceiptsPacket) if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } metadata := func() interface{} { hasher := trie.NewStackTrie(nil) - hashes := make([]common.Hash, len(res.ReceiptsPacket)) - for i, receipt := range res.ReceiptsPacket { + hashes := make([]common.Hash, len(res.ReceiptsResponse)) + for i, receipt := range res.ReceiptsResponse { hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) } return hashes @@ -435,17 +379,17 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { return peer.dispatchResponse(&Response{ id: res.RequestId, code: ReceiptsMsg, - Res: &res.ReceiptsPacket, + Res: &res.ReceiptsResponse, }, metadata) } -func handleNewPooledTransactionHashes66(backend Backend, msg Decoder, peer *Peer) error { +func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error { // New transaction announcement arrived, make sure we have // a valid and fresh chain to handle them if !backend.AcceptTxs() { return nil } - ann := new(NewPooledTransactionHashesPacket66) + ann := new(NewPooledTransactionHashesPacket67) if err := msg.Decode(ann); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } @@ -476,17 +420,17 @@ func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer return backend.Handle(peer, ann) } -func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { +func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error { // Decode the pooled transactions retrieval message - var query GetPooledTransactionsPacket66 + var query GetPooledTransactionsPacket if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer) + hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest, peer) return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs) } -func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) { +func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest, peer *Peer) ([]common.Hash, []rlp.RawValue) { // Gather transactions until the fetch or network limits is reached var ( bytes int @@ -534,17 +478,17 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, &txs) } -func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { +func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { // Transactions arrived, make sure we have a valid and fresh 
chain to handle them if !backend.AcceptTxs() { return nil } // Transactions can be processed, parse all of them and deliver to the pool - var txs PooledTransactionsPacket66 + var txs PooledTransactionsPacket if err := msg.Decode(&txs); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - for i, tx := range txs.PooledTransactionsPacket { + for i, tx := range txs.PooledTransactionsResponse { // Validate and mark the remote transaction if tx == nil { return fmt.Errorf("%w: transaction %d is nil", errDecode, i) @@ -553,5 +497,5 @@ func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error } requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId) - return backend.Handle(peer, &txs.PooledTransactionsPacket) + return backend.Handle(peer, &txs.PooledTransactionsResponse) } diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go index dca66e0c57..d96cfc8165 100644 --- a/eth/protocols/eth/handshake_test.go +++ b/eth/protocols/eth/handshake_test.go @@ -27,7 +27,8 @@ import ( ) // Tests that handshake failures are detected and reported correctly. -func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) } +func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) } +func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) } func testHandshake(t *testing.T, protocol uint) { t.Parallel() diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index 219f486c8e..938af0cab0 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -219,7 +219,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) { func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) - return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket66(hashes)) + return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes)) } // sendPooledTransactionHashes68 sends transaction hashes (tagged with their type @@ -248,15 +248,15 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) { } } -// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP. +// ReplyPooledTransactionsRLP is the response to RequestTxs. func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error { // Mark all the transactions as known, but ensure we don't overflow our limits p.knownTxs.Add(hashes...) - // Not packed into PooledTransactionsPacket to avoid RLP decoding - return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{ - RequestId: id, - PooledTransactionsRLPPacket: txs, + // Not packed into PooledTransactionsResponse to avoid RLP decoding + return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{ + RequestId: id, + PooledTransactionsRLPResponse: txs, }) } @@ -309,36 +309,28 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) { } } -// ReplyBlockHeadersRLP is the eth/66 response to GetBlockHeaders. +// ReplyBlockHeadersRLP is the response to GetBlockHeaders. 
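
The "avoid RLP decoding" notes in the reply helpers above lean on rlp.RawValue
semantics: a RawValue is written to the wire byte-for-byte, so a response
assembled from already-encoded database reads skips the decode-plus-re-encode
round trip entirely. A minimal standalone sketch of that behavior (an
illustration only, not part of the patch):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/rlp"
	)

	func main() {
		raw := rlp.RawValue{0x82, 'h', 'i'}              // pre-encoded 2-byte string "hi"
		enc, _ := rlp.EncodeToBytes([]rlp.RawValue{raw}) // list-wrap without re-encoding
		fmt.Printf("%x\n", enc)                          // c3826869: raw bytes embedded verbatim
	}
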
func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error { - return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{ - RequestId: id, - BlockHeadersRLPPacket: headers, + return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{ + RequestId: id, + BlockHeadersRLPResponse: headers, }) } -// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies. +// ReplyBlockBodiesRLP is the response to GetBlockBodies. func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error { - // Not packed into BlockBodiesPacket to avoid RLP decoding - return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{ - RequestId: id, - BlockBodiesRLPPacket: bodies, + // Not packed into BlockBodiesResponse to avoid RLP decoding + return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{ + RequestId: id, + BlockBodiesRLPResponse: bodies, }) } -// ReplyNodeData is the eth/66 response to GetNodeData. -func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error { - return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{ - RequestId: id, - NodeDataPacket: data, - }) -} - -// ReplyReceiptsRLP is the eth/66 response to GetReceipts. +// ReplyReceiptsRLP is the response to GetReceipts. func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error { - return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{ - RequestId: id, - ReceiptsRLPPacket: receipts, + return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{ + RequestId: id, + ReceiptsRLPResponse: receipts, }) } @@ -353,9 +345,9 @@ func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket66{ + data: &GetBlockHeadersPacket{ RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ + GetBlockHeadersRequest: &GetBlockHeadersRequest{ Origin: HashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), @@ -380,9 +372,9 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket66{ + data: &GetBlockHeadersPacket{ RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ + GetBlockHeadersRequest: &GetBlockHeadersRequest{ Origin: HashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -407,9 +399,9 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever sink: sink, code: GetBlockHeadersMsg, want: BlockHeadersMsg, - data: &GetBlockHeadersPacket66{ + data: &GetBlockHeadersPacket{ RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ + GetBlockHeadersRequest: &GetBlockHeadersRequest{ Origin: HashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), @@ -434,31 +426,9 @@ func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Reques sink: sink, code: GetBlockBodiesMsg, want: BlockBodiesMsg, - data: &GetBlockBodiesPacket66{ - RequestId: id, - GetBlockBodiesPacket: hashes, - }, - } - if err := p.dispatchRequest(req); err != nil { - return nil, err - } - return req, nil -} - -// RequestNodeData fetches a batch of arbitrary data from a node's known state -// data, corresponding to the specified hashes. 
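
All the Request* builders above share one dispatch pattern: pick a random
request id, register a sink channel, and let dispatchResponse route the reply
carrying the matching id and message code back to that sink. A hedged usage
sketch from a hypothetical caller (the names match the post-rename code, but
the surrounding caller is assumed, not part of the patch):

	sink := make(chan *eth.Response)
	req, err := peer.RequestHeadersByNumber(4_000_000, 16, 0, false, sink)
	if err != nil {
		return err
	}
	defer req.Close()                              // always release the dispatcher slot
	res := <-sink                                  // delivered once BlockHeadersMsg arrives
	headers := *res.Res.(*eth.BlockHeadersRequest) // response payload type after the rename
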
-func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) { - p.Log().Debug("Fetching batch of state data", "count", len(hashes)) - id := rand.Uint64() - - req := &Request{ - id: id, - sink: sink, - code: GetNodeDataMsg, - want: NodeDataMsg, - data: &GetNodeDataPacket66{ - RequestId: id, - GetNodeDataPacket: hashes, + data: &GetBlockBodiesPacket{ + RequestId: id, + GetBlockBodiesRequest: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -477,9 +447,9 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ sink: sink, code: GetReceiptsMsg, want: ReceiptsMsg, - data: &GetReceiptsPacket66{ - RequestId: id, - GetReceiptsPacket: hashes, + data: &GetReceiptsPacket{ + RequestId: id, + GetReceiptsRequest: hashes, }, } if err := p.dispatchRequest(req); err != nil { @@ -494,9 +464,9 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error { id := rand.Uint64() requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id) - return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{ - RequestId: id, - GetPooledTransactionsPacket: hashes, + return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{ + RequestId: id, + GetPooledTransactionsRequest: hashes, }) } diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index 4b9f5ad6ba..0f44f83de1 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -30,7 +30,6 @@ import ( // Constants to match up protocol versions and messages const ( - ETH66 = 66 ETH67 = 67 ETH68 = 68 ) @@ -41,11 +40,11 @@ const ProtocolName = "eth" // ProtocolVersions are the supported versions of the `eth` protocol (first // is primary). -var ProtocolVersions = []uint{ETH68, ETH67, ETH66} +var ProtocolVersions = []uint{ETH68, ETH67} // protocolLengths are the number of implemented message corresponding to // different protocol versions. -var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17, ETH66: 17} +var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17} // maxMessageSize is the maximum cap on the size of a protocol message. const maxMessageSize = 10 * 1024 * 1024 @@ -62,8 +61,6 @@ const ( NewPooledTransactionHashesMsg = 0x08 GetPooledTransactionsMsg = 0x09 PooledTransactionsMsg = 0x0a - GetNodeDataMsg = 0x0d - NodeDataMsg = 0x0e GetReceiptsMsg = 0x0f ReceiptsMsg = 0x10 ) @@ -85,7 +82,7 @@ type Packet interface { Kind() byte // Kind returns the message type. } -// StatusPacket is the network packet for the status message for eth/64 and later. +// StatusPacket is the network packet for the status message. type StatusPacket struct { ProtocolVersion uint32 NetworkID uint64 @@ -118,18 +115,18 @@ func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) { // TransactionsPacket is the network packet for broadcasting new transactions. type TransactionsPacket []*types.Transaction -// GetBlockHeadersPacket represents a block header query. -type GetBlockHeadersPacket struct { +// GetBlockHeadersRequest represents a block header query. 
+type GetBlockHeadersRequest struct { Origin HashOrNumber // Block from which to retrieve headers Amount uint64 // Maximum number of headers to retrieve Skip uint64 // Blocks to skip between consecutive headers Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) } -// GetBlockHeadersPacket66 represents a block header query over eth/66 -type GetBlockHeadersPacket66 struct { +// GetBlockHeadersPacket represents a block header query with request ID wrapping. +type GetBlockHeadersPacket struct { RequestId uint64 - *GetBlockHeadersPacket + *GetBlockHeadersRequest } // HashOrNumber is a combined field for specifying an origin block. @@ -168,23 +165,23 @@ func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error { } } -// BlockHeadersPacket represents a block header response. -type BlockHeadersPacket []*types.Header +// BlockHeadersRequest represents a block header response. +type BlockHeadersRequest []*types.Header -// BlockHeadersPacket66 represents a block header response over eth/66. -type BlockHeadersPacket66 struct { +// BlockHeadersPacket represents a block header response over with request ID wrapping. +type BlockHeadersPacket struct { RequestId uint64 - BlockHeadersPacket + BlockHeadersRequest } -// BlockHeadersRLPPacket represents a block header response, to use when we already +// BlockHeadersRLPResponse represents a block header response, to use when we already // have the headers rlp encoded. -type BlockHeadersRLPPacket []rlp.RawValue +type BlockHeadersRLPResponse []rlp.RawValue -// BlockHeadersRLPPacket66 represents a block header response over eth/66. -type BlockHeadersRLPPacket66 struct { +// BlockHeadersRLPPacket represents a block header response with request ID wrapping. +type BlockHeadersRLPPacket struct { RequestId uint64 - BlockHeadersRLPPacket + BlockHeadersRLPResponse } // NewBlockPacket is the network packet for the block propagation message. @@ -206,33 +203,34 @@ func (request *NewBlockPacket) sanityCheck() error { return nil } -// GetBlockBodiesPacket represents a block body query. -type GetBlockBodiesPacket []common.Hash +// GetBlockBodiesRequest represents a block body query. +type GetBlockBodiesRequest []common.Hash -// GetBlockBodiesPacket66 represents a block body query over eth/66. -type GetBlockBodiesPacket66 struct { +// GetBlockBodiesPacket represents a block body query with request ID wrapping. +type GetBlockBodiesPacket struct { RequestId uint64 - GetBlockBodiesPacket + GetBlockBodiesRequest } -// BlockBodiesPacket is the network packet for block content distribution. -type BlockBodiesPacket []*BlockBody +// BlockBodiesResponse is the network packet for block content distribution. +type BlockBodiesResponse []*BlockBody -// BlockBodiesPacket66 is the network packet for block content distribution over eth/66. -type BlockBodiesPacket66 struct { +// BlockBodiesPacket is the network packet for block content distribution with +// request ID wrapping. +type BlockBodiesPacket struct { RequestId uint64 - BlockBodiesPacket + BlockBodiesResponse } -// BlockBodiesRLPPacket is used for replying to block body requests, in cases +// BlockBodiesRLPResponse is used for replying to block body requests, in cases // where we already have them RLP-encoded, and thus can avoid the decode-encode // roundtrip. 
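
The Packet-versus-Request/Response split above is purely a naming cleanup: on
the wire every one of these messages stays a two-element RLP list of the form
[request-id, payload], because the embedded payload struct encodes as a nested
list. A toy demonstration with made-up types (assuming go-ethereum's rlp
package, which encodes struct fields in declaration order):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/rlp"
	)

	// DemoRequest stands in for a payload type such as GetBlockHeadersRequest.
	type DemoRequest struct {
		Amount uint64
	}

	// DemoPacket mirrors the envelope: a request id plus the embedded payload.
	type DemoPacket struct {
		RequestId uint64
		DemoRequest
	}

	func main() {
		enc, _ := rlp.EncodeToBytes(&DemoPacket{RequestId: 1111, DemoRequest: DemoRequest{Amount: 5}})
		fmt.Printf("%x\n", enc) // c5820457c105, i.e. [1111, [5]]
	}
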
-type BlockBodiesRLPPacket []rlp.RawValue +type BlockBodiesRLPResponse []rlp.RawValue -// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66 -type BlockBodiesRLPPacket66 struct { +// BlockBodiesRLPPacket is the BlockBodiesRLPResponse with request ID wrapping. +type BlockBodiesRLPPacket struct { RequestId uint64 - BlockBodiesRLPPacket + BlockBodiesRLPResponse } // BlockBody represents the data content of a single block. @@ -244,7 +242,7 @@ type BlockBody struct { // Unpack retrieves the transactions and uncles from the range packet and returns // them in a split flat format that's more consistent with the internal data structures. -func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { +func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) { // TODO(matt): add support for withdrawals to fetchers var ( txset = make([][]*types.Transaction, len(*p)) @@ -257,53 +255,36 @@ func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, return txset, uncleset, withdrawalset } -// GetNodeDataPacket represents a trie node data query. -type GetNodeDataPacket []common.Hash - -// GetNodeDataPacket66 represents a trie node data query over eth/66. -type GetNodeDataPacket66 struct { - RequestId uint64 - GetNodeDataPacket -} - -// NodeDataPacket is the network packet for trie node data distribution. -type NodeDataPacket [][]byte +// GetReceiptsRequest represents a block receipts query. +type GetReceiptsRequest []common.Hash -// NodeDataPacket66 is the network packet for trie node data distribution over eth/66. -type NodeDataPacket66 struct { +// GetReceiptsPacket represents a block receipts query with request ID wrapping. +type GetReceiptsPacket struct { RequestId uint64 - NodeDataPacket + GetReceiptsRequest } -// GetReceiptsPacket represents a block receipts query. -type GetReceiptsPacket []common.Hash +// ReceiptsResponse is the network packet for block receipts distribution. +type ReceiptsResponse [][]*types.Receipt -// GetReceiptsPacket66 represents a block receipts query over eth/66. -type GetReceiptsPacket66 struct { +// ReceiptsPacket is the network packet for block receipts distribution with +// request ID wrapping. +type ReceiptsPacket struct { RequestId uint64 - GetReceiptsPacket + ReceiptsResponse } -// ReceiptsPacket is the network packet for block receipts distribution. -type ReceiptsPacket [][]*types.Receipt +// ReceiptsRLPResponse is used for receipts, when we already have it encoded +type ReceiptsRLPResponse []rlp.RawValue -// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66. -type ReceiptsPacket66 struct { +// ReceiptsRLPPacket is ReceiptsRLPResponse with request ID wrapping. +type ReceiptsRLPPacket struct { RequestId uint64 - ReceiptsPacket + ReceiptsRLPResponse } -// ReceiptsRLPPacket is used for receipts, when we already have it encoded -type ReceiptsRLPPacket []rlp.RawValue - -// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket -type ReceiptsRLPPacket66 struct { - RequestId uint64 - ReceiptsRLPPacket -} - -// NewPooledTransactionHashesPacket66 represents a transaction announcement packet on eth/66 and eth/67. -type NewPooledTransactionHashesPacket66 []common.Hash +// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67. 
+type NewPooledTransactionHashesPacket67 []common.Hash // NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer. type NewPooledTransactionHashesPacket68 struct { @@ -312,31 +293,33 @@ type NewPooledTransactionHashesPacket68 struct { Hashes []common.Hash } -// GetPooledTransactionsPacket represents a transaction query. -type GetPooledTransactionsPacket []common.Hash +// GetPooledTransactionsRequest represents a transaction query. +type GetPooledTransactionsRequest []common.Hash -type GetPooledTransactionsPacket66 struct { +// GetPooledTransactionsPacket represents a transaction query with request ID wrapping. +type GetPooledTransactionsPacket struct { RequestId uint64 - GetPooledTransactionsPacket + GetPooledTransactionsRequest } -// PooledTransactionsPacket is the network packet for transaction distribution. -type PooledTransactionsPacket []*types.Transaction +// PooledTransactionsResponse is the network packet for transaction distribution. +type PooledTransactionsResponse []*types.Transaction -// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66. -type PooledTransactionsPacket66 struct { +// PooledTransactionsPacket is the network packet for transaction distribution +// with request ID wrapping. +type PooledTransactionsPacket struct { RequestId uint64 - PooledTransactionsPacket + PooledTransactionsResponse } -// PooledTransactionsRLPPacket is the network packet for transaction distribution, used +// PooledTransactionsRLPResponse is the network packet for transaction distribution, used // in the cases we already have them in rlp-encoded form -type PooledTransactionsRLPPacket []rlp.RawValue +type PooledTransactionsRLPResponse []rlp.RawValue -// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket -type PooledTransactionsRLPPacket66 struct { +// PooledTransactionsRLPPacket is PooledTransactionsRLPResponse with request ID wrapping. 
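
The two announcement packets above differ in one important way: the eth/67
form is a bare hash list, while the eth/68 form carries parallel type and size
slices, letting a receiver decide per transaction (for example, skipping blob
transactions it does not want) before requesting anything. A hypothetical
construction helper, assuming the upstream field names Types []byte and
Sizes []uint32 (the helper itself is illustrative, not part of the patch):

	// makeAnnouncement68 builds an eth/68 announcement; the three slices
	// must stay index-aligned for receivers to correlate them.
	func makeAnnouncement68(txs []*types.Transaction) *NewPooledTransactionHashesPacket68 {
		ann := new(NewPooledTransactionHashesPacket68)
		for _, tx := range txs {
			ann.Types = append(ann.Types, tx.Type())
			ann.Sizes = append(ann.Sizes, uint32(tx.Size()))
			ann.Hashes = append(ann.Hashes, tx.Hash())
		}
		return ann
	}
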
+type PooledTransactionsRLPPacket struct { RequestId uint64 - PooledTransactionsRLPPacket + PooledTransactionsRLPResponse } func (*StatusPacket) Name() string { return "Status" } @@ -348,40 +331,34 @@ func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg } func (*TransactionsPacket) Name() string { return "Transactions" } func (*TransactionsPacket) Kind() byte { return TransactionsMsg } -func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" } -func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg } +func (*GetBlockHeadersRequest) Name() string { return "GetBlockHeaders" } +func (*GetBlockHeadersRequest) Kind() byte { return GetBlockHeadersMsg } -func (*BlockHeadersPacket) Name() string { return "BlockHeaders" } -func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg } +func (*BlockHeadersRequest) Name() string { return "BlockHeaders" } +func (*BlockHeadersRequest) Kind() byte { return BlockHeadersMsg } -func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" } -func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg } +func (*GetBlockBodiesRequest) Name() string { return "GetBlockBodies" } +func (*GetBlockBodiesRequest) Kind() byte { return GetBlockBodiesMsg } -func (*BlockBodiesPacket) Name() string { return "BlockBodies" } -func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } +func (*BlockBodiesResponse) Name() string { return "BlockBodies" } +func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg } func (*NewBlockPacket) Name() string { return "NewBlock" } func (*NewBlockPacket) Kind() byte { return NewBlockMsg } -func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" } -func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg } +func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" } +func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg } func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" } func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg } -func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" } -func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg } - -func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } -func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } - -func (*GetNodeDataPacket) Name() string { return "GetNodeData" } -func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } +func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" } +func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg } -func (*NodeDataPacket) Name() string { return "NodeData" } -func (*NodeDataPacket) Kind() byte { return NodeDataMsg } +func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" } +func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg } -func (*GetReceiptsPacket) Name() string { return "GetReceipts" } -func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg } +func (*GetReceiptsRequest) Name() string { return "GetReceipts" } +func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg } -func (*ReceiptsPacket) Name() string { return "Receipts" } -func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg } +func (*ReceiptsResponse) 
Name() string { return "Receipts" } +func (*ReceiptsResponse) Kind() byte { return ReceiptsMsg } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index a86fbb0a69..bc2545dea2 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -35,19 +35,19 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } // Assemble some table driven tests tests := []struct { - packet *GetBlockHeadersPacket + packet *GetBlockHeadersRequest fail bool }{ // Providing the origin as either a hash or a number should both work - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}}, - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}}}, // Providing arbitrary query field should also work - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, - {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, // Providing both the origin hash and origin number must fail - {fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}}, + {fail: true, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash, Number: 314}}}, } // Iterate over each of the tests and try to encode and then decode for i, tt := range tests { @@ -58,7 +58,7 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { t.Fatalf("test %d: encode should have failed", i) } if !tt.fail { - packet := new(GetBlockHeadersPacket) + packet := new(GetBlockHeadersRequest) if err := rlp.DecodeBytes(bytes, packet); err != nil { t.Fatalf("test %d: failed to decode packet: %v", i, err) } @@ -70,46 +70,40 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } } -// TestEth66EmptyMessages tests encoding of empty eth66 messages -func TestEth66EmptyMessages(t *testing.T) { +// TestEmptyMessages tests encoding of empty messages. 
+func TestEmptyMessages(t *testing.T) { // All empty messages encodes to the same format want := common.FromHex("c4820457c0") for i, msg := range []interface{}{ // Headers - GetBlockHeadersPacket66{1111, nil}, - BlockHeadersPacket66{1111, nil}, + GetBlockHeadersPacket{1111, nil}, + BlockHeadersPacket{1111, nil}, // Bodies - GetBlockBodiesPacket66{1111, nil}, - BlockBodiesPacket66{1111, nil}, - BlockBodiesRLPPacket66{1111, nil}, - // Node data - GetNodeDataPacket66{1111, nil}, - NodeDataPacket66{1111, nil}, + GetBlockBodiesPacket{1111, nil}, + BlockBodiesPacket{1111, nil}, + BlockBodiesRLPPacket{1111, nil}, // Receipts - GetReceiptsPacket66{1111, nil}, - ReceiptsPacket66{1111, nil}, + GetReceiptsPacket{1111, nil}, + ReceiptsPacket{1111, nil}, // Transactions - GetPooledTransactionsPacket66{1111, nil}, - PooledTransactionsPacket66{1111, nil}, - PooledTransactionsRLPPacket66{1111, nil}, + GetPooledTransactionsPacket{1111, nil}, + PooledTransactionsPacket{1111, nil}, + PooledTransactionsRLPPacket{1111, nil}, // Headers - BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})}, + BlockHeadersPacket{1111, BlockHeadersRequest([]*types.Header{})}, // Bodies - GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})}, - BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})}, - BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})}, - // Node data - GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})}, - NodeDataPacket66{1111, NodeDataPacket([][]byte{})}, + GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})}, + BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})}, + BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})}, // Receipts - GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})}, - ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, + GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})}, + ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{})}, // Transactions - GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})}, - PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})}, - PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})}, + GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})}, + PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})}, + PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})}, } { if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) { t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want) @@ -117,8 +111,8 @@ func TestEth66EmptyMessages(t *testing.T) { } } -// TestEth66Messages tests the encoding of all redefined eth66 messages -func TestEth66Messages(t *testing.T) { +// TestMessages tests the encoding of all messages. 
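
The shared want constant in TestEmptyMessages above decomposes cleanly: 0xc4
is a list header announcing 4 payload bytes, 0x820457 is the request id 1111
(0x0457), and 0xc0 is the empty payload list, so every empty message is just
[1111, []]. A quick standalone cross-check (an illustration, not part of the
patch):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/rlp"
	)

	func main() {
		enc, _ := rlp.EncodeToBytes([]interface{}{uint64(1111), []common.Hash{}})
		fmt.Printf("%x\n", enc) // c4820457c0: [request id 1111, empty payload]
	}
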
+func TestMessages(t *testing.T) { // Some basic structs used during testing var ( header *types.Header @@ -169,10 +163,6 @@ func TestEth66Messages(t *testing.T) { common.HexToHash("deadc0de"), common.HexToHash("feedbeef"), } - byteSlices := [][]byte{ - common.FromHex("deadc0de"), - common.FromHex("feedbeef"), - } // init the receipts { receipts = []*types.Receipt{ @@ -203,59 +193,51 @@ func TestEth66Messages(t *testing.T) { want []byte }{ { - GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}}, + GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{hashes[0], 0}, 5, 5, false}}, common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"), }, { - GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, + GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, common.FromHex("ca820457c682270f050580"), }, { - BlockHeadersPacket66{1111, BlockHeadersPacket{header}}, + BlockHeadersPacket{1111, BlockHeadersRequest{header}}, common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)}, + GetBlockBodiesPacket{1111, GetBlockBodiesRequest(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})}, + BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})}, 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { // Identical to non-rlp-shortcut version - BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})}, + BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{blockBodyRlp})}, common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, { - GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)}, - common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), - }, - { - NodeDataPacket66{1111, NodeDataPacket(byteSlices)}, - common.FromHex("ce820457ca84deadc0de84feedbeef"), - }, - { - 
GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)}, + GetReceiptsPacket{1111, GetReceiptsRequest(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})}, + ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{receipts})}, common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})}, + ReceiptsRLPPacket{1111, ReceiptsRLPResponse([]rlp.RawValue{receiptsRlp})}, common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), }, { - GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)}, + GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)}, + PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)}, common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, { - PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)}, + PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse(txRlps)}, 
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, } { diff --git a/eth/sync_test.go b/eth/sync_test.go index b5e00298b9..d26cbb66ea 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -28,8 +28,8 @@ import ( ) // Tests that snap sync is disabled after a successful sync cycle. -func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) } func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) } +func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) } // Tests that snap sync gets disabled as soon as a real block is successfully // imported into the blockchain. From a8a9c8e4b00c5b9f84242181839234b8e9fd54e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 4 Oct 2023 12:36:36 +0300 Subject: [PATCH 76/98] core, eth, miner: start propagating and consuming blob txs (#28243) * core, eth, miner: start propagating and consuming blob txs * eth/protocols/eth: disable eth/67 if Cancun is enabled * core/txpool, eth, miner: pass gas limit infos in lazy tx for mienr filtering * core/txpool, miner: add lazy resolver for pending txs too * core, eth: fix review noticed bugs * eth, miner: minor polishes in the mining and announcing logs * core/expool: unsubscribe the event scope --- core/txpool/blobpool/blobpool.go | 55 ++++++++++++++++++++-------- core/txpool/legacypool/legacypool.go | 18 +++++---- core/txpool/subpool.go | 19 ++++++++-- core/txpool/txpool.go | 12 +++--- eth/api_backend.go | 2 +- eth/catalyst/simulated_beacon.go | 2 +- eth/handler.go | 41 ++++++++++++--------- eth/handler_eth.go | 14 ++++--- eth/handler_eth_test.go | 4 +- eth/handler_test.go | 6 ++- eth/protocols/eth/handler.go | 4 ++ eth/protocols/eth/handlers.go | 4 +- miner/ordering_test.go | 4 ++ miner/worker.go | 33 +++++++++++------ 14 files changed, 145 insertions(+), 73 deletions(-) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 36916c3f0b..32c6c0e8fe 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -97,6 +97,8 @@ type blobTxMeta struct { execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump execFeeCap *uint256.Int // Needed to validate replacement price bump blobFeeCap *uint256.Int // Needed to validate replacement price bump + execGas uint64 // Needed to check inclusion validity before reading the blob + blobGas uint64 // Needed to check inclusion validity before reading the blob basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap @@ -118,6 +120,8 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta { execTipCap: uint256.MustFromBig(tx.GasTipCap()), execFeeCap: uint256.MustFromBig(tx.GasFeeCap()), blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()), + execGas: tx.Gas(), + blobGas: tx.BlobGas(), } meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap) meta.blobfeeJumps = 
dynamicFeeJumps(meta.blobFeeCap) @@ -307,8 +311,8 @@ type BlobPool struct { spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts evict *evictHeap // Heap of cheapest accounts for eviction when full - eventFeed event.Feed // Event feed to send out new tx events on pool inclusion - eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination + discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded) + insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included) lock sync.RWMutex // Mutex protecting the pool during reorg handling } @@ -436,8 +440,6 @@ func (p *BlobPool) Close() error { if err := p.store.Close(); err != nil { errs = append(errs, err) } - p.eventScope.Close() - switch { case errs == nil: return nil @@ -758,15 +760,21 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { // Run the reorg between the old and new head and figure out which accounts // need to be rechecked and which transactions need to be readded if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil { + var adds []*types.Transaction for addr, txs := range reinject { // Blindly push all the lost transactions back into the pool for _, tx := range txs { - p.reinject(addr, tx.Hash()) + if err := p.reinject(addr, tx.Hash()); err == nil { + adds = append(adds, tx.WithoutBlobTxSidecar()) + } } // Recheck the account's pooled transactions to drop included and // invalidated one p.recheck(addr, inclusions) } + if len(adds) > 0 { + p.insertFeed.Send(core.NewTxsEvent{Txs: adds}) + } } // Flush out any blobs from limbo that are older than the latest finality if p.chain.Config().IsCancun(p.head.Number, p.head.Time) { @@ -921,13 +929,13 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]* // Note, the method will not initialize the eviction cache values as those will // be done once for all transactions belonging to an account after all individual // transactions are injected back into the pool. -func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { +func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error { // Retrieve the associated blob from the limbo. Without the blobs, we cannot // add the transaction back into the pool as it is not mineable. tx, err := p.limbo.pull(txhash) if err != nil { log.Error("Blobs unavailable, dropping reorged tx", "err", err) - return + return err } // TODO: seems like an easy optimization here would be getting the serialized tx // from limbo instead of re-serializing it here. 
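
Caching execGas/blobGas in the metadata and surfacing them as Gas/BlobGas on
the lazy handles is what enables the miner-side filtering mentioned in the
commit message: a block builder can discard transactions that no longer fit
the remaining budgets before paying for a full resolve from the blob store. A
sketch of that consumer-side check (an assumed helper, not from the patch):

	// fits reports whether a lazy transaction can still fit into the block
	// being built, using only the cheap metadata carried on the handle.
	func fits(ltx *txpool.LazyTransaction, gasLeft, blobGasLeft uint64) bool {
		if ltx.Gas > gasLeft || ltx.BlobGas > blobGasLeft {
			return false // over budget: skip without resolving the blobs
		}
		return true // only now is ltx.Resolve() worth the database hit
	}
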
@@ -936,12 +944,12 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { blob, err := rlp.EncodeToBytes(tx) if err != nil { log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err) - return + return err } id, err := p.store.Put(blob) if err != nil { log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err) - return + return err } // Update the indixes and metrics @@ -949,7 +957,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { if _, ok := p.index[addr]; !ok { if err := p.reserve(addr, true); err != nil { log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err) - return + return err } p.index[addr] = []*blobTxMeta{meta} p.spent[addr] = meta.costCap @@ -960,6 +968,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { } p.lookup[meta.hash] = meta.id p.stored += uint64(meta.size) + return nil } // SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements @@ -1154,9 +1163,19 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction { // Add inserts a set of blob transactions into the pool if they pass validation (both // consensus validity and pool restictions). func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error { - errs := make([]error, len(txs)) + var ( + adds = make([]*types.Transaction, 0, len(txs)) + errs = make([]error, len(txs)) + ) for i, tx := range txs { errs[i] = p.add(tx) + if errs[i] == nil { + adds = append(adds, tx.WithoutBlobTxSidecar()) + } + } + if len(adds) > 0 { + p.discoverFeed.Send(core.NewTxsEvent{Txs: adds}) + p.insertFeed.Send(core.NewTxsEvent{Txs: adds}) } return errs } @@ -1384,6 +1403,8 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr Time: time.Now(), // TODO(karalabe): Maybe save these and use that? GasFeeCap: tx.execFeeCap.ToBig(), GasTipCap: tx.execTipCap.ToBig(), + Gas: tx.execGas, + BlobGas: tx.blobGas, }) } if len(lazies) > 0 { @@ -1468,10 +1489,14 @@ func (p *BlobPool) updateLimboMetrics() { limboSlotusedGauge.Update(int64(slotused)) } -// SubscribeTransactions registers a subscription of NewTxsEvent and -// starts sending event to the given channel. -func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { - return p.eventScope.Track(p.eventFeed.Subscribe(ch)) +// SubscribeTransactions registers a subscription for new transaction events, +// supporting feeding only newly seen or also resurrected transactions. +func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { + if reorgs { + return p.insertFeed.Subscribe(ch) + } else { + return p.discoverFeed.Subscribe(ch) + } } // Nonce returns the next nonce of an account, with all transactions executable diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 2430028f9d..e71204185f 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -208,7 +208,6 @@ type LegacyPool struct { chain BlockChain gasTip atomic.Pointer[big.Int] txFeed event.Feed - scope event.SubscriptionScope signer types.Signer mu sync.RWMutex @@ -404,9 +403,6 @@ func (pool *LegacyPool) loop() { // Close terminates the transaction pool. 
func (pool *LegacyPool) Close() error { - // Unsubscribe all subscriptions registered from txpool - pool.scope.Close() - // Terminate the pool reorger and return close(pool.reorgShutdownCh) pool.wg.Wait() @@ -425,10 +421,14 @@ func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) { <-wait } -// SubscribeTransactions registers a subscription of NewTxsEvent and -// starts sending event to the given channel. -func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { - return pool.scope.Track(pool.txFeed.Subscribe(ch)) +// SubscribeTransactions registers a subscription for new transaction events, +// supporting feeding only newly seen or also resurrected transactions. +func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { + // The legacy pool has a very messed up internal shuffling, so it's kind of + // hard to separate newly discovered transaction from resurrected ones. This + // is because the new txs are added to the queue, resurrected ones too and + // reorgs run lazily, so separating the two would need a marker. + return pool.txFeed.Subscribe(ch) } // SetGasTip updates the minimum gas tip required by the transaction pool for a @@ -552,6 +552,8 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L Time: txs[i].Time(), GasFeeCap: txs[i].GasFeeCap(), GasTipCap: txs[i].GasTipCap(), + Gas: txs[i].Gas(), + BlobGas: txs[i].BlobGas(), } } pending[addr] = lazies diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go index 85312c4318..de05b38d43 100644 --- a/core/txpool/subpool.go +++ b/core/txpool/subpool.go @@ -30,13 +30,16 @@ import ( // enough for the miner and other APIs to handle large batches of transactions; // and supports pulling up the entire transaction when really needed. type LazyTransaction struct { - Pool SubPool // Transaction subpool to pull the real transaction up + Pool LazyResolver // Transaction resolver to pull the real transaction up Hash common.Hash // Transaction hash to pull up if needed Tx *types.Transaction // Transaction if already resolved Time time.Time // Time when the transaction was first seen GasFeeCap *big.Int // Maximum fee per gas the transaction may consume GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay + + Gas uint64 // Amount of gas required by the transaction + BlobGas uint64 // Amount of blob gas required by the transaction } // Resolve retrieves the full transaction belonging to a lazy handle if it is still @@ -48,6 +51,14 @@ func (ltx *LazyTransaction) Resolve() *types.Transaction { return ltx.Tx } +// LazyResolver is a minimal interface needed for a transaction pool to satisfy +// resolving lazy transactions. It's mostly a helper to avoid the entire sub- +// pool being injected into the lazy transaction. +type LazyResolver interface { + // Get returns a transaction if it is contained in the pool, or nil otherwise. + Get(hash common.Hash) *types.Transaction +} + // AddressReserver is passed by the main transaction pool to subpools, so they // may request (and relinquish) exclusive access to certain addresses. type AddressReserver func(addr common.Address, reserve bool) error @@ -99,8 +110,10 @@ type SubPool interface { // account and sorted by nonce. Pending(enforceTips bool) map[common.Address][]*LazyTransaction - // SubscribeTransactions subscribes to new transaction events. 
- SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription + // SubscribeTransactions subscribes to new transaction events. The subscriber + // can decide whether to receive notifications only for newly seen transactions + // or also for reorged out ones. + SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription // Nonce returns the next nonce of an account, with all transactions executable // by the pool already applied on top. diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index cacae7bc00..0d4e05da4c 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -155,13 +155,15 @@ func (p *TxPool) Close() error { if err := <-errc; err != nil { errs = append(errs, err) } - // Terminate each subpool for _, subpool := range p.subpools { if err := subpool.Close(); err != nil { errs = append(errs, err) } } + // Unsubscribe anyone still listening for tx events + p.subs.Close() + if len(errs) > 0 { return fmt.Errorf("subpool close errors: %v", errs) } @@ -316,12 +318,12 @@ func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction return txs } -// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending -// events to the given channel. -func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { +// SubscribeTransactions registers a subscription for new transaction events, +// supporting feeding only newly seen or also resurrected transactions. +func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { subs := make([]event.Subscription, len(p.subpools)) for i, subpool := range p.subpools { - subs[i] = subpool.SubscribeTransactions(ch) + subs[i] = subpool.SubscribeTransactions(ch, reorgs) } return p.subs.Track(event.JoinSubscriptions(subs...)) } diff --git a/eth/api_backend.go b/eth/api_backend.go index a0c14f1338..601e555158 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -334,7 +334,7 @@ func (b *EthAPIBackend) TxPool() *txpool.TxPool { } func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - return b.eth.txPool.SubscribeNewTxsEvent(ch) + return b.eth.txPool.SubscribeTransactions(ch, true) } func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress { diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go index 1f7a3266cd..a9a2bb4a9a 100644 --- a/eth/catalyst/simulated_beacon.go +++ b/eth/catalyst/simulated_beacon.go @@ -199,7 +199,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal) error { func (c *SimulatedBeacon) loopOnDemand() { var ( newTxs = make(chan core.NewTxsEvent) - sub = c.eth.TxPool().SubscribeNewTxsEvent(newTxs) + sub = c.eth.TxPool().SubscribeTransactions(newTxs, true) ) defer sub.Unsubscribe() diff --git a/eth/handler.go b/eth/handler.go index f731efe1b8..665df7d8cf 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -75,9 +75,10 @@ type txPool interface { // The slice should be modifiable by the caller. Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction - // SubscribeNewTxsEvent should return an event subscription of - // NewTxsEvent and send events to the given channel. - SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription + // SubscribeTransactions subscribes to new transaction events. The subscriber + // can decide whether to receive notifications only for newly seen transactions + // or also for reorged out ones. 
+ SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription } // handlerConfig is the collection of initialization parameters to create a full @@ -509,10 +510,10 @@ func (h *handler) unregisterPeer(id string) { func (h *handler) Start(maxPeers int) { h.maxPeers = maxPeers - // broadcast transactions + // broadcast and announce transactions (only new ones, not resurrected ones) h.wg.Add(1) h.txsCh = make(chan core.NewTxsEvent, txChanSize) - h.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh) + h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false) go h.txBroadcastLoop() // broadcast mined blocks @@ -592,26 +593,33 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) { } // BroadcastTransactions will propagate a batch of transactions -// - To a square root of all peers +// - To a square root of all peers for non-blob transactions // - And, separately, as announcements to all peers which are not known to // already have the given transaction. func (h *handler) BroadcastTransactions(txs types.Transactions) { var ( - annoCount int // Count of announcements made - annoPeers int - directCount int // Count of the txs sent directly to peers - directPeers int // Count of the peers that were sent transactions directly + blobTxs int // Number of blob transactions to announce only + largeTxs int // Number of large transactions to announce only + + directCount int // Number of transactions sent directly to peers (duplicates included) + directPeers int // Number of peers that were sent transactions directly + annCount int // Number of transactions announced across all peers (duplicates included) + annPeers int // Number of peers announced about transactions txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce - ) // Broadcast transactions to a batch of peers not knowing about it for _, tx := range txs { peers := h.peers.peersWithoutTransaction(tx.Hash()) var numDirect int - if tx.Size() <= txMaxBroadcastSize { + switch { + case tx.Type() == types.BlobTxType: + blobTxs++ + case tx.Size() > txMaxBroadcastSize: + largeTxs++ + default: numDirect = int(math.Sqrt(float64(len(peers)))) } // Send the tx unconditionally to a subset of our peers @@ -629,13 +637,12 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { peer.AsyncSendTransactions(hashes) } for peer, hashes := range annos { - annoPeers++ - annoCount += len(hashes) + annPeers++ + annCount += len(hashes) peer.AsyncSendPooledTransactionHashes(hashes) } - log.Debug("Transaction broadcast", "txs", len(txs), - "announce packs", annoPeers, "announced hashes", annoCount, - "tx packs", directPeers, "broadcast txs", directCount) + log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs, + "bcastpeers", directPeers, "bcastcount", directCount, "annpeers", annPeers, "anncount", annCount) } // minedBroadcastLoop sends mined blocks to connected peers. 
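
The reworked BroadcastTransactions above is a two-tier policy: plain
transactions under the size cap are pushed in full to roughly sqrt(n) of the
peers still missing them and announced to the rest, while blob and oversized
transactions are announce-only, since peers must pull those bodies explicitly.
A condensed sketch of the peer split (simplified, with an assumed helper
shape; not part of the patch):

	// splitRecipients mirrors the broadcast policy: direct push of small
	// plain txs to sqrt(n) peers, hash announcements for everything else.
	func splitRecipients(tx *types.Transaction, peers []*ethPeer) (direct, announce []*ethPeer) {
		if tx.Type() == types.BlobTxType || tx.Size() > txMaxBroadcastSize {
			return nil, peers // blob/large txs are never broadcast in full
		}
		n := int(math.Sqrt(float64(len(peers))))
		return peers[:n], peers[n:]
	}
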
diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 3a0944640e..e844b36cca 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -17,6 +17,7 @@ package eth import ( + "errors" "fmt" "math/big" "time" @@ -73,6 +74,11 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { return h.txFetcher.Notify(peer.ID(), packet.Hashes) case *eth.TransactionsPacket: + for _, tx := range *packet { + if tx.Type() == types.BlobTxType { + return errors.New("disallowed broadcast blob transaction") + } + } return h.txFetcher.Enqueue(peer.ID(), *packet, false) case *eth.PooledTransactionsResponse: @@ -90,9 +96,7 @@ func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash, // the chain already entered the pos stage and disconnect the // remote peer. if h.merger.PoSFinalized() { - // TODO (MariusVanDerWijden) drop non-updated peers after the merge - return nil - // return errors.New("unexpected block announces") + return errors.New("disallowed block announcement") } // Schedule all the unknown hashes for retrieval var ( @@ -118,9 +122,7 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td // the chain already entered the pos stage and disconnect the // remote peer. if h.merger.PoSFinalized() { - // TODO (MariusVanDerWijden) drop non-updated peers after the merge - return nil - // return errors.New("unexpected block announces") + return errors.New("disallowed block broadcast") } // Schedule the block for import h.blockFetcher.Enqueue(peer.ID(), block) diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 4cdfdf47b8..bb342acc18 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -249,7 +249,7 @@ func testRecvTransactions(t *testing.T, protocol uint) { handler.handler.synced.Store(true) // mark synced to accept transactions txs := make(chan core.NewTxsEvent) - sub := handler.txpool.SubscribeNewTxsEvent(txs) + sub := handler.txpool.SubscribeTransactions(txs, false) defer sub.Unsubscribe() // Create a source peer to send messages through and a sink handler to receive them @@ -424,7 +424,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { for i := 0; i < len(sinks); i++ { txChs[i] = make(chan core.NewTxsEvent, 1024) - sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i]) + sub := sinks[i].txpool.SubscribeTransactions(txChs[i], false) defer sub.Unsubscribe() } // Fill the source pool with transactions and wait for them at the sinks diff --git a/eth/handler_test.go b/eth/handler_test.go index 2e0a988452..6d6132ee4c 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -113,15 +113,17 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.Lazy Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } } return pending } -// SubscribeNewTxsEvent should return an event subscription of NewTxsEvent and +// SubscribeTransactions should return an event subscription of NewTxsEvent and // send events to the given channel. 
-func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { +func (p *testTxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { return p.txFeed.Subscribe(ch) } diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index a7d6ed25a9..42d0412a12 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -93,6 +93,10 @@ type TxPool interface { func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol { protocols := make([]p2p.Protocol, 0, len(ProtocolVersions)) for _, version := range ProtocolVersions { + // Blob transactions require eth/68 announcements, disable everything else + if version <= ETH67 && backend.Chain().Config().CancunTime != nil { + continue + } version := version // Closure protocols = append(protocols, p2p.Protocol{ diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index da4ffd327e..069e92dadf 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -426,11 +426,11 @@ func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest, peer) + hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest) return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs) } -func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest, peer *Peer) ([]common.Hash, []rlp.RawValue) { +func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest) ([]common.Hash, []rlp.RawValue) { // Gather transactions until the fetch or network limits is reached var ( bytes int diff --git a/miner/ordering_test.go b/miner/ordering_test.go index bdbdc32148..59d478274d 100644 --- a/miner/ordering_test.go +++ b/miner/ordering_test.go @@ -92,6 +92,8 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } expectedCount += count @@ -157,6 +159,8 @@ func TestTransactionTimeSort(t *testing.T) { Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } // Sort the transactions and cross check the nonce ordering diff --git a/miner/worker.go b/miner/worker.go index 711149232b..f680702814 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -263,8 +263,8 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus resubmitIntervalCh: make(chan time.Duration), resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize), } - // Subscribe NewTxsEvent for tx pool - worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) + // Subscribe for transaction insertion events (whether from network or resurrects) + worker.txsSub = eth.TxPool().SubscribeTransactions(worker.txsCh, true) // Subscribe events for blockchain worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh) @@ -542,11 +542,14 @@ func (w *worker) mainLoop() { for _, tx := range ev.Txs { acc, _ := types.Sender(w.current.signer, tx) txs[acc] = append(txs[acc], &txpool.LazyTransaction{ + Pool: w.eth.TxPool(), // We don't know where this came from, yolo resolve from everywhere Hash: tx.Hash(), - Tx: tx.WithoutBlobTxSidecar(), + Tx: nil, 
// Do *not* set this! We need to resolve it later to pull blobs in Time: tx.Time(), GasFeeCap: tx.GasFeeCap(), GasTipCap: tx.GasTipCap(), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), }) } txset := newTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee) @@ -742,7 +745,6 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]* if tx.Type() == types.BlobTxType { return w.commitBlobTransaction(env, tx) } - receipt, err := w.applyTransaction(env, tx) if err != nil { return nil, err @@ -764,7 +766,6 @@ func (w *worker) commitBlobTransaction(env *environment, tx *types.Transaction) if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { return nil, errors.New("max data blobs reached") } - receipt, err := w.applyTransaction(env, tx) if err != nil { return nil, err @@ -815,13 +816,24 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn if ltx == nil { break } + // If we don't have enough space for the next transaction, skip the account. + if env.gasPool.Gas() < ltx.Gas { + log.Trace("Not enough gas left for transaction", "hash", ltx.Hash, "left", env.gasPool.Gas(), "needed", ltx.Gas) + txs.Pop() + continue + } + if left := uint64(params.MaxBlobGasPerBlock - env.blobs*params.BlobTxBlobGasPerBlob); left < ltx.BlobGas { + log.Trace("Not enough blob gas left for transaction", "hash", ltx.Hash, "left", left, "needed", ltx.BlobGas) + txs.Pop() + continue + } + // Transaction seems to fit, pull it up from the pool tx := ltx.Resolve() if tx == nil { - log.Warn("Ignoring evicted transaction") + log.Trace("Ignoring evicted transaction", "hash", ltx.Hash) txs.Pop() continue } - // Error may be ignored here. The error has already been checked // during transaction acceptance is the transaction pool. from, _ := types.Sender(env.signer, tx) @@ -829,11 +841,10 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { - log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) + log.Trace("Ignoring replay protected transaction", "hash", ltx.Hash, "eip155", w.chainConfig.EIP155Block) txs.Pop() continue } - // Start executing the transaction env.state.SetTxContext(tx.Hash(), env.tcount) @@ -841,7 +852,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn switch { case errors.Is(err, core.ErrNonceTooLow): // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) + log.Trace("Skipping transaction with low nonce", "hash", ltx.Hash, "sender", from, "nonce", tx.Nonce()) txs.Shift() case errors.Is(err, nil): @@ -853,7 +864,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn default: // Transaction is regarded as invalid, drop all consecutive transactions from // the same sender because of `nonce-too-high` clause. 
-			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+			log.Debug("Transaction failed, account skipped", "hash", ltx.Hash, "err", err)
 			txs.Pop()
 		}
 	}

From 95b0555c84ec29c0d8b6947403804d2bccc4189e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Wed, 4 Oct 2023 12:37:04 +0300
Subject: [PATCH 77/98] eth: when snap is complaining for missing eth, be verbose about the details (#28249)

* eth: when snap is complaining for missing eth, be verbose about the details

* eth: lower snapshot registration error verbosity
---
 eth/handler.go | 2 +-
 eth/peerset.go | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/eth/handler.go b/eth/handler.go
index 665df7d8cf..0c0c17fee1 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -462,7 +462,7 @@ func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error
 				snap.EgressRegistrationErrorMeter.Mark(1)
 			}
 		}
-		peer.Log().Warn("Snapshot extension registration failed", "err", err)
+		peer.Log().Debug("Snapshot extension registration failed", "err", err)
 		return err
 	}
 	return handler(peer)
diff --git a/eth/peerset.go b/eth/peerset.go
index b9cc1e03ac..b27d3964a1 100644
--- a/eth/peerset.go
+++ b/eth/peerset.go
@@ -18,6 +18,7 @@ package eth
 
 import (
 	"errors"
+	"fmt"
 	"math/big"
 	"sync"
 
@@ -74,7 +75,7 @@ func (ps *peerSet) registerSnapExtension(peer *snap.Peer) error {
 	// Reject the peer if it advertises `snap` without `eth` as `snap` is only a
 	// satellite protocol meaningful with the chain selection of `eth`
 	if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) {
-		return errSnapWithoutEth
+		return fmt.Errorf("%w: have %v", errSnapWithoutEth, peer.Caps())
 	}
 	// Ensure nobody can double connect
 	ps.lock.Lock()

From 052355f5e2b1726552fdb38a94cf6ea1506caf95 Mon Sep 17 00:00:00 2001
From: tactical_retreat
Date: Wed, 4 Oct 2023 05:38:25 -0400
Subject: [PATCH 78/98] cmd/evm/internal/t8ntool: tiny bugfix for difficulty field (#28245)

---
 cmd/evm/internal/t8ntool/block.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go
index 872e2f6b2a..5c0e28e284 100644
--- a/cmd/evm/internal/t8ntool/block.go
+++ b/cmd/evm/internal/t8ntool/block.go
@@ -158,7 +158,7 @@ func (i *bbInput) ToBlock() *types.Block {
 	if i.Header.Nonce != nil {
 		header.Nonce = *i.Header.Nonce
 	}
-	if header.Difficulty != nil {
+	if i.Header.Difficulty != nil {
 		header.Difficulty = i.Header.Difficulty
 	}
 	return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals)

From 4e1e37323d87967708b58546f040990db7c2820c Mon Sep 17 00:00:00 2001
From: hyunchel <3271191+hyunchel@users.noreply.github.com>
Date: Fri, 6 Oct 2023 22:57:12 -0400
Subject: [PATCH 79/98] core/txpool/legacypool: fix typo (#28258)

---
 core/txpool/legacypool/list.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/txpool/legacypool/list.go b/core/txpool/legacypool/list.go
index 384fa7b61b..05ae0b58cd 100644
--- a/core/txpool/legacypool/list.go
+++ b/core/txpool/legacypool/list.go
@@ -205,7 +205,7 @@ func (m *sortedMap) Remove(nonce uint64) bool {
 // removed from the list.
 //
 // Note, all transactions with nonces lower than start will also be returned to
-// prevent getting into and invalid state. This is not something that should ever
+// prevent getting into an invalid state. This is not something that should ever
 // happen but better to be self correcting than failing!
func (m *sortedMap) Ready(start uint64) types.Transactions { // Short circuit if no transactions are available @@ -421,7 +421,7 @@ func (l *list) Remove(tx *types.Transaction) (bool, types.Transactions) { // removed from the list. // // Note, all transactions with nonces lower than start will also be returned to -// prevent getting into and invalid state. This is not something that should ever +// prevent getting into an invalid state. This is not something that should ever // happen but better to be self correcting than failing! func (l *list) Ready(start uint64) types.Transactions { txs := l.txs.Ready(start) From 08326794e8069835b519217e1d2195f667398748 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 10 Oct 2023 08:28:56 +0200 Subject: [PATCH 80/98] trie: refactor stacktrie (#28233) This change refactors stacktrie to separate the stacktrie itself from the internal representation of nodes: a stacktrie is not a recursive structure of stacktries, rather, a framework for representing and operating upon a set of nodes. --------- Co-authored-by: Gary Rong --- trie/stacktrie.go | 343 ++++++++++++---------------------- trie/stacktrie_marshalling.go | 120 ++++++++++++ trie/stacktrie_test.go | 10 +- 3 files changed, 241 insertions(+), 232 deletions(-) create mode 100644 trie/stacktrie_marshalling.go diff --git a/trie/stacktrie.go b/trie/stacktrie.go index 0d65ee75e0..781c842961 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -17,11 +17,7 @@ package trie import ( - "bufio" - "bytes" - "encoding/gob" "errors" - "io" "sync" "github.com/ethereum/go-ethereum/common" @@ -29,171 +25,96 @@ import ( "github.com/ethereum/go-ethereum/log" ) -var ErrCommitDisabled = errors.New("no database for committing") - -var stPool = sync.Pool{ - New: func() interface{} { - return NewStackTrie(nil) - }, -} +var ( + ErrCommitDisabled = errors.New("no database for committing") + stPool = sync.Pool{New: func() any { return new(stNode) }} + _ = types.TrieHasher((*StackTrie)(nil)) +) // NodeWriteFunc is used to provide all information of a dirty node for committing // so that callers can flush nodes into database with desired scheme. type NodeWriteFunc = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) -func stackTrieFromPool(writeFn NodeWriteFunc, owner common.Hash) *StackTrie { - st := stPool.Get().(*StackTrie) - st.owner = owner - st.writeFn = writeFn - return st -} - -func returnToPool(st *StackTrie) { - st.Reset() - stPool.Put(st) -} - // StackTrie is a trie implementation that expects keys to be inserted // in order. Once it determines that a subtree will no longer be inserted // into, it will hash it and free up the memory it uses. type StackTrie struct { - owner common.Hash // the owner of the trie - nodeType uint8 // node type (as in branch, ext, leaf) - val []byte // value contained by this node if it's a leaf - key []byte // key chunk covered by this (leaf|ext) node - children [16]*StackTrie // list of children (for branch and exts) - writeFn NodeWriteFunc // function for committing nodes, can be nil + owner common.Hash // the owner of the trie + writeFn NodeWriteFunc // function for committing nodes, can be nil + root *stNode + h *hasher } // NewStackTrie allocates and initializes an empty trie. 
func NewStackTrie(writeFn NodeWriteFunc) *StackTrie { return &StackTrie{ - nodeType: emptyNode, - writeFn: writeFn, + writeFn: writeFn, + root: stPool.Get().(*stNode), + h: newHasher(false), } } // NewStackTrieWithOwner allocates and initializes an empty trie, but with // the additional owner field. func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner common.Hash) *StackTrie { - return &StackTrie{ - owner: owner, - nodeType: emptyNode, - writeFn: writeFn, - } + stack := NewStackTrie(writeFn) + stack.owner = owner + return stack } -// NewFromBinary initialises a serialized stacktrie with the given db. -func NewFromBinary(data []byte, writeFn NodeWriteFunc) (*StackTrie, error) { - var st StackTrie - if err := st.UnmarshalBinary(data); err != nil { - return nil, err - } - // If a database is used, we need to recursively add it to every child - if writeFn != nil { - st.setWriter(writeFn) +// Update inserts a (key, value) pair into the stack trie. +func (t *StackTrie) Update(key, value []byte) error { + k := keybytesToHex(key) + if len(value) == 0 { + panic("deletion not supported") } - return &st, nil + t.insert(t.root, k[:len(k)-1], value, nil) + return nil } -// MarshalBinary implements encoding.BinaryMarshaler -func (st *StackTrie) MarshalBinary() (data []byte, err error) { - var ( - b bytes.Buffer - w = bufio.NewWriter(&b) - ) - if err := gob.NewEncoder(w).Encode(struct { - Owner common.Hash - NodeType uint8 - Val []byte - Key []byte - }{ - st.owner, - st.nodeType, - st.val, - st.key, - }); err != nil { - return nil, err - } - for _, child := range st.children { - if child == nil { - w.WriteByte(0) - continue - } - w.WriteByte(1) - if childData, err := child.MarshalBinary(); err != nil { - return nil, err - } else { - w.Write(childData) - } +// MustUpdate is a wrapper of Update and will omit any encountered error but +// just print out an error message. 
+func (t *StackTrie) MustUpdate(key, value []byte) { + if err := t.Update(key, value); err != nil { + log.Error("Unhandled trie error in StackTrie.Update", "err", err) } - w.Flush() - return b.Bytes(), nil } -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (st *StackTrie) UnmarshalBinary(data []byte) error { - r := bytes.NewReader(data) - return st.unmarshalBinary(r) +func (t *StackTrie) Reset() { + t.writeFn = nil + t.root = stPool.Get().(*stNode) } -func (st *StackTrie) unmarshalBinary(r io.Reader) error { - var dec struct { - Owner common.Hash - NodeType uint8 - Val []byte - Key []byte - } - if err := gob.NewDecoder(r).Decode(&dec); err != nil { - return err - } - st.owner = dec.Owner - st.nodeType = dec.NodeType - st.val = dec.Val - st.key = dec.Key - - var hasChild = make([]byte, 1) - for i := range st.children { - if _, err := r.Read(hasChild); err != nil { - return err - } else if hasChild[0] == 0 { - continue - } - var child StackTrie - if err := child.unmarshalBinary(r); err != nil { - return err - } - st.children[i] = &child - } - return nil +// stNode represents a node within a StackTrie +type stNode struct { + typ uint8 // node type (as in branch, ext, leaf) + key []byte // key chunk covered by this (leaf|ext) node + val []byte // value contained by this node if it's a leaf + children [16]*stNode // list of children (for branch and exts) } -func (st *StackTrie) setWriter(writeFn NodeWriteFunc) { - st.writeFn = writeFn - for _, child := range st.children { - if child != nil { - child.setWriter(writeFn) - } - } -} - -func newLeaf(owner common.Hash, key, val []byte, writeFn NodeWriteFunc) *StackTrie { - st := stackTrieFromPool(writeFn, owner) - st.nodeType = leafNode +// newLeaf constructs a leaf node with provided node key and value. The key +// will be deep-copied in the function and safe to modify afterwards, but +// value is not. +func newLeaf(key, val []byte) *stNode { + st := stPool.Get().(*stNode) + st.typ = leafNode st.key = append(st.key, key...) st.val = val return st } -func newExt(owner common.Hash, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie { - st := stackTrieFromPool(writeFn, owner) - st.nodeType = extNode +// newExt constructs an extension node with provided node key and child. The +// key will be deep-copied in the function and safe to modify afterwards. +func newExt(key []byte, child *stNode) *stNode { + st := stPool.Get().(*stNode) + st.typ = extNode st.key = append(st.key, key...) st.children[0] = child return st } -// List all values that StackTrie#nodeType can hold +// List all values that stNode#nodeType can hold const ( emptyNode = iota branchNode @@ -202,59 +123,40 @@ const ( hashedNode ) -// Update inserts a (key, value) pair into the stack trie. -func (st *StackTrie) Update(key, value []byte) error { - k := keybytesToHex(key) - if len(value) == 0 { - panic("deletion not supported") - } - st.insert(k[:len(k)-1], value, nil) - return nil -} - -// MustUpdate is a wrapper of Update and will omit any encountered error but -// just print out an error message. 
-func (st *StackTrie) MustUpdate(key, value []byte) { - if err := st.Update(key, value); err != nil { - log.Error("Unhandled trie error in StackTrie.Update", "err", err) - } -} - -func (st *StackTrie) Reset() { - st.owner = common.Hash{} - st.writeFn = nil - st.key = st.key[:0] - st.val = nil - for i := range st.children { - st.children[i] = nil +func (n *stNode) reset() *stNode { + n.key = n.key[:0] + n.val = nil + for i := range n.children { + n.children[i] = nil } - st.nodeType = emptyNode + n.typ = emptyNode + return n } // Helper function that, given a full key, determines the index // at which the chunk pointed by st.keyOffset is different from // the same chunk in the full key. -func (st *StackTrie) getDiffIndex(key []byte) int { - for idx, nibble := range st.key { +func (n *stNode) getDiffIndex(key []byte) int { + for idx, nibble := range n.key { if nibble != key[idx] { return idx } } - return len(st.key) + return len(n.key) } // Helper function to that inserts a (key, value) pair into // the trie. -func (st *StackTrie) insert(key, value []byte, prefix []byte) { - switch st.nodeType { +func (t *StackTrie) insert(st *stNode, key, value []byte, prefix []byte) { + switch st.typ { case branchNode: /* Branch */ idx := int(key[0]) // Unresolve elder siblings for i := idx - 1; i >= 0; i-- { if st.children[i] != nil { - if st.children[i].nodeType != hashedNode { - st.children[i].hash(append(prefix, byte(i))) + if st.children[i].typ != hashedNode { + t.hash(st.children[i], append(prefix, byte(i))) } break } @@ -262,9 +164,9 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) { // Add new child if st.children[idx] == nil { - st.children[idx] = newLeaf(st.owner, key[1:], value, st.writeFn) + st.children[idx] = newLeaf(key[1:], value) } else { - st.children[idx].insert(key[1:], value, append(prefix, key[0])) + t.insert(st.children[idx], key[1:], value, append(prefix, key[0])) } case extNode: /* Ext */ @@ -279,46 +181,46 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) { if diffidx == len(st.key) { // Ext key and key segment are identical, recurse into // the child node. - st.children[0].insert(key[diffidx:], value, append(prefix, key[:diffidx]...)) + t.insert(st.children[0], key[diffidx:], value, append(prefix, key[:diffidx]...)) return } // Save the original part. Depending if the break is // at the extension's last byte or not, create an // intermediate extension or use the extension's child // node directly. - var n *StackTrie + var n *stNode if diffidx < len(st.key)-1 { // Break on the non-last byte, insert an intermediate // extension. The path prefix of the newly-inserted // extension should also contain the different byte. - n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.writeFn) - n.hash(append(prefix, st.key[:diffidx+1]...)) + n = newExt(st.key[diffidx+1:], st.children[0]) + t.hash(n, append(prefix, st.key[:diffidx+1]...)) } else { // Break on the last byte, no need to insert // an extension node: reuse the current node. // The path prefix of the original part should // still be same. n = st.children[0] - n.hash(append(prefix, st.key...)) + t.hash(n, append(prefix, st.key...)) } - var p *StackTrie + var p *stNode if diffidx == 0 { // the break is on the first byte, so // the current node is converted into // a branch node. st.children[0] = nil p = st - st.nodeType = branchNode + st.typ = branchNode } else { // the common prefix is at least one byte // long, insert a new intermediate branch // node. 
- st.children[0] = stackTrieFromPool(st.writeFn, st.owner) - st.children[0].nodeType = branchNode + st.children[0] = stPool.Get().(*stNode) + st.children[0].typ = branchNode p = st.children[0] } // Create a leaf for the inserted part - o := newLeaf(st.owner, key[diffidx+1:], value, st.writeFn) + o := newLeaf(key[diffidx+1:], value) // Insert both child leaves where they belong: origIdx := st.key[diffidx] @@ -344,18 +246,18 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) { // Check if the split occurs at the first nibble of the // chunk. In that case, no prefix extnode is necessary. // Otherwise, create that - var p *StackTrie + var p *stNode if diffidx == 0 { // Convert current leaf into a branch - st.nodeType = branchNode + st.typ = branchNode p = st st.children[0] = nil } else { // Convert current node into an ext, // and insert a child branch node. - st.nodeType = extNode - st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner) - st.children[0].nodeType = branchNode + st.typ = extNode + st.children[0] = stPool.Get().(*stNode) + st.children[0].typ = branchNode p = st.children[0] } @@ -363,11 +265,11 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) { // value and another containing the new value. The child leaf // is hashed directly in order to free up some memory. origIdx := st.key[diffidx] - p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.writeFn) - p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...)) + p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val) + t.hash(p.children[origIdx], append(prefix, st.key[:diffidx+1]...)) newIdx := key[diffidx] - p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.writeFn) + p.children[newIdx] = newLeaf(key[diffidx+1:], value) // Finally, cut off the key part that has been passed // over to the children. @@ -375,7 +277,7 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) { st.val = nil case emptyNode: /* Empty */ - st.nodeType = leafNode + st.typ = leafNode st.key = key st.val = value @@ -398,25 +300,18 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) { // - And the 'st.type' will be 'hashedNode' AGAIN // // This method also sets 'st.type' to hashedNode, and clears 'st.key'. -func (st *StackTrie) hash(path []byte) { - h := newHasher(false) - defer returnHasherToPool(h) - - st.hashRec(h, path) -} - -func (st *StackTrie) hashRec(hasher *hasher, path []byte) { +func (t *StackTrie) hash(st *stNode, path []byte) { // The switch below sets this to the RLP-encoding of this node. var encodedNode []byte - switch st.nodeType { + switch st.typ { case hashedNode: return case emptyNode: st.val = types.EmptyRootHash.Bytes() st.key = st.key[:0] - st.nodeType = hashedNode + st.typ = hashedNode return case branchNode: @@ -426,23 +321,21 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) { nodes.Children[i] = nilValueNode continue } - child.hashRec(hasher, append(path, byte(i))) + t.hash(child, append(path, byte(i))) + if len(child.val) < 32 { nodes.Children[i] = rawNode(child.val) } else { nodes.Children[i] = hashNode(child.val) } - - // Release child back to pool. st.children[i] = nil - returnToPool(child) + stPool.Put(child.reset()) // Release child back to pool. 
} - - nodes.encode(hasher.encbuf) - encodedNode = hasher.encodedBytes() + nodes.encode(t.h.encbuf) + encodedNode = t.h.encodedBytes() case extNode: - st.children[0].hashRec(hasher, append(path, st.key...)) + t.hash(st.children[0], append(path, st.key...)) n := shortNode{Key: hexToCompactInPlace(st.key)} if len(st.children[0].val) < 32 { @@ -450,26 +343,24 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) { } else { n.Val = hashNode(st.children[0].val) } + n.encode(t.h.encbuf) + encodedNode = t.h.encodedBytes() - n.encode(hasher.encbuf) - encodedNode = hasher.encodedBytes() - - // Release child back to pool. - returnToPool(st.children[0]) + stPool.Put(st.children[0].reset()) // Release child back to pool. st.children[0] = nil case leafNode: st.key = append(st.key, byte(16)) n := shortNode{Key: hexToCompactInPlace(st.key), Val: valueNode(st.val)} - n.encode(hasher.encbuf) - encodedNode = hasher.encodedBytes() + n.encode(t.h.encbuf) + encodedNode = t.h.encodedBytes() default: panic("invalid node type") } - st.nodeType = hashedNode + st.typ = hashedNode st.key = st.key[:0] if len(encodedNode) < 32 { st.val = common.CopyBytes(encodedNode) @@ -478,18 +369,16 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) { // Write the hash to the 'val'. We allocate a new val here to not mutate // input values - st.val = hasher.hashData(encodedNode) - if st.writeFn != nil { - st.writeFn(st.owner, path, common.BytesToHash(st.val), encodedNode) + st.val = t.h.hashData(encodedNode) + if t.writeFn != nil { + t.writeFn(t.owner, path, common.BytesToHash(st.val), encodedNode) } } // Hash returns the hash of the current node. -func (st *StackTrie) Hash() (h common.Hash) { - hasher := newHasher(false) - defer returnHasherToPool(hasher) - - st.hashRec(hasher, nil) +func (t *StackTrie) Hash() (h common.Hash) { + st := t.root + t.hash(st, nil) if len(st.val) == 32 { copy(h[:], st.val) return h @@ -497,9 +386,9 @@ func (st *StackTrie) Hash() (h common.Hash) { // If the node's RLP isn't 32 bytes long, the node will not // be hashed, and instead contain the rlp-encoding of the // node. For the top level node, we need to force the hashing. - hasher.sha.Reset() - hasher.sha.Write(st.val) - hasher.sha.Read(h[:]) + t.h.sha.Reset() + t.h.sha.Write(st.val) + t.h.sha.Read(h[:]) return h } @@ -510,14 +399,12 @@ func (st *StackTrie) Hash() (h common.Hash) { // // The associated database is expected, otherwise the whole commit // functionality should be disabled. -func (st *StackTrie) Commit() (h common.Hash, err error) { - if st.writeFn == nil { +func (t *StackTrie) Commit() (h common.Hash, err error) { + if t.writeFn == nil { return common.Hash{}, ErrCommitDisabled } - hasher := newHasher(false) - defer returnHasherToPool(hasher) - - st.hashRec(hasher, nil) + st := t.root + t.hash(st, nil) if len(st.val) == 32 { copy(h[:], st.val) return h, nil @@ -525,10 +412,10 @@ func (st *StackTrie) Commit() (h common.Hash, err error) { // If the node's RLP isn't 32 bytes long, the node will not // be hashed (and committed), and instead contain the rlp-encoding of the // node. For the top level node, we need to force the hashing+commit. 
-	hasher.sha.Reset()
-	hasher.sha.Write(st.val)
-	hasher.sha.Read(h[:])
+	t.h.sha.Reset()
+	t.h.sha.Write(st.val)
+	t.h.sha.Read(h[:])
 
-	st.writeFn(st.owner, nil, h, st.val)
+	t.writeFn(t.owner, nil, h, st.val)
 	return h, nil
 }
diff --git a/trie/stacktrie_marshalling.go b/trie/stacktrie_marshalling.go
new file mode 100644
index 0000000000..c0bb07f868
--- /dev/null
+++ b/trie/stacktrie_marshalling.go
@@ -0,0 +1,120 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"encoding/gob"
+)
+
+// Compile-time interface checks.
+var (
+	_ = encoding.BinaryMarshaler((*StackTrie)(nil))
+	_ = encoding.BinaryUnmarshaler((*StackTrie)(nil))
+)
+
+// NewFromBinaryV2 initialises a stacktrie from the given serialized blob.
+// OBS! Format was changed along with the name of this constructor.
+func NewFromBinaryV2(data []byte) (*StackTrie, error) {
+	stack := NewStackTrie(nil)
+	if err := stack.UnmarshalBinary(data); err != nil {
+		return nil, err
+	}
+	return stack, nil
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (t *StackTrie) MarshalBinary() (data []byte, err error) {
+	var (
+		b bytes.Buffer
+		w = bufio.NewWriter(&b)
+	)
+	if err := gob.NewEncoder(w).Encode(t.owner); err != nil {
+		return nil, err
+	}
+	if err := t.root.marshalInto(w); err != nil {
+		return nil, err
+	}
+	w.Flush()
+	return b.Bytes(), nil
+}
+
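The decoder below mirrors this encoder. A rough, self-contained sketch of the
round trip (not part of the patch; it assumes the trie package as modified in
this change, and the owner hash, keys and values are made up):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/trie"
	)

	func main() {
		st := trie.NewStackTrieWithOwner(nil, common.Hash{0x12})
		// Stack tries expect keys to be inserted in ascending order.
		st.MustUpdate([]byte{0x01}, []byte("val-1"))
		st.MustUpdate([]byte{0x02}, []byte("val-2"))

		blob, err := st.MarshalBinary() // gob-encoded owner, then the node tree
		if err != nil {
			panic(err)
		}
		dup, err := trie.NewFromBinaryV2(blob)
		if err != nil {
			panic(err)
		}
		fmt.Println(st.Hash() == dup.Hash()) // true: the copy hashes identically
	}

+// UnmarshalBinary implements encoding.BinaryUnmarshaler.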
+func (t *StackTrie) UnmarshalBinary(data []byte) error { + r := bytes.NewReader(data) + if err := gob.NewDecoder(r).Decode(&t.owner); err != nil { + return err + } + if err := t.root.unmarshalFrom(r); err != nil { + return err + } + return nil +} + +type stackNodeMarshaling struct { + Typ uint8 + Key []byte + Val []byte +} + +func (n *stNode) marshalInto(w *bufio.Writer) (err error) { + enc := stackNodeMarshaling{ + Typ: n.typ, + Key: n.key, + Val: n.val, + } + if err := gob.NewEncoder(w).Encode(enc); err != nil { + return err + } + for _, child := range n.children { + if child == nil { + w.WriteByte(0) + continue + } + w.WriteByte(1) + if err := child.marshalInto(w); err != nil { + return err + } + } + return nil +} + +func (n *stNode) unmarshalFrom(r *bytes.Reader) error { + var dec stackNodeMarshaling + if err := gob.NewDecoder(r).Decode(&dec); err != nil { + return err + } + n.typ = dec.Typ + n.key = dec.Key + n.val = dec.Val + + for i := range n.children { + if b, err := r.ReadByte(); err != nil { + return err + } else if b == 0 { + continue + } + var child stNode + if err := child.unmarshalFrom(r); err != nil { + return err + } + n.children[i] = &child + } + return nil +} diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go index 6bd0b83e39..5b86a971e1 100644 --- a/trie/stacktrie_test.go +++ b/trie/stacktrie_test.go @@ -198,12 +198,11 @@ func TestStackTrieInsertAndHash(t *testing.T) { {"000003", "XXXXXXXXXXXXXXXXXXXXXXXXXXXX", "962c0fffdeef7612a4f7bff1950d67e3e81c878e48b9ae45b3b374253b050bd8"}, }, } - st := NewStackTrie(nil) for i, test := range tests { // The StackTrie does not allow Insert(), Hash(), Insert(), ... // so we will create new trie for every sequence length of inserts. for l := 1; l <= len(test); l++ { - st.Reset() + st := NewStackTrie(nil) for j := 0; j < l; j++ { kv := &test[j] if err := st.Update(common.FromHex(kv.K), []byte(kv.V)); err != nil { @@ -382,7 +381,7 @@ func TestStacktrieNotModifyValues(t *testing.T) { // serialize/unserialize it a lot func TestStacktrieSerialization(t *testing.T) { var ( - st = NewStackTrie(nil) + st = NewStackTrieWithOwner(nil, common.Hash{0x12}) nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) keyB = big.NewInt(1) keyDelta = big.NewInt(1) @@ -411,7 +410,7 @@ func TestStacktrieSerialization(t *testing.T) { if err != nil { t.Fatal(err) } - newSt, err := NewFromBinary(blob, nil) + newSt, err := NewFromBinaryV2(blob) if err != nil { t.Fatal(err) } @@ -421,4 +420,7 @@ func TestStacktrieSerialization(t *testing.T) { if have, want := st.Hash(), nt.Hash(); have != want { t.Fatalf("have %#x want %#x", have, want) } + if have, want := st.owner, (common.Hash{0x12}); have != want { + t.Fatalf("have %#x want %#x", have, want) + } } From fa6107c85e5717f10b2a57d470ead0e34b2152ba Mon Sep 17 00:00:00 2001 From: 0xbstn Date: Tue, 10 Oct 2023 10:22:03 +0200 Subject: [PATCH 81/98] core: fix typos (#28255) fixes various typos in core --- core/vm/contract.go | 4 ++-- core/vm/gas_table.go | 2 +- core/vm/interpreter.go | 2 +- core/vm/jump_table_export.go | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/core/vm/contract.go b/core/vm/contract.go index bb0902969e..e4b03bd74f 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -31,13 +31,13 @@ type ContractRef interface { // AccountRef implements ContractRef. // // Account references are used during EVM initialisation and -// it's primary use is to fetch addresses. Removing this object +// its primary use is to fetch addresses. 
Removing this object // proves difficult because of the cached jump destinations which // are fetched from the parent contract (i.e. the caller), which // is a ContractRef. type AccountRef common.Address -// Address casts AccountRef to a Address +// Address casts AccountRef to an Address func (ar AccountRef) Address() common.Address { return (common.Address)(ar) } // Contract represents an ethereum contract in the state database. It contains diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 5153c8b7a3..4b141d8f9a 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -104,7 +104,7 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi // Legacy rules should be applied if we are in Petersburg (removal of EIP-1283) // OR Constantinople is not active if evm.chainRules.IsPetersburg || !evm.chainRules.IsConstantinople { - // This checks for 3 scenario's and calculates gas accordingly: + // This checks for 3 scenarios and calculates gas accordingly: // // 1. From a zero-value address to a non-zero value (NEW VALUE) // 2. From a non-zero value address to a zero-value address (DELETE) diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 873337850e..28da2e80e6 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -45,7 +45,7 @@ type EVMInterpreter struct { table *JumpTable hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes - hasherBuf common.Hash // Keccak256 hasher result array shared aross opcodes + hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes readOnly bool // Whether to throw on stateful modifications returnData []byte // Last CALL's return data for subsequent reuse diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go index 6ea47d63a2..b74109da0a 100644 --- a/core/vm/jump_table_export.go +++ b/core/vm/jump_table_export.go @@ -22,7 +22,7 @@ import ( "github.com/ethereum/go-ethereum/params" ) -// LookupInstructionSet returns the instructionset for the fork configured by +// LookupInstructionSet returns the instruction set for the fork configured by // the rules. func LookupInstructionSet(rules params.Rules) (JumpTable, error) { switch { @@ -56,7 +56,7 @@ func LookupInstructionSet(rules params.Rules) (JumpTable, error) { return newFrontierInstructionSet(), nil } -// Stack returns the mininum and maximum stack requirements. +// Stack returns the minimum and maximum stack requirements. func (op *operation) Stack() (int, int) { return op.minStack, op.maxStack } From db9afae2eab509d01805a75440390bd000aa3eb4 Mon Sep 17 00:00:00 2001 From: hyunchel <3271191+hyunchel@users.noreply.github.com> Date: Tue, 10 Oct 2023 04:22:45 -0400 Subject: [PATCH 82/98] eth, params: fix typos (#28286) * eth/ethconfig: fix typo on comment * params/config: fix typo on comment * eth/ethconfig: fix typo on comment --- eth/ethconfig/config.go | 4 ++-- params/config.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 55441a2cb9..342ff3da9e 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -83,7 +83,7 @@ var Defaults = Config{ //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go -// Config contains configuration options for of the ETH and LES protocols. +// Config contains configuration options for ETH and LES protocols. type Config struct { // The genesis block, which is inserted if the database is empty. 
// If nil, the Ethereum main net block is used. @@ -177,7 +177,7 @@ func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (conse return beacon.New(clique.New(config.Clique, db)), nil } // If defaulting to proof-of-work, enforce an already merged network since - // we cannot run PoW algorithms and more, so we cannot even follow a chain + // we cannot run PoW algorithms anymore, so we cannot even follow a chain // not coordinated by a beacon node. if !config.TerminalTotalDifficultyPassed { return nil, errors.New("ethash is only supported as a historical component of already merged networks") diff --git a/params/config.go b/params/config.go index ac55d3771a..88ff772a1d 100644 --- a/params/config.go +++ b/params/config.go @@ -214,7 +214,7 @@ var ( } // TestChainConfig contains every protocol change (EIPs) introduced - // and accepted by the Ethereum core developers for testing proposes. + // and accepted by the Ethereum core developers for testing purposes. TestChainConfig = &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), From 6b1e4f42111f6daef6c5c0eb4775b2b39f6a3401 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 10 Oct 2023 10:30:47 +0200 Subject: [PATCH 83/98] all: move light.NodeSet to trienode.ProofSet (#28287) This is a minor refactor in preparation of changes to range verifier. This PR contains no intentional functional changes but moves (and renames) the light.NodeSet --- cmd/devp2p/internal/ethtest/snap.go | 6 +-- eth/protocols/snap/handler.go | 10 ++--- eth/protocols/snap/sync.go | 10 ++--- eth/protocols/snap/sync_test.go | 18 ++++---- les/client_handler.go | 3 +- les/handler_test.go | 9 ++-- les/odr_requests.go | 11 ++--- les/peer.go | 3 +- les/server_requests.go | 9 ++-- light/odr.go | 7 +-- light/odr_test.go | 3 +- light/postprocess.go | 2 +- light/nodeset.go => trie/trienode/proof.go | 50 +++++++++++----------- 13 files changed, 74 insertions(+), 67 deletions(-) rename light/nodeset.go => trie/trienode/proof.go (73%) diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go index f947e4bc9b..ea528e5e2f 100644 --- a/cmd/devp2p/internal/ethtest/snap.go +++ b/cmd/devp2p/internal/ethtest/snap.go @@ -27,8 +27,8 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/internal/utesting" - "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" "golang.org/x/crypto/sha3" ) @@ -530,11 +530,11 @@ func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error { for i, key := range hashes { keys[i] = common.CopyBytes(key[:]) } - nodes := make(light.NodeList, len(proof)) + nodes := make(trienode.ProofList, len(proof)) for i, node := range proof { nodes[i] = node } - proofdb := nodes.NodeSet() + proofdb := nodes.Set() var end []byte if len(keys) > 0 { diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index b2fd03766e..ce23ca514c 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -24,13 +24,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" 
"github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" ) const ( @@ -321,7 +321,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac it.Release() // Generate the Merkle proofs for the first and last account - proof := light.NewNodeSet() + proof := trienode.NewProofSet() if err := tr.Prove(req.Origin[:], proof); err != nil { log.Warn("Failed to prove account range", "origin", req.Origin, "err", err) return nil, nil @@ -333,7 +333,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac } } var proofs [][]byte - for _, blob := range proof.NodeList() { + for _, blob := range proof.List() { proofs = append(proofs, blob) } return accounts, proofs @@ -427,7 +427,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP if err != nil { return nil, nil } - proof := light.NewNodeSet() + proof := trienode.NewProofSet() if err := stTrie.Prove(origin[:], proof); err != nil { log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err) return nil, nil @@ -438,7 +438,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP return nil, nil } } - for _, blob := range proof.NodeList() { + for _, blob := range proof.List() { proofs = append(proofs, blob) } // Proof terminates the reply as proofs are only added if a node diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 0f5f2ccdfe..6a2d92c009 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -37,11 +37,11 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/msgrate" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" "golang.org/x/crypto/sha3" ) @@ -2394,11 +2394,11 @@ func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, acco for i, key := range hashes { keys[i] = common.CopyBytes(key[:]) } - nodes := make(light.NodeList, len(proof)) + nodes := make(trienode.ProofList, len(proof)) for i, node := range proof { nodes[i] = node } - proofdb := nodes.NodeSet() + proofdb := nodes.Set() var end []byte if len(keys) > 0 { @@ -2639,7 +2639,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo for j, key := range hashes[i] { keys[j] = common.CopyBytes(key[:]) } - nodes := make(light.NodeList, 0, len(proof)) + nodes := make(trienode.ProofList, 0, len(proof)) if i == len(hashes)-1 { for _, node := range proof { nodes = append(nodes, node) @@ -2658,7 +2658,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo } else { // A proof was attached, the response is only partial, check that the // returned data is indeed part of the storage trie - proofdb := nodes.NodeSet() + proofdb := nodes.Set() var end []byte if len(keys) > 0 { diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 1514ad4e13..1ee381a661 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -31,7 +31,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" 
"github.com/ethereum/go-ethereum/trie" @@ -273,7 +272,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H // Unless we send the entire trie, we need to supply proofs // Actually, we need to supply proofs either way! This seems to be an implementation // quirk in go-ethereum - proof := light.NewNodeSet() + proof := trienode.NewProofSet() if err := t.accountTrie.Prove(origin[:], proof); err != nil { t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err) } @@ -283,7 +282,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H t.logger.Error("Could not prove last item", "error", err) } } - for _, blob := range proof.NodeList() { + for _, blob := range proof.List() { proofs = append(proofs, blob) } return keys, vals, proofs @@ -353,7 +352,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm if originHash != (common.Hash{}) || (abort && len(keys) > 0) { // If we're aborting, we need to prove the first and last item // This terminates the response (and thus the loop) - proof := light.NewNodeSet() + proof := trienode.NewProofSet() stTrie := t.storageTries[account] // Here's a potential gotcha: when constructing the proof, we cannot @@ -368,7 +367,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm t.logger.Error("Could not prove last item", "error", err) } } - for _, blob := range proof.NodeList() { + for _, blob := range proof.List() { proofs = append(proofs, blob) } break @@ -411,7 +410,7 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco if exit { // If we're aborting, we need to prove the first and last item // This terminates the response (and thus the loop) - proof := light.NewNodeSet() + proof := trienode.NewProofSet() stTrie := t.storageTries[account] // Here's a potential gotcha: when constructing the proof, we cannot @@ -427,7 +426,7 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco t.logger.Error("Could not prove last item", "error", err) } } - for _, blob := range proof.NodeList() { + for _, blob := range proof.List() { proofs = append(proofs, blob) } break @@ -599,9 +598,10 @@ func testSyncBloatedProof(t *testing.T, scheme string) { vals = append(vals, entry.v) } // The proofs - proof := light.NewNodeSet() + proof := trienode.NewProofSet() if err := t.accountTrie.Prove(origin[:], proof); err != nil { t.logger.Error("Could not prove origin", "origin", origin, "error", err) + t.logger.Error("Could not prove origin", "origin", origin, "error", err) } // The bloat: add proof of every single element for _, entry := range t.accountValues { @@ -614,7 +614,7 @@ func testSyncBloatedProof(t *testing.T, scheme string) { keys = append(keys[:1], keys[2:]...) vals = append(vals[:1], vals[2:]...) 
} - for _, blob := range proof.NodeList() { + for _, blob := range proof.List() { proofs = append(proofs, blob) } if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil { diff --git a/les/client_handler.go b/les/client_handler.go index 4cfeba08fe..50f6dce879 100644 --- a/les/client_handler.go +++ b/les/client_handler.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/trie/trienode" ) // clientHandler is responsible for receiving and processing all incoming server @@ -236,7 +237,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { p.Log().Trace("Received les/2 proofs response") var resp struct { ReqID, BV uint64 - Data light.NodeList + Data trienode.ProofList } if err := msg.Decode(&resp); err != nil { return errResp(ErrDecode, "msg %v: %v", msg, err) diff --git a/les/handler_test.go b/les/handler_test.go index 26a083f475..c803a5ddb3 100644 --- a/les/handler_test.go +++ b/les/handler_test.go @@ -37,6 +37,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" ) func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error { @@ -401,7 +402,7 @@ func testGetProofs(t *testing.T, protocol int) { bc := server.handler.blockchain var proofreqs []ProofReq - proofsV2 := light.NewNodeSet() + proofsV2 := trienode.NewProofSet() accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}} for i := uint64(0); i <= bc.CurrentBlock().Number.Uint64(); i++ { @@ -419,7 +420,7 @@ func testGetProofs(t *testing.T, protocol int) { } // Send the proof request and verify the response sendRequest(rawPeer.app, GetProofsV2Msg, 42, proofreqs) - if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil { + if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.List()); err != nil { t.Errorf("proofs mismatch: %v", err) } } @@ -456,10 +457,10 @@ func testGetStaleProof(t *testing.T, protocol int) { var expected []rlp.RawValue if wantOK { - proofsV2 := light.NewNodeSet() + proofsV2 := trienode.NewProofSet() t, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB()) t.Prove(account, proofsV2) - expected = proofsV2.NodeList() + expected = proofsV2.List() } if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil { t.Errorf("codes mismatch: %v", err) diff --git a/les/odr_requests.go b/les/odr_requests.go index 2b23e0540c..c907018590 100644 --- a/les/odr_requests.go +++ b/les/odr_requests.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" ) var ( @@ -222,9 +223,9 @@ func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error { if msg.MsgType != MsgProofsV2 { return errInvalidMessageType } - proofs := msg.Obj.(light.NodeList) + proofs := msg.Obj.(trienode.ProofList) // Verify the proof and store if checks out - nodeSet := proofs.NodeSet() + nodeSet := proofs.Set() reads := &readTraceDB{db: nodeSet} if _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil { return fmt.Errorf("merkle proof verification failed: %v", err) @@ -308,7 +309,7 @@ type HelperTrieReq struct { } type HelperTrieResps struct { // describes all 
responses, not just a single one - Proofs light.NodeList + Proofs trienode.ProofList AuxData [][]byte } @@ -356,7 +357,7 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error { if len(resp.AuxData) != 1 { return errInvalidEntryCount } - nodeSet := resp.Proofs.NodeSet() + nodeSet := resp.Proofs.Set() headerEnc := resp.AuxData[0] if len(headerEnc) == 0 { return errHeaderUnavailable @@ -451,7 +452,7 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error { } resps := msg.Obj.(HelperTrieResps) proofs := resps.Proofs - nodeSet := proofs.NodeSet() + nodeSet := proofs.Set() reads := &readTraceDB{db: nodeSet} r.BloomBits = make([][]byte, len(r.SectionIndexList)) diff --git a/les/peer.go b/les/peer.go index 48381689ef..58cb928700 100644 --- a/les/peer.go +++ b/les/peer.go @@ -40,6 +40,7 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/trienode" ) var ( @@ -899,7 +900,7 @@ func (p *clientPeer) replyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *re } // replyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested. -func (p *clientPeer) replyProofsV2(reqID uint64, proofs light.NodeList) *reply { +func (p *clientPeer) replyProofsV2(reqID uint64, proofs trienode.ProofList) *reply { data, _ := rlp.EncodeToBytes(proofs) return &reply{p.rw, ProofsV2Msg, reqID, data} } diff --git a/les/server_requests.go b/les/server_requests.go index 485be6d9e9..9a249f04c9 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" ) // serverBackend defines the backend functions needed for serving LES requests @@ -378,7 +379,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { err error ) bc := backend.BlockChain() - nodes := light.NewNodeSet() + nodes := trienode.NewProofSet() for i, request := range r.Reqs { if i != 0 && !waitOrStop() { @@ -444,7 +445,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { break } } - return p.replyProofsV2(r.ReqID, nodes.NodeList()) + return p.replyProofsV2(r.ReqID, nodes.List()) }, r.ReqID, uint64(len(r.Reqs)), nil } @@ -463,7 +464,7 @@ func handleGetHelperTrieProofs(msg Decoder) (serveRequestFn, uint64, uint64, err auxData [][]byte ) bc := backend.BlockChain() - nodes := light.NewNodeSet() + nodes := trienode.NewProofSet() for i, request := range r.Reqs { if i != 0 && !waitOrStop() { return nil @@ -498,7 +499,7 @@ func handleGetHelperTrieProofs(msg Decoder) (serveRequestFn, uint64, uint64, err break } } - return p.replyHelperTrieProofs(r.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}) + return p.replyHelperTrieProofs(r.ReqID, HelperTrieResps{Proofs: nodes.List(), AuxData: auxData}) }, r.ReqID, uint64(len(r.Reqs)), nil } diff --git a/light/odr.go b/light/odr.go index 2597027435..39f626ee2c 100644 --- a/light/odr.go +++ b/light/odr.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/trie/trienode" ) // NoOdr is the default context passed to an ODR capable function when the ODR @@ -90,7 +91,7 @@ func StorageTrieID(state *TrieID, address common.Address, root common.Hash) *Tri type 
TrieRequest struct { Id *TrieID Key []byte - Proof *NodeSet + Proof *trienode.ProofSet } // StoreResult stores the retrieved data in local database @@ -143,7 +144,7 @@ type ChtRequest struct { ChtRoot common.Hash Header *types.Header Td *big.Int - Proof *NodeSet + Proof *trienode.ProofSet } // StoreResult stores the retrieved data in local database @@ -163,7 +164,7 @@ type BloomRequest struct { SectionIndexList []uint64 BloomTrieRoot common.Hash BloomBits [][]byte - Proofs *NodeSet + Proofs *trienode.ProofSet } // StoreResult stores the retrieved data in local database diff --git a/light/odr_test.go b/light/odr_test.go index d8a7f10675..c415d73e7e 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -37,6 +37,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/trienode" ) var ( @@ -95,7 +96,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error { if err != nil { panic(err) } - nodes := NewNodeSet() + nodes := trienode.NewProofSet() t.Prove(req.Key, nodes) req.Proof = nodes case *CodeRequest: diff --git a/light/postprocess.go b/light/postprocess.go index 13d75f8617..a317e30b90 100644 --- a/light/postprocess.go +++ b/light/postprocess.go @@ -363,7 +363,7 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error { indexCh := make(chan uint, types.BloomBitLength) type res struct { - nodes *NodeSet + nodes *trienode.ProofSet err error } resCh := make(chan res, types.BloomBitLength) diff --git a/light/nodeset.go b/trie/trienode/proof.go similarity index 73% rename from light/nodeset.go rename to trie/trienode/proof.go index 3662596785..012f0087dd 100644 --- a/light/nodeset.go +++ b/trie/trienode/proof.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package light +package trienode import ( "errors" @@ -26,9 +26,9 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -// NodeSet stores a set of trie nodes. It implements trie.Database and can also +// ProofSet stores a set of trie nodes. It implements trie.Database and can also // act as a cache for another trie.Database. 
-type NodeSet struct { +type ProofSet struct { nodes map[string][]byte order []string @@ -36,15 +36,15 @@ type NodeSet struct { lock sync.RWMutex } -// NewNodeSet creates an empty node set -func NewNodeSet() *NodeSet { - return &NodeSet{ +// NewProofSet creates an empty node set +func NewProofSet() *ProofSet { + return &ProofSet{ nodes: make(map[string][]byte), } } // Put stores a new node in the set -func (db *NodeSet) Put(key []byte, value []byte) error { +func (db *ProofSet) Put(key []byte, value []byte) error { db.lock.Lock() defer db.lock.Unlock() @@ -61,7 +61,7 @@ func (db *NodeSet) Put(key []byte, value []byte) error { } // Delete removes a node from the set -func (db *NodeSet) Delete(key []byte) error { +func (db *ProofSet) Delete(key []byte) error { db.lock.Lock() defer db.lock.Unlock() @@ -70,7 +70,7 @@ func (db *NodeSet) Delete(key []byte) error { } // Get returns a stored node -func (db *NodeSet) Get(key []byte) ([]byte, error) { +func (db *ProofSet) Get(key []byte) ([]byte, error) { db.lock.RLock() defer db.lock.RUnlock() @@ -81,13 +81,13 @@ func (db *NodeSet) Get(key []byte) ([]byte, error) { } // Has returns true if the node set contains the given key -func (db *NodeSet) Has(key []byte) (bool, error) { +func (db *ProofSet) Has(key []byte) (bool, error) { _, err := db.Get(key) return err == nil, nil } // KeyCount returns the number of nodes in the set -func (db *NodeSet) KeyCount() int { +func (db *ProofSet) KeyCount() int { db.lock.RLock() defer db.lock.RUnlock() @@ -95,19 +95,19 @@ func (db *NodeSet) KeyCount() int { } // DataSize returns the aggregated data size of nodes in the set -func (db *NodeSet) DataSize() int { +func (db *ProofSet) DataSize() int { db.lock.RLock() defer db.lock.RUnlock() return db.dataSize } -// NodeList converts the node set to a NodeList -func (db *NodeSet) NodeList() NodeList { +// List converts the node set to a ProofList +func (db *ProofSet) List() ProofList { db.lock.RLock() defer db.lock.RUnlock() - var values NodeList + var values ProofList for _, key := range db.order { values = append(values, db.nodes[key]) } @@ -115,7 +115,7 @@ func (db *NodeSet) NodeList() NodeList { } // Store writes the contents of the set to the given database -func (db *NodeSet) Store(target ethdb.KeyValueWriter) { +func (db *ProofSet) Store(target ethdb.KeyValueWriter) { db.lock.RLock() defer db.lock.RUnlock() @@ -124,36 +124,36 @@ func (db *NodeSet) Store(target ethdb.KeyValueWriter) { } } -// NodeList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter. -type NodeList []rlp.RawValue +// ProofList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter. +type ProofList []rlp.RawValue // Store writes the contents of the list to the given database -func (n NodeList) Store(db ethdb.KeyValueWriter) { +func (n ProofList) Store(db ethdb.KeyValueWriter) { for _, node := range n { db.Put(crypto.Keccak256(node), node) } } -// NodeSet converts the node list to a NodeSet -func (n NodeList) NodeSet() *NodeSet { - db := NewNodeSet() +// Set converts the node list to a ProofSet +func (n ProofList) Set() *ProofSet { + db := NewProofSet() n.Store(db) return db } // Put stores a new node at the end of the list -func (n *NodeList) Put(key []byte, value []byte) error { +func (n *ProofList) Put(key []byte, value []byte) error { *n = append(*n, value) return nil } // Delete panics as there's no reason to remove a node from the list. 
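// A minimal usage sketch (illustrative, not part of this patch), assuming
// only the renamed API shown in this diff: NewProofSet, Put, List, Set,
// KeyCount and DataSize. A prover fills a ProofSet, the ordered ProofList
// goes over the wire, and the receiver rebuilds a set from it:
//
//	set := trienode.NewProofSet()
//	set.Put([]byte{0x01}, []byte("node-1")) // insertion order is preserved
//	set.Put([]byte{0x02}, []byte("node-2"))
//
//	list := set.List() // ordered []rlp.RawValue, ready for rlp.EncodeToBytes
//	fmt.Println(set.KeyCount(), set.DataSize())
//
//	rebuilt := list.Set() // receiver side; nodes are re-keyed by keccak256(node)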
-func (n *NodeList) Delete(key []byte) error { +func (n *ProofList) Delete(key []byte) error { panic("not supported") } // DataSize returns the aggregated data size of nodes in the list -func (n NodeList) DataSize() int { +func (n ProofList) DataSize() int { var size int for _, node := range n { size += len(node) From 65052974561461b47badeef370e6224f4cdd476a Mon Sep 17 00:00:00 2001 From: Brandon Liu Date: Tue, 10 Oct 2023 16:32:14 +0800 Subject: [PATCH 84/98] trie: fix a typo, use correct docstrings (#28302) * fix a typo * trie: additional fixes to docstrings --------- Co-authored-by: Martin Holst Swende --- trie/sync.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/trie/sync.go b/trie/sync.go index 9da0706075..6939aed76d 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -302,7 +302,7 @@ func (s *Sync) Missing(max int) ([]string, []common.Hash, []common.Hash) { } // ProcessCode injects the received data for requested item. Note it can -// happpen that the single response commits two pending requests(e.g. +// happen that the single response commits two pending requests(e.g. // there are two requests one for code and one for node but the hash // is same). In this case the second response for the same hash will // be treated as "non-requested" item or "already-processed" item but @@ -391,7 +391,7 @@ func (s *Sync) Pending() int { return len(s.nodeReqs) + len(s.codeReqs) } -// schedule inserts a new state retrieval request into the fetch queue. If there +// scheduleNodeRequest inserts a new state retrieval request into the fetch queue. If there // is already a pending request for this node, the new request will be discarded // and only a parent reference added to the old one. func (s *Sync) scheduleNodeRequest(req *nodeRequest) { @@ -406,7 +406,7 @@ func (s *Sync) scheduleNodeRequest(req *nodeRequest) { s.queue.Push(string(req.path), prio) } -// schedule inserts a new state retrieval request into the fetch queue. If there +// scheduleCodeRequest inserts a new state retrieval request into the fetch queue. If there // is already a pending request for this node, the new request will be discarded // and only a parent reference added to the old one. func (s *Sync) scheduleCodeRequest(req *codeRequest) { @@ -556,7 +556,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) { return requests, nil } -// commit finalizes a retrieval request and stores it into the membatch. If any +// commitNodeRequest finalizes a retrieval request and stores it into the membatch. If any // of the referencing parent requests complete due to this commit, they are also // committed themselves. func (s *Sync) commitNodeRequest(req *nodeRequest) error { @@ -591,7 +591,7 @@ func (s *Sync) commitNodeRequest(req *nodeRequest) error { return nil } -// commit finalizes a retrieval request and stores it into the membatch. If any +// commitCodeRequest finalizes a retrieval request and stores it into the membatch. If any // of the referencing parent requests complete due to this commit, they are also // committed themselves. 
func (s *Sync) commitCodeRequest(req *codeRequest) error { From 8afbcf47138a14c3e653e9d7e3523c5db342405b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 10 Oct 2023 11:35:51 +0300 Subject: [PATCH 85/98] eth: enforce announcement metadatas and drop peers violating the protocol (#28261) * eth: enforce announcement metadatas and drop peers violating the protocol * eth/fetcher: relax eth/68 validation a bit for flakey clients * tests/fuzzers/txfetcher: pull in suggestion from Marius * eth/fetcher: add tests for peer dropping * eth/fetcher: linter linter linter linter linter --- eth/fetcher/tx_fetcher.go | 127 ++++-- eth/fetcher/tx_fetcher_test.go | 456 ++++++++++++++++++-- eth/handler.go | 2 +- eth/handler_eth.go | 4 +- tests/fuzzers/txfetcher/txfetcher_fuzzer.go | 7 +- 5 files changed, 531 insertions(+), 65 deletions(-) diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index a11b5e2164..8bb5d579dc 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -20,6 +20,7 @@ import ( "bytes" "errors" "fmt" + "math" mrand "math/rand" "sort" "time" @@ -105,6 +106,14 @@ var ( type txAnnounce struct { origin string // Identifier of the peer originating the notification hashes []common.Hash // Batch of transaction hashes being announced + metas []*txMetadata // Batch of metadatas associated with the hashes (nil before eth/68) +} + +// txMetadata is a set of extra data transmitted along the announcement for better +// fetch scheduling. +type txMetadata struct { + kind byte // Transaction consensus type + size uint32 // Transaction size in bytes } // txRequest represents an in-flight transaction retrieval request destined to @@ -120,6 +129,7 @@ type txRequest struct { type txDelivery struct { origin string // Identifier of the peer originating the notification hashes []common.Hash // Batch of transaction hashes having been delivered + metas []txMetadata // Batch of metadatas associated with the delivered hashes direct bool // Whether this is a direct reply or a broadcast } @@ -155,14 +165,14 @@ type TxFetcher struct { // Stage 1: Waiting lists for newly discovered transactions that might be // broadcast without needing explicit request/reply round trips. - waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast - waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist - waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection) + waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast + waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist + waitslots map[string]map[common.Hash]*txMetadata // Waiting announcements grouped by peer (DoS protection) // Stage 2: Queue of transactions that waiting to be allocated to some peer // to be retrieved directly. - announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer - announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash + announces map[string]map[common.Hash]*txMetadata // Set of announced transactions, grouped by origin peer + announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash // Stage 3: Set of transactions currently being retrieved, some which may be // fulfilled and some rescheduled. 
Note, this step shares 'announces' from the @@ -175,6 +185,7 @@ type TxFetcher struct { hasTx func(common.Hash) bool // Retrieves a tx from the local txpool addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer + dropPeer func(string) // Drops a peer in case of announcement violation step chan struct{} // Notification channel when the fetcher loop iterates clock mclock.Clock // Time wrapper to simulate in tests @@ -183,14 +194,14 @@ type TxFetcher struct { // NewTxFetcher creates a transaction fetcher to retrieve transaction // based on hash announcements. -func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher { - return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil) +func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher { + return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, dropPeer, mclock.System{}, nil) } // NewTxFetcherForTests is a testing method to mock out the realtime clock with // a simulated version and the internal randomness with a deterministic one. func NewTxFetcherForTests( - hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, + hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string), clock mclock.Clock, rand *mrand.Rand) *TxFetcher { return &TxFetcher{ notify: make(chan *txAnnounce), @@ -199,8 +210,8 @@ func NewTxFetcherForTests( quit: make(chan struct{}), waitlist: make(map[common.Hash]map[string]struct{}), waittime: make(map[common.Hash]mclock.AbsTime), - waitslots: make(map[string]map[common.Hash]struct{}), - announces: make(map[string]map[common.Hash]struct{}), + waitslots: make(map[string]map[common.Hash]*txMetadata), + announces: make(map[string]map[common.Hash]*txMetadata), announced: make(map[common.Hash]map[string]struct{}), fetching: make(map[common.Hash]string), requests: make(map[string]*txRequest), @@ -209,6 +220,7 @@ func NewTxFetcherForTests( hasTx: hasTx, addTxs: addTxs, fetchTxs: fetchTxs, + dropPeer: dropPeer, clock: clock, rand: rand, } @@ -216,7 +228,7 @@ func NewTxFetcherForTests( // Notify announces the fetcher of the potential availability of a new batch of // transactions in the network. -func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { +func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []common.Hash) error { // Keep track of all the announced transactions txAnnounceInMeter.Mark(int64(len(hashes))) @@ -226,28 +238,35 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { // still valuable to check here because it runs concurrent to the internal // loop, so anything caught here is time saved internally. 
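// A hypothetical caller-side sketch (mirroring the handler wiring later in
// this patch): eth/68 announcements pass the advertised types and sizes
// through, while legacy eth/67 peers pass nil for both, in which case no
// metadata is tracked or enforced:
//
//	fetcher.Notify(peer.ID(), packet.Types, packet.Sizes, packet.Hashes) // eth/68
//	fetcher.Notify(peer.ID(), nil, nil, hashes)                          // eth/67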
var ( - unknowns = make([]common.Hash, 0, len(hashes)) + unknownHashes = make([]common.Hash, 0, len(hashes)) + unknownMetas = make([]*txMetadata, 0, len(hashes)) + duplicate int64 underpriced int64 ) - for _, hash := range hashes { + for i, hash := range hashes { switch { case f.hasTx(hash): duplicate++ case f.isKnownUnderpriced(hash): underpriced++ default: - unknowns = append(unknowns, hash) + unknownHashes = append(unknownHashes, hash) + if types == nil { + unknownMetas = append(unknownMetas, nil) + } else { + unknownMetas = append(unknownMetas, &txMetadata{kind: types[i], size: sizes[i]}) + } } } txAnnounceKnownMeter.Mark(duplicate) txAnnounceUnderpricedMeter.Mark(underpriced) // If anything's left to announce, push it into the internal loop - if len(unknowns) == 0 { + if len(unknownHashes) == 0 { return nil } - announce := &txAnnounce{origin: peer, hashes: unknowns} + announce := &txAnnounce{origin: peer, hashes: unknownHashes, metas: unknownMetas} select { case f.notify <- announce: return nil @@ -290,6 +309,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) // re-requesting them and dropping the peer in case of malicious transfers. var ( added = make([]common.Hash, 0, len(txs)) + metas = make([]txMetadata, 0, len(txs)) ) // proceed in batches for i := 0; i < len(txs); i += 128 { @@ -325,6 +345,10 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) otherreject++ } added = append(added, batch[j].Hash()) + metas = append(metas, txMetadata{ + kind: batch[j].Type(), + size: uint32(batch[j].Size()), + }) } knownMeter.Mark(duplicate) underpricedMeter.Mark(underpriced) @@ -337,7 +361,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) } } select { - case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}: + case f.cleanup <- &txDelivery{origin: peer, hashes: added, metas: metas, direct: direct}: return nil case <-f.quit: return errTerminated @@ -394,13 +418,15 @@ func (f *TxFetcher) loop() { want := used + len(ann.hashes) if want > maxTxAnnounces { txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces)) + ann.hashes = ann.hashes[:want-maxTxAnnounces] + ann.metas = ann.metas[:want-maxTxAnnounces] } // All is well, schedule the remainder of the transactions idleWait := len(f.waittime) == 0 _, oldPeer := f.announces[ann.origin] - for _, hash := range ann.hashes { + for i, hash := range ann.hashes { // If the transaction is already downloading, add it to the list // of possible alternates (in case the current retrieval fails) and // also account it for the peer. @@ -409,9 +435,9 @@ func (f *TxFetcher) loop() { // Stage 2 and 3 share the set of origins per tx if announces := f.announces[ann.origin]; announces != nil { - announces[hash] = struct{}{} + announces[hash] = ann.metas[i] } else { - f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}} + f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]} } continue } @@ -422,9 +448,9 @@ func (f *TxFetcher) loop() { // Stage 2 and 3 share the set of origins per tx if announces := f.announces[ann.origin]; announces != nil { - announces[hash] = struct{}{} + announces[hash] = ann.metas[i] } else { - f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}} + f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]} } continue } @@ -432,12 +458,18 @@ func (f *TxFetcher) loop() { // yet downloading, add the peer as an alternate origin in the // waiting list. 
if f.waitlist[hash] != nil { + // Ignore double announcements from the same peer. This is + // especially important if metadata is also passed along to + // prevent malicious peers flip-flopping good/bad values. + if _, ok := f.waitlist[hash][ann.origin]; ok { + continue + } f.waitlist[hash][ann.origin] = struct{}{} if waitslots := f.waitslots[ann.origin]; waitslots != nil { - waitslots[hash] = struct{}{} + waitslots[hash] = ann.metas[i] } else { - f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}} + f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]} } continue } @@ -446,9 +478,9 @@ func (f *TxFetcher) loop() { f.waittime[hash] = f.clock.Now() if waitslots := f.waitslots[ann.origin]; waitslots != nil { - waitslots[hash] = struct{}{} + waitslots[hash] = ann.metas[i] } else { - f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}} + f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]} } } // If a new item was added to the waitlist, schedule it into the fetcher @@ -474,9 +506,9 @@ func (f *TxFetcher) loop() { f.announced[hash] = f.waitlist[hash] for peer := range f.waitlist[hash] { if announces := f.announces[peer]; announces != nil { - announces[hash] = struct{}{} + announces[hash] = f.waitslots[peer][hash] } else { - f.announces[peer] = map[common.Hash]struct{}{hash: {}} + f.announces[peer] = map[common.Hash]*txMetadata{hash: f.waitslots[peer][hash]} } delete(f.waitslots[peer], hash) if len(f.waitslots[peer]) == 0 { @@ -545,10 +577,27 @@ func (f *TxFetcher) loop() { case delivery := <-f.cleanup: // Independent if the delivery was direct or broadcast, remove all - // traces of the hash from internal trackers - for _, hash := range delivery.hashes { + // traces of the hash from internal trackers. That said, compare any + // advertised metadata with the real ones and drop bad peers. + for i, hash := range delivery.hashes { if _, ok := f.waitlist[hash]; ok { for peer, txset := range f.waitslots { + if meta := txset[hash]; meta != nil { + if delivery.metas[i].kind != meta.kind { + log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind) + f.dropPeer(peer) + } else if delivery.metas[i].size != meta.size { + log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size) + if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 { + // Normally we should drop a peer considering this is a protocol violation. + // However, due to the RLP vs consensus format messyness, allow a few bytes + // wiggle-room where we only warn, but don't drop. + // + // TODO(karalabe): Get rid of this relaxation when clients are proven stable. + f.dropPeer(peer) + } + } + } delete(txset, hash) if len(txset) == 0 { delete(f.waitslots, peer) @@ -558,6 +607,22 @@ func (f *TxFetcher) loop() { delete(f.waittime, hash) } else { for peer, txset := range f.announces { + if meta := txset[hash]; meta != nil { + if delivery.metas[i].kind != meta.kind { + log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind) + f.dropPeer(peer) + } else if delivery.metas[i].size != meta.size { + log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size) + if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 { + // Normally we should drop a peer considering this is a protocol violation. 
+ // However, due to the RLP vs consensus format messyness, allow a few bytes + // wiggle-room where we only warn, but don't drop. + // + // TODO(karalabe): Get rid of this relaxation when clients are proven stable. + f.dropPeer(peer) + } + } + } delete(txset, hash) if len(txset) == 0 { delete(f.announces, peer) @@ -859,7 +924,7 @@ func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) // forEachHash does a range loop over a map of hashes in production, but during // testing it does a deterministic sorted random to allow reproducing issues. -func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) { +func (f *TxFetcher) forEachHash(hashes map[common.Hash]*txMetadata, do func(hash common.Hash) bool) { // If we're running production, use whatever Go's map gives us if f.rand == nil { for hash := range hashes { diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index 980c1a6c26..371e7dd310 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -41,9 +41,20 @@ var ( testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()} ) +type announce struct { + hash common.Hash + kind *byte + size *uint32 +} + +func typeptr(t byte) *byte { return &t } +func sizeptr(n uint32) *uint32 { return &n } + type doTxNotify struct { peer string hashes []common.Hash + types []byte + sizes []uint32 } type doTxEnqueue struct { peer string @@ -57,7 +68,14 @@ type doWait struct { type doDrop string type doFunc func() +type isWaitingWithMeta map[string][]announce type isWaiting map[string][]common.Hash + +type isScheduledWithMeta struct { + tracking map[string][]announce + fetching map[string][]common.Hash + dangling map[string][]common.Hash +} type isScheduled struct { tracking map[string][]common.Hash fetching map[string][]common.Hash @@ -81,6 +99,7 @@ func TestTransactionFetcherWaiting(t *testing.T) { func(common.Hash) bool { return false }, nil, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -162,6 +181,212 @@ func TestTransactionFetcherWaiting(t *testing.T) { }) } +// Tests that transaction announcements with associated metadata are added to a +// waitlist, and none of them are scheduled for retrieval until the wait expires. +// +// This test is an extended version of TestTransactionFetcherWaiting. It's mostly +// to cover the metadata checkes without bloating up the basic behavioral tests +// with all the useless extra fields. 
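// A minimal sketch of the validation rule added above (shouldDrop is a
// hypothetical standalone helper; the real checks live inline in the
// fetcher loop): an announced type mismatch always drops the peer, while
// announced sizes get an 8-byte wiggle room to absorb RLP vs consensus
// encoding differences, and deviations inside the tolerance only warn:
//
//	func shouldDrop(annKind, kind byte, annSize, size uint32) bool {
//		if annKind != kind {
//			return true // type mismatch: always a protocol violation
//		}
//		diff := int64(annSize) - int64(size)
//		if diff < 0 {
//			diff = -diff
//		}
//		return diff > 8 // within tolerance: warn upstream, keep the peer
//	}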
+func TestTransactionFetcherWaitingWithMeta(t *testing.T) { + testTransactionFetcherParallel(t, txFetcherTest{ + init: func() *TxFetcher { + return NewTxFetcher( + func(common.Hash) bool { return false }, + nil, + func(string, []common.Hash) error { return nil }, + nil, + ) + }, + steps: []interface{}{ + // Initial announcement to get something into the waitlist + doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}}, + isWaitingWithMeta(map[string][]announce{ + "A": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)}, + }, + }), + // Announce from a new peer to check that no overwrite happens + doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{333, 444}}, + isWaitingWithMeta(map[string][]announce{ + "A": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)}, + }, + "B": { + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + }), + // Announce clashing hashes but unique new peer + doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 444}}, + isWaitingWithMeta(map[string][]announce{ + "A": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)}, + }, + "B": { + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + "C": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + }), + // Announce existing and clashing hashes from existing peer. Clashes + // should not overwrite previous announcements. + doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}, types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{999, 333, 555}}, + isWaitingWithMeta(map[string][]announce{ + "A": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)}, + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)}, + }, + "B": { + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + "C": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + }), + // Announce clashing hashes with conflicting metadata. Somebody will + // be in the wrong, but we don't know yet who. 
+ doTxNotify{peer: "D", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.BlobTxType}, sizes: []uint32{999, 222}}, + isWaitingWithMeta(map[string][]announce{ + "A": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)}, + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)}, + }, + "B": { + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + "C": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + "D": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)}, + {common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)}, + }, + }), + isScheduled{tracking: nil, fetching: nil}, + + // Wait for the arrival timeout which should move all expired items + // from the wait list to the scheduler + doWait{time: txArriveTimeout, step: true}, + isWaiting(nil), + isScheduledWithMeta{ + tracking: map[string][]announce{ + "A": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)}, + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)}, + }, + "B": { + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + "C": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + "D": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)}, + {common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)}, + }, + }, + fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer + "A": {{0x03}, {0x05}}, + "C": {{0x01}, {0x04}}, + "D": {{0x02}}, + }, + }, + // Queue up a non-fetchable transaction and then trigger it with a new + // peer (weird case to test 1 line in the fetcher) + doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}}, + isWaitingWithMeta(map[string][]announce{ + "C": { + {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)}, + {common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)}, + }, + }), + doWait{time: txArriveTimeout, step: true}, + isScheduledWithMeta{ + tracking: map[string][]announce{ + "A": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)}, + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)}, + }, + "B": { + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + "C": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)}, + {common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)}, + }, + "D": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)}, + {common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)}, + }, + }, + fetching: map[string][]common.Hash{ + "A": {{0x03}, {0x05}}, + "C": 
{{0x01}, {0x04}}, + "D": {{0x02}}, + }, + }, + doTxNotify{peer: "E", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}}, + isScheduledWithMeta{ + tracking: map[string][]announce{ + "A": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)}, + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)}, + }, + "B": { + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + }, + "C": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)}, + {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)}, + {common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)}, + }, + "D": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)}, + {common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)}, + }, + "E": { + {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)}, + {common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)}, + }, + }, + fetching: map[string][]common.Hash{ + "A": {{0x03}, {0x05}}, + "C": {{0x01}, {0x04}}, + "D": {{0x02}}, + "E": {{0x06}, {0x07}}, + }, + }, + }, + }) +} + // Tests that transaction announcements skip the waiting list if they are // already scheduled. func TestTransactionFetcherSkipWaiting(t *testing.T) { @@ -171,6 +396,7 @@ func TestTransactionFetcherSkipWaiting(t *testing.T) { func(common.Hash) bool { return false }, nil, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -234,6 +460,7 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) { func(common.Hash) bool { return false }, nil, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -313,6 +540,7 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) { <-proceed return errors.New("peer disconnected") }, + nil, ) }, steps: []interface{}{ @@ -382,6 +610,7 @@ func TestTransactionFetcherCleanup(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -421,6 +650,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -459,6 +689,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -505,6 +736,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -543,6 +775,7 @@ func TestTransactionFetcherBroadcasts(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -591,6 +824,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) { func(common.Hash) bool { return false }, nil, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -648,6 +882,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -713,6 +948,7 @@ func 
TestTransactionFetcherTimeoutTimerResets(t *testing.T) { func(common.Hash) bool { return false }, nil, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -772,6 +1008,7 @@ func TestTransactionFetcherRateLimiting(t *testing.T) { func(common.Hash) bool { return false }, nil, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -810,6 +1047,7 @@ func TestTransactionFetcherDoSProtection(t *testing.T) { func(common.Hash) bool { return false }, nil, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -877,6 +1115,7 @@ func TestTransactionFetcherUnderpricedDedup(t *testing.T) { return errs }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -946,6 +1185,7 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) { return errs }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: append(steps, []interface{}{ @@ -968,6 +1208,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -1021,6 +1262,7 @@ func TestTransactionFetcherDrop(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -1087,6 +1329,7 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -1120,6 +1363,74 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) { }) } +// Tests that announced transactions with the wrong transaction type or size will +// result in a dropped peer. +func TestInvalidAnnounceMetadata(t *testing.T) { + drop := make(chan string, 2) + testTransactionFetcherParallel(t, txFetcherTest{ + init: func() *TxFetcher { + return NewTxFetcher( + func(common.Hash) bool { return false }, + func(txs []*types.Transaction) []error { + return make([]error, len(txs)) + }, + func(string, []common.Hash) error { return nil }, + func(peer string) { drop <- peer }, + ) + }, + steps: []interface{}{ + // Initial announcement to get something into the waitlist + doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}, types: []byte{testTxs[0].Type(), testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())}}, + isWaitingWithMeta(map[string][]announce{ + "A": { + {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))}, + {testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, + }, + }), + // Announce from new peers conflicting transactions + doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{1024 + uint32(testTxs[0].Size())}}, + doTxNotify{peer: "C", hashes: []common.Hash{testTxsHashes[1]}, types: []byte{1 + testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[1].Size())}}, + isWaitingWithMeta(map[string][]announce{ + "A": { + {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))}, + {testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, + }, + "B": { + {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))}, + }, + "C": { + {testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, + }, + }), + // Schedule all the transactions for 
retrieval + doWait{time: txArriveTimeout, step: true}, + isWaitingWithMeta(nil), + isScheduledWithMeta{ + tracking: map[string][]announce{ + "A": { + {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))}, + {testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, + }, + "B": { + {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))}, + }, + "C": { + {testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, + }, + }, + fetching: map[string][]common.Hash{ + "A": {testTxsHashes[0]}, + "C": {testTxsHashes[1]}, + }, + }, + // Deliver the transactions and wait for B to be dropped + doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}}, + doFunc(func() { <-drop }), + doFunc(func() { <-drop }), + }, + }) +} + // This test reproduces a crash caught by the fuzzer. The root cause was a // dangling transaction timing out and clashing on re-add with a concurrently // announced one. @@ -1132,6 +1443,7 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -1159,6 +1471,7 @@ func TestTransactionFetcherFuzzCrash02(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -1188,6 +1501,7 @@ func TestTransactionFetcherFuzzCrash03(t *testing.T) { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, ) }, steps: []interface{}{ @@ -1224,6 +1538,7 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) { <-proceed return errors.New("peer disconnected") }, + nil, ) }, steps: []interface{}{ @@ -1274,9 +1589,34 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { // Crunch through all the test steps and execute them for i, step := range tt.steps { + // Auto-expand certain steps to ones with metadata + switch old := step.(type) { + case isWaiting: + new := make(isWaitingWithMeta) + for peer, hashes := range old { + for _, hash := range hashes { + new[peer] = append(new[peer], announce{hash, nil, nil}) + } + } + step = new + + case isScheduled: + new := isScheduledWithMeta{ + tracking: make(map[string][]announce), + fetching: old.fetching, + dangling: old.dangling, + } + for peer, hashes := range old.tracking { + for _, hash := range hashes { + new.tracking[peer] = append(new.tracking[peer], announce{hash, nil, nil}) + } + } + step = new + } + // Process the original or expanded steps switch step := step.(type) { case doTxNotify: - if err := fetcher.Notify(step.peer, step.hashes); err != nil { + if err := fetcher.Notify(step.peer, step.types, step.sizes, step.hashes); err != nil { t.Errorf("step %d: %v", i, err) } <-wait // Fetcher needs to process this, wait until it's done @@ -1307,24 +1647,34 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { case doFunc: step() - case isWaiting: + case isWaitingWithMeta: // We need to check that the waiting list (stage 1) internals // match with the expected set. Check the peer->hash mappings // first. 
- for peer, hashes := range step { + for peer, announces := range step { waiting := fetcher.waitslots[peer] if waiting == nil { t.Errorf("step %d: peer %s missing from waitslots", i, peer) continue } - for _, hash := range hashes { - if _, ok := waiting[hash]; !ok { - t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, hash) + for _, ann := range announces { + if meta, ok := waiting[ann.hash]; !ok { + t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, ann.hash) + } else { + if (meta == nil && (ann.kind != nil || ann.size != nil)) || + (meta != nil && (ann.kind == nil || ann.size == nil)) || + (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) { + t.Errorf("step %d, peer %s, hash %x: waitslot metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, ann.kind, ann.size) + } } } - for hash := range waiting { - if !containsHash(hashes, hash) { - t.Errorf("step %d, peer %s: hash %x extra in waitslots", i, peer, hash) + for hash, meta := range waiting { + ann := announce{hash: hash} + if meta != nil { + ann.kind, ann.size = &meta.kind, &meta.size + } + if !containsAnnounce(announces, ann) { + t.Errorf("step %d, peer %s: announce %v extra in waitslots", i, peer, ann) } } } @@ -1334,13 +1684,13 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { } } // Peer->hash sets correct, check the hash->peer and timeout sets - for peer, hashes := range step { - for _, hash := range hashes { - if _, ok := fetcher.waitlist[hash][peer]; !ok { - t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, hash, peer) + for peer, announces := range step { + for _, ann := range announces { + if _, ok := fetcher.waitlist[ann.hash][peer]; !ok { + t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, ann.hash, peer) } - if _, ok := fetcher.waittime[hash]; !ok { - t.Errorf("step %d: hash %x missing from waittime", i, hash) + if _, ok := fetcher.waittime[ann.hash]; !ok { + t.Errorf("step %d: hash %x missing from waittime", i, ann.hash) } } } @@ -1349,15 +1699,15 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash) } for peer := range peers { - if !containsHash(step[peer], hash) { + if !containsHashInAnnounces(step[peer], hash) { t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer) } } } for hash := range fetcher.waittime { var found bool - for _, hashes := range step { - if containsHash(hashes, hash) { + for _, announces := range step { + if containsHashInAnnounces(announces, hash) { found = true break } @@ -1367,23 +1717,33 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { } } - case isScheduled: + case isScheduledWithMeta: // Check that all scheduled announces are accounted for and no // extra ones are present. 
- for peer, hashes := range step.tracking { + for peer, announces := range step.tracking { scheduled := fetcher.announces[peer] if scheduled == nil { t.Errorf("step %d: peer %s missing from announces", i, peer) continue } - for _, hash := range hashes { - if _, ok := scheduled[hash]; !ok { - t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, hash) + for _, ann := range announces { + if meta, ok := scheduled[ann.hash]; !ok { + t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, ann.hash) + } else { + if (meta == nil && (ann.kind != nil || ann.size != nil)) || + (meta != nil && (ann.kind == nil || ann.size == nil)) || + (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) { + t.Errorf("step %d, peer %s, hash %x: announce metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, ann.kind, ann.size) + } } } - for hash := range scheduled { - if !containsHash(hashes, hash) { - t.Errorf("step %d, peer %s: hash %x extra in announces", i, peer, hash) + for hash, meta := range scheduled { + ann := announce{hash: hash} + if meta != nil { + ann.kind, ann.size = &meta.kind, &meta.size + } + if !containsAnnounce(announces, ann) { + t.Errorf("step %d, peer %s: announce %x extra in announces", i, peer, hash) } } } @@ -1483,17 +1843,17 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { // retrieval but not actively being downloaded are tracked only // in the stage 2 `announced` map. var queued []common.Hash - for _, hashes := range step.tracking { - for _, hash := range hashes { + for _, announces := range step.tracking { + for _, ann := range announces { var found bool for _, hs := range step.fetching { - if containsHash(hs, hash) { + if containsHash(hs, ann.hash) { found = true break } } if !found { - queued = append(queued, hash) + queued = append(queued, ann.hash) } } } @@ -1526,6 +1886,42 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { } } +// containsAnnounce returns whether an announcement is contained within a slice +// of announcements. +func containsAnnounce(slice []announce, ann announce) bool { + for _, have := range slice { + if have.hash == ann.hash { + if have.kind == nil || ann.kind == nil { + if have.kind != ann.kind { + return false + } + } else if *have.kind != *ann.kind { + return false + } + if have.size == nil || ann.size == nil { + if have.size != ann.size { + return false + } + } else if *have.size != *ann.size { + return false + } + return true + } + } + return false +} + +// containsHashInAnnounces returns whether a hash is contained within a slice +// of announcements. +func containsHashInAnnounces(slice []announce, hash common.Hash) bool { + for _, have := range slice { + if have.hash == hash { + return true + } + } + return false +} + // containsHash returns whether a hash is contained within a hash slice. 
func containsHash(slice []common.Hash, hash common.Hash) bool { for _, have := range slice { diff --git a/eth/handler.go b/eth/handler.go index 0c0c17fee1..f0021e5644 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -277,7 +277,7 @@ func newHandler(config *handlerConfig) (*handler, error) { addTxs := func(txs []*types.Transaction) []error { return h.txpool.Add(txs, false, false) } - h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx) + h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer) h.chainSync = newChainSyncer(h) return h, nil } diff --git a/eth/handler_eth.go b/eth/handler_eth.go index e844b36cca..2a839f615f 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -68,10 +68,10 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { return h.handleBlockBroadcast(peer, packet.Block, packet.TD) case *eth.NewPooledTransactionHashesPacket67: - return h.txFetcher.Notify(peer.ID(), *packet) + return h.txFetcher.Notify(peer.ID(), nil, nil, *packet) case *eth.NewPooledTransactionHashesPacket68: - return h.txFetcher.Notify(peer.ID(), packet.Hashes) + return h.txFetcher.Notify(peer.ID(), packet.Types, packet.Sizes, packet.Hashes) case *eth.TransactionsPacket: for _, tx := range *packet { diff --git a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go index d1d6fdc665..8b501645b6 100644 --- a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go +++ b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go @@ -83,6 +83,7 @@ func Fuzz(input []byte) int { return make([]error, len(txs)) }, func(string, []common.Hash) error { return nil }, + nil, clock, rand, ) f.Start() @@ -116,6 +117,8 @@ func Fuzz(input []byte) int { var ( announceIdxs = make([]int, announce) announces = make([]common.Hash, announce) + types = make([]byte, announce) + sizes = make([]uint32, announce) ) for i := 0; i < len(announces); i++ { annBuf := make([]byte, 2) @@ -124,11 +127,13 @@ func Fuzz(input []byte) int { } announceIdxs[i] = (int(annBuf[0])*256 + int(annBuf[1])) % len(txs) announces[i] = txs[announceIdxs[i]].Hash() + types[i] = txs[announceIdxs[i]].Type() + sizes[i] = uint32(txs[announceIdxs[i]].Size()) } if verbose { fmt.Println("Notify", peer, announceIdxs) } - if err := f.Notify(peer, announces); err != nil { + if err := f.Notify(peer, types, sizes, announces); err != nil { panic(err) } From 5e43ed0d72be5afe8a36a587299843e5c9301275 Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Tue, 10 Oct 2023 02:56:50 -0600 Subject: [PATCH 86/98] git: ignore tests/spec-tests folder (#28254) --- .gitignore | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index e24e1d1677..3f27cdc00f 100644 --- a/.gitignore +++ b/.gitignore @@ -47,4 +47,6 @@ profile.cov /dashboard/assets/package-lock.json **/yarn-error.log -logs/ \ No newline at end of file +logs/ + +tests/spec-tests/ From 2c007cfed7db238ba038b8748ce2aabd108ac874 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 10 Oct 2023 15:53:01 +0300 Subject: [PATCH 87/98] accounts/abi/bind/backend: use requested header for gas prices and gas limits (#28280) --- accounts/abi/bind/backends/simulated.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index dbdcd17823..8549976480 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -606,8 
+606,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") } - head := b.blockchain.CurrentHeader() - if !b.blockchain.Config().IsLondon(head.Number) { + if !b.blockchain.Config().IsLondon(header.Number) { // If there's no basefee, then it must be a non-1559 execution if call.GasPrice == nil { call.GasPrice = new(big.Int) @@ -629,13 +628,13 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes call.GasPrice = new(big.Int) if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 { - call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap) + call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, header.BaseFee), call.GasFeeCap) } } } // Ensure message is initialized properly. if call.Gas == 0 { - call.Gas = 50000000 + call.Gas = 10 * header.GasLimit } if call.Value == nil { call.Value = new(big.Int) From 8976a0c97a2e309abcb9d5f301fc8ef815cb5095 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 11 Oct 2023 06:12:45 +0200 Subject: [PATCH 88/98] trie: remove owner and binary marshaling from stacktrie (#28291) This change - Removes the owner-notion from a stacktrie; the owner is only ever needed for comitting to the database, but the commit-function, the `writeFn` is provided by the caller, so the caller can just set the owner into the `writeFn` instead of having it passed through the stacktrie. - Removes the `encoding.BinaryMarshaler`/`encoding.BinaryUnmarshaler` interface from stacktrie. We're not using it, and it is doubtful whether anyone downstream is either. 
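For reference, a minimal sketch of the resulting pattern (illustrative only:
batch, accountHash, scheme, keys and vals stand in for caller state, and the
imports are the usual go-ethereum ethdb, common, rawdb and trie packages).
The owner is captured by the write callback instead of being a field of the
stacktrie:

    func commitStorage(batch ethdb.KeyValueWriter, accountHash common.Hash, scheme string, keys, vals [][]byte) (common.Hash, error) {
    	owner := accountHash // captured by the closure rather than stored in the StackTrie
    	st := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
    		rawdb.WriteTrieNode(batch, owner, path, hash, blob, scheme)
    	})
    	// Keys must already be sorted: stack tries require ordered insertion.
    	for i, key := range keys {
    		if err := st.Update(key, vals[i]); err != nil {
    			return common.Hash{}, err
    		}
    	}
    	return st.Commit()
    }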
--- core/state/snapshot/conversion.go | 4 +- core/state/statedb.go | 2 +- eth/protocols/snap/sync.go | 29 +++--- tests/fuzzers/stacktrie/trie_fuzzer.go | 9 +- trie/stacktrie.go | 15 +--- trie/stacktrie_marshalling.go | 120 ------------------------- trie/stacktrie_test.go | 48 ---------- trie/trie_test.go | 8 +- 8 files changed, 29 insertions(+), 206 deletions(-) delete mode 100644 trie/stacktrie_marshalling.go diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go index 1e683f76ce..321bfbc6a2 100644 --- a/core/state/snapshot/conversion.go +++ b/core/state/snapshot/conversion.go @@ -364,11 +364,11 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) { var nodeWriter trie.NodeWriteFunc if db != nil { - nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + nodeWriter = func(path []byte, hash common.Hash, blob []byte) { rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme) } } - t := trie.NewStackTrieWithOwner(nodeWriter, owner) + t := trie.NewStackTrie(nodeWriter) for leaf := range in { t.Update(leaf.key[:], leaf.value) } diff --git a/core/state/statedb.go b/core/state/statedb.go index a59de16a70..d28cd29b30 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -964,7 +964,7 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo nodes = trienode.NewNodeSet(addrHash) slots = make(map[common.Hash][]byte) ) - stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { nodes.AddNode(path, trienode.NewDeleted()) size += common.StorageSize(len(path)) }) diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 6a2d92c009..df1473e999 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -738,8 +738,8 @@ func (s *Syncer) loadSyncStatus() { s.accountBytes += common.StorageSize(len(key) + len(value)) }, } - task.genTrie = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { - rawdb.WriteTrieNode(task.genBatch, owner, path, hash, val, s.scheme) + task.genTrie = trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) { + rawdb.WriteTrieNode(task.genBatch, common.Hash{}, path, hash, val, s.scheme) }) for accountHash, subtasks := range task.SubTasks { for _, subtask := range subtasks { @@ -751,9 +751,10 @@ func (s *Syncer) loadSyncStatus() { s.storageBytes += common.StorageSize(len(key) + len(value)) }, } - subtask.genTrie = trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { + owner := accountHash // local assignment for stacktrie writer closure + subtask.genTrie = trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) { rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, val, s.scheme) - }, accountHash) + }) } } } @@ -810,8 +811,8 @@ func (s *Syncer) loadSyncStatus() { Last: last, SubTasks: make(map[common.Hash][]*storageTask), genBatch: batch, - genTrie: trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { - rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) + genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) { + rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, val, s.scheme) }), }) log.Debug("Created account sync task", 
"from", next, "last", last) @@ -2004,14 +2005,15 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { s.storageBytes += common.StorageSize(len(key) + len(value)) }, } + owner := account // local assignment for stacktrie writer closure tasks = append(tasks, &storageTask{ Next: common.Hash{}, Last: r.End(), root: acc.Root, genBatch: batch, - genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { + genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) { rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) - }, account), + }), }) for r.Next() { batch := ethdb.HookedBatch{ @@ -2025,9 +2027,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { Last: r.End(), root: acc.Root, genBatch: batch, - genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { + genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) { rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) - }, account), + }), }) } for _, task := range tasks { @@ -2072,9 +2074,10 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { slots += len(res.hashes[i]) if i < len(res.hashes)-1 || res.subTask == nil { - tr := trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { - rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) - }, account) + // no need to make local reassignment of account: this closure does not outlive the loop + tr := trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) { + rawdb.WriteTrieNode(batch, account, path, hash, val, s.scheme) + }) for j := 0; j < len(res.hashes[i]); j++ { tr.Update(res.hashes[i][j][:], res.slots[i][j]) } diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go index 3d65524095..20b8ca24b3 100644 --- a/tests/fuzzers/stacktrie/trie_fuzzer.go +++ b/tests/fuzzers/stacktrie/trie_fuzzer.go @@ -140,8 +140,8 @@ func (f *fuzzer) fuzz() int { trieA = trie.NewEmpty(dbA) spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()} dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB), nil) - trieB = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { - rawdb.WriteTrieNode(spongeB, owner, path, hash, blob, dbB.Scheme()) + trieB = trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme()) }) vals []kv useful bool @@ -205,13 +205,10 @@ func (f *fuzzer) fuzz() int { // Ensure all the nodes are persisted correctly var ( nodeset = make(map[string][]byte) // path -> blob - trieC = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + trieC = trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { if crypto.Keccak256Hash(blob) != hash { panic("invalid node blob") } - if owner != (common.Hash{}) { - panic("invalid node owner") - } nodeset[string(path)] = common.CopyBytes(blob) }) checked int diff --git a/trie/stacktrie.go b/trie/stacktrie.go index 781c842961..35208e1cb3 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -33,13 +33,12 @@ var ( // NodeWriteFunc is used to provide all information of a dirty node for committing // so that callers can flush nodes into database with desired scheme. 
-type NodeWriteFunc = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) +type NodeWriteFunc = func(path []byte, hash common.Hash, blob []byte) // StackTrie is a trie implementation that expects keys to be inserted // in order. Once it determines that a subtree will no longer be inserted // into, it will hash it and free up the memory it uses. type StackTrie struct { - owner common.Hash // the owner of the trie writeFn NodeWriteFunc // function for committing nodes, can be nil root *stNode h *hasher @@ -54,14 +53,6 @@ func NewStackTrie(writeFn NodeWriteFunc) *StackTrie { } } -// NewStackTrieWithOwner allocates and initializes an empty trie, but with -// the additional owner field. -func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner common.Hash) *StackTrie { - stack := NewStackTrie(writeFn) - stack.owner = owner - return stack -} - // Update inserts a (key, value) pair into the stack trie. func (t *StackTrie) Update(key, value []byte) error { k := keybytesToHex(key) @@ -371,7 +362,7 @@ func (t *StackTrie) hash(st *stNode, path []byte) { // input values st.val = t.h.hashData(encodedNode) if t.writeFn != nil { - t.writeFn(t.owner, path, common.BytesToHash(st.val), encodedNode) + t.writeFn(path, common.BytesToHash(st.val), encodedNode) } } @@ -416,6 +407,6 @@ func (t *StackTrie) Commit() (h common.Hash, err error) { t.h.sha.Write(st.val) t.h.sha.Read(h[:]) - t.writeFn(t.owner, nil, h, st.val) + t.writeFn(nil, h, st.val) return h, nil } diff --git a/trie/stacktrie_marshalling.go b/trie/stacktrie_marshalling.go deleted file mode 100644 index c0bb07f868..0000000000 --- a/trie/stacktrie_marshalling.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "bufio" - "bytes" - "encoding" - "encoding/gob" -) - -// Compile-time interface checks. -var ( - _ = encoding.BinaryMarshaler((*StackTrie)(nil)) - _ = encoding.BinaryUnmarshaler((*StackTrie)(nil)) -) - -// NewFromBinaryV2 initialises a serialized stacktrie with the given db. -// OBS! Format was changed along with the name of this constructor. -func NewFromBinaryV2(data []byte) (*StackTrie, error) { - stack := NewStackTrie(nil) - if err := stack.UnmarshalBinary(data); err != nil { - return nil, err - } - return stack, nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (t *StackTrie) MarshalBinary() (data []byte, err error) { - var ( - b bytes.Buffer - w = bufio.NewWriter(&b) - ) - if err := gob.NewEncoder(w).Encode(t.owner); err != nil { - return nil, err - } - if err := t.root.marshalInto(w); err != nil { - return nil, err - } - w.Flush() - return b.Bytes(), nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
-func (t *StackTrie) UnmarshalBinary(data []byte) error { - r := bytes.NewReader(data) - if err := gob.NewDecoder(r).Decode(&t.owner); err != nil { - return err - } - if err := t.root.unmarshalFrom(r); err != nil { - return err - } - return nil -} - -type stackNodeMarshaling struct { - Typ uint8 - Key []byte - Val []byte -} - -func (n *stNode) marshalInto(w *bufio.Writer) (err error) { - enc := stackNodeMarshaling{ - Typ: n.typ, - Key: n.key, - Val: n.val, - } - if err := gob.NewEncoder(w).Encode(enc); err != nil { - return err - } - for _, child := range n.children { - if child == nil { - w.WriteByte(0) - continue - } - w.WriteByte(1) - if err := child.marshalInto(w); err != nil { - return err - } - } - return nil -} - -func (n *stNode) unmarshalFrom(r *bytes.Reader) error { - var dec stackNodeMarshaling - if err := gob.NewDecoder(r).Decode(&dec); err != nil { - return err - } - n.typ = dec.Typ - n.key = dec.Key - n.val = dec.Val - - for i := range n.children { - if b, err := r.ReadByte(); err != nil { - return err - } else if b == 0 { - continue - } - var child stNode - if err := child.unmarshalFrom(r); err != nil { - return err - } - n.children[i] = &child - } - return nil -} diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go index 5b86a971e1..0e52781c62 100644 --- a/trie/stacktrie_test.go +++ b/trie/stacktrie_test.go @@ -376,51 +376,3 @@ func TestStacktrieNotModifyValues(t *testing.T) { } } } - -// TestStacktrieSerialization tests that the stacktrie works well if we -// serialize/unserialize it a lot -func TestStacktrieSerialization(t *testing.T) { - var ( - st = NewStackTrieWithOwner(nil, common.Hash{0x12}) - nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) - keyB = big.NewInt(1) - keyDelta = big.NewInt(1) - vals [][]byte - keys [][]byte - ) - getValue := func(i int) []byte { - if i%2 == 0 { // large - return crypto.Keccak256(big.NewInt(int64(i)).Bytes()) - } else { //small - return big.NewInt(int64(i)).Bytes() - } - } - for i := 0; i < 10; i++ { - vals = append(vals, getValue(i)) - keys = append(keys, common.BigToHash(keyB).Bytes()) - keyB = keyB.Add(keyB, keyDelta) - keyDelta.Add(keyDelta, common.Big1) - } - for i, k := range keys { - nt.Update(k, common.CopyBytes(vals[i])) - } - - for i, k := range keys { - blob, err := st.MarshalBinary() - if err != nil { - t.Fatal(err) - } - newSt, err := NewFromBinaryV2(blob) - if err != nil { - t.Fatal(err) - } - st = newSt - st.Update(k, common.CopyBytes(vals[i])) - } - if have, want := st.Hash(), nt.Hash(); have != want { - t.Fatalf("have %#x want %#x", have, want) - } - if have, want := st.owner, (common.Hash{0x12}); have != want { - t.Fatalf("have %#x want %#x", have, want) - } -} diff --git a/trie/trie_test.go b/trie/trie_test.go index 8078770e7a..2dfe81ef81 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -912,8 +912,8 @@ func TestCommitSequenceStackTrie(t *testing.T) { trie := NewEmpty(db) // Another sponge is used for the stacktrie commits stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"} - stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { - rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme()) + stTrie := NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme()) }) // Fill the trie with elements for i := 0; i < count; i++ { @@ -971,8 +971,8 @@ func TestCommitSequenceSmallRoot(t *testing.T) { trie := NewEmpty(db) // Another sponge is 
used for the stacktrie commits stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"} - stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { - rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme()) + stTrie := NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme()) }) // Add a single small-element to the trie(s) key := make([]byte, 5) From 7776a3214ab40b29fa649d9b59b097bfd5fd6a8b Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 11 Oct 2023 15:18:18 +0800 Subject: [PATCH 89/98] ethdb/pebble: add level file metrics (#28271) --- ethdb/pebble/pebble.go | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 5aa00aad4e..07dcf5933c 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -71,6 +71,8 @@ type Database struct { seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt manualMemAllocGauge metrics.Gauge // Gauge for tracking amount of non-managed memory currently allocated + levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in levels + quitLock sync.RWMutex // Mutex protecting the quit channel and the closed flag quitChan chan chan error // Quit channel to stop the metrics collection before closing the database closed bool // keep track of whether we're Closed @@ -230,7 +232,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil) // Start up the metrics gathering and return - go db.meter(metricsGatheringInterval) + go db.meter(metricsGatheringInterval, namespace) return db, nil } @@ -427,7 +429,7 @@ func (d *Database) Path() string { // meter periodically retrieves internal pebble counters and reports them to // the metrics subsystem. 
-func (d *Database) meter(refresh time.Duration) { +func (d *Database) meter(refresh time.Duration, namespace string) { var errc chan error timer := time.NewTimer(refresh) defer timer.Stop() @@ -450,7 +452,7 @@ func (d *Database) meter(refresh time.Duration) { compRead int64 nWrite int64 - metrics = d.db.Metrics() + stats = d.db.Metrics() compTime = d.compTime.Load() writeDelayCount = d.writeDelayCount.Load() writeDelayTime = d.writeDelayTime.Load() @@ -461,14 +463,14 @@ func (d *Database) meter(refresh time.Duration) { writeDelayCounts[i%2] = writeDelayCount compTimes[i%2] = compTime - for _, levelMetrics := range metrics.Levels { + for _, levelMetrics := range stats.Levels { nWrite += int64(levelMetrics.BytesCompacted) nWrite += int64(levelMetrics.BytesFlushed) compWrite += int64(levelMetrics.BytesCompacted) compRead += int64(levelMetrics.BytesRead) } - nWrite += int64(metrics.WAL.BytesWritten) + nWrite += int64(stats.WAL.BytesWritten) compWrites[i%2] = compWrite compReads[i%2] = compRead @@ -490,7 +492,7 @@ func (d *Database) meter(refresh time.Duration) { d.compWriteMeter.Mark(compWrites[i%2] - compWrites[(i-1)%2]) } if d.diskSizeGauge != nil { - d.diskSizeGauge.Update(int64(metrics.DiskSpaceUsage())) + d.diskSizeGauge.Update(int64(stats.DiskSpaceUsage())) } if d.diskReadMeter != nil { d.diskReadMeter.Mark(0) // pebble doesn't track non-compaction reads @@ -499,12 +501,20 @@ func (d *Database) meter(refresh time.Duration) { d.diskWriteMeter.Mark(nWrites[i%2] - nWrites[(i-1)%2]) } // See https://github.com/cockroachdb/pebble/pull/1628#pullrequestreview-1026664054 - manuallyAllocated := metrics.BlockCache.Size + int64(metrics.MemTable.Size) + int64(metrics.MemTable.ZombieSize) + manuallyAllocated := stats.BlockCache.Size + int64(stats.MemTable.Size) + int64(stats.MemTable.ZombieSize) d.manualMemAllocGauge.Update(manuallyAllocated) - d.memCompGauge.Update(metrics.Flush.Count) + d.memCompGauge.Update(stats.Flush.Count) d.nonlevel0CompGauge.Update(nonLevel0CompCount) d.level0CompGauge.Update(level0CompCount) - d.seekCompGauge.Update(metrics.Compact.ReadCount) + d.seekCompGauge.Update(stats.Compact.ReadCount) + + for i, level := range stats.Levels { + // Append metrics for additional layers + if i >= len(d.levelsGauge) { + d.levelsGauge = append(d.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil)) + } + d.levelsGauge[i].Update(level.NumFiles) + } // Sleep a bit, then repeat the stats collection select { From a6deb2d994e644300bac43455b1c954976e7382e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 11 Oct 2023 10:50:09 +0300 Subject: [PATCH 90/98] eth/fetcher: throttle tx fetches to 128KB responses (#28304) * eth/fetcher: throttle tx fetches to 128KB responses * eth/fetcher: unindent a clause per review request --- eth/fetcher/tx_fetcher.go | 82 +++++++++++++++++++++------------- eth/fetcher/tx_fetcher_test.go | 72 ++++++++++++++++++++++++++--- 2 files changed, 117 insertions(+), 37 deletions(-) diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index 8bb5d579dc..7a958d2d3b 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -39,16 +39,22 @@ const ( // can announce in a short time. maxTxAnnounces = 4096 - // maxTxRetrievals is the maximum transaction number can be fetched in one - // request. The rationale to pick 256 is: - // - In eth protocol, the softResponseLimit is 2MB. 
Nowadays according to - // Etherscan the average transaction size is around 200B, so in theory - // we can include lots of transaction in a single protocol packet. - // - However the maximum size of a single transaction is raised to 128KB, - // so pick a middle value here to ensure we can maximize the efficiency - // of the retrieval and response size overflow won't happen in most cases. + // maxTxRetrievals is the maximum number of transactions that can be fetched + // in one request. The rationale for picking 256 is to have a reasonable lower + // bound for the transferred data (don't waste RTTs, transfer more meaningful + // batch sizes), but also have an upper bound on the sequentiality to allow + // using our entire peerset for deliveries. + // + // This number also acts as a failsafe against malicious announces which might + // cause us to request more data than we'd expect. maxTxRetrievals = 256 + // maxTxRetrievalSize is the max number of bytes that delivered transactions + // should weigh according to the announcements. The 128KB was chosen to limit + // retrieving a maximum of one blob transaction at a time to minimize hogging + // a connection between two peers. + maxTxRetrievalSize = 128 * 1024 + // maxTxUnderpricedSetSize is the size of the underpriced transaction set that // is used to track recent transactions that have been dropped so we don't // re-request them. @@ -859,25 +865,36 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, if len(f.announces[peer]) == 0 { return // continue in the for-each } - hashes := make([]common.Hash, 0, maxTxRetrievals) - f.forEachHash(f.announces[peer], func(hash common.Hash) bool { - if _, ok := f.fetching[hash]; !ok { - // Mark the hash as fetching and stash away possible alternates - f.fetching[hash] = peer - - if _, ok := f.alternates[hash]; ok { - panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash])) - } - f.alternates[hash] = f.announced[hash] - delete(f.announced, hash) + var ( + hashes = make([]common.Hash, 0, maxTxRetrievals) + bytes uint64 + ) + f.forEachAnnounce(f.announces[peer], func(hash common.Hash, meta *txMetadata) bool { + // If the transaction is alcear fetching, skip to the next one + if _, ok := f.fetching[hash]; ok { + return true + } + // Mark the hash as fetching and stash away possible alternates + f.fetching[hash] = peer - // Accumulate the hash and stop if the limit was reached - hashes = append(hashes, hash) - if len(hashes) >= maxTxRetrievals { - return false // break in the for-each + if _, ok := f.alternates[hash]; ok { + panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash])) + } + f.alternates[hash] = f.announced[hash] + delete(f.announced, hash) + + // Accumulate the hash and stop if the limit was reached + hashes = append(hashes, hash) + if len(hashes) >= maxTxRetrievals { + return false // break in the for-each + } + if meta != nil { // Only set eth/68 and upwards + bytes += uint64(meta.size) + if bytes >= maxTxRetrievalSize { + return false } } - return true // continue in the for-each + return true // scheduled, try to add more }) // If any hashes were allocated, request them from the peer if len(hashes) > 0 { @@ -922,27 +939,28 @@ func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) } } -// forEachHash does a range loop over a map of hashes in production, but during -// testing it does a deterministic sorted random to allow reproducing issues. 
-func (f *TxFetcher) forEachHash(hashes map[common.Hash]*txMetadata, do func(hash common.Hash) bool) { +// forEachAnnounce does a range loop over a map of announcements in production, +// but during testing it does a deterministic sorted random to allow reproducing +// issues. +func (f *TxFetcher) forEachAnnounce(announces map[common.Hash]*txMetadata, do func(hash common.Hash, meta *txMetadata) bool) { // If we're running production, use whatever Go's map gives us if f.rand == nil { - for hash := range hashes { - if !do(hash) { + for hash, meta := range announces { + if !do(hash, meta) { return } } return } // We're running the test suite, make iteration deterministic - list := make([]common.Hash, 0, len(hashes)) - for hash := range hashes { + list := make([]common.Hash, 0, len(announces)) + for hash := range announces { list = append(list, hash) } sortHashes(list) rotateHashes(list, f.rand.Intn(len(list))) for _, hash := range list { - if !do(hash) { + if !do(hash, announces[hash]) { return } } diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index 371e7dd310..fbb9ff9dcc 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" ) var ( @@ -993,15 +994,14 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) { }) } -// Tests that if thousands of transactions are announces, only a small +// Tests that if thousands of transactions are announced, only a small // number of them will be requested at a time. func TestTransactionFetcherRateLimiting(t *testing.T) { - // Create a slew of transactions and to announce them + // Create a slew of transactions and announce them var hashes []common.Hash for i := 0; i < maxTxAnnounces; i++ { hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)}) } - testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( @@ -1029,6 +1029,68 @@ func TestTransactionFetcherRateLimiting(t *testing.T) { }) } +// Tests that if huge transactions are announced, only a small number of them will +// be requested at a time, to keep the responses below a reasonable level. +func TestTransactionFetcherBandwidthLimiting(t *testing.T) { + testTransactionFetcherParallel(t, txFetcherTest{ + init: func() *TxFetcher { + return NewTxFetcher( + func(common.Hash) bool { return false }, + nil, + func(string, []common.Hash) error { return nil }, + nil, + ) + }, + steps: []interface{}{ + // Announce mid size transactions from A to verify that multiple + // ones can be piled into a single request. 
+ doTxNotify{peer: "A", + hashes: []common.Hash{{0x01}, {0x02}, {0x03}, {0x04}}, + types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType, types.LegacyTxType}, + sizes: []uint32{48 * 1024, 48 * 1024, 48 * 1024, 48 * 1024}, + }, + // Announce transactions exactly at the limit to see that only one + // gets requested + doTxNotify{peer: "B", + hashes: []common.Hash{{0x05}, {0x06}}, + types: []byte{types.LegacyTxType, types.LegacyTxType}, + sizes: []uint32{maxTxRetrievalSize, maxTxRetrievalSize}, + }, + // Announce oversized blob transactions to see that overflows are ok + doTxNotify{peer: "C", + hashes: []common.Hash{{0x07}, {0x08}}, + types: []byte{types.BlobTxType, types.BlobTxType}, + sizes: []uint32{params.MaxBlobGasPerBlock, params.MaxBlobGasPerBlock}, + }, + doWait{time: txArriveTimeout, step: true}, + isWaiting(nil), + isScheduledWithMeta{ + tracking: map[string][]announce{ + "A": { + {common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)}, + {common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)}, + {common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)}, + {common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)}, + }, + "B": { + {common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)}, + {common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)}, + }, + "C": { + {common.Hash{0x07}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)}, + {common.Hash{0x08}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)}, + }, + }, + fetching: map[string][]common.Hash{ + "A": {{0x02}, {0x03}, {0x04}}, + "B": {{0x06}}, + "C": {{0x08}}, + }, + }, + }, + }) +} + // Tests that the number of transactions a peer is allowed to announce and/or // request at the same time is hard capped. 
func TestTransactionFetcherDoSProtection(t *testing.T) { @@ -1664,7 +1726,7 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { if (meta == nil && (ann.kind != nil || ann.size != nil)) || (meta != nil && (ann.kind == nil || ann.size == nil)) || (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) { - t.Errorf("step %d, peer %s, hash %x: waitslot metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, ann.kind, ann.size) + t.Errorf("step %d, peer %s, hash %x: waitslot metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size) } } } @@ -1733,7 +1795,7 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { if (meta == nil && (ann.kind != nil || ann.size != nil)) || (meta != nil && (ann.kind == nil || ann.size == nil)) || (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) { - t.Errorf("step %d, peer %s, hash %x: announce metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, ann.kind, ann.size) + t.Errorf("step %d, peer %s, hash %x: announce metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size) } } } From 13d1d425acacd54fa7eba40402c9e21b9146095c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 11 Oct 2023 11:23:57 +0300 Subject: [PATCH 91/98] eth/fetcher: fix typo --- eth/fetcher/tx_fetcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index 7a958d2d3b..5747626968 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -870,7 +870,7 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, bytes uint64 ) f.forEachAnnounce(f.announces[peer], func(hash common.Hash, meta *txMetadata) bool { - // If the transaction is alcear fetching, skip to the next one + // If the transaction is already fetching, skip to the next one if _, ok := f.fetching[hash]; ok { return true } From eeb5dc3ccf69cfdb3a9f66685a2ed3bb50f06a86 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 11 Oct 2023 16:27:44 +0800 Subject: [PATCH 92/98] cmd, core: resolve scheme from a read-write database (#28313) * cmd, core: resolve scheme from a read-write database * cmd, core, eth: move the scheme check in the ethereum constructor * cmd/geth: dump should be in ro mode * cmd: reverts --- cmd/geth/chaincmd.go | 2 +- cmd/utils/flags.go | 50 +++--------------------------------- core/genesis.go | 45 +++++--------------------------- core/genesis_test.go | 2 +- core/rawdb/accessors_trie.go | 35 +++++++++++++++++++++++++ eth/backend.go | 8 ++++-- eth/ethconfig/config.go | 8 +++--- 7 files changed, 58 insertions(+), 92 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index aebcc29eb8..a6bb2c2d2c 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -474,7 +474,7 @@ func dump(ctx *cli.Context) error { if err != nil { return err } - triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup + triedb := utils.MakeTrieDatabase(ctx, db, true, true) // always enable preimage lookup defer triedb.Close() state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 9743a7b9ca..f24ca66581 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -268,7 +268,6 @@ var ( StateSchemeFlag = &cli.StringFlag{ Name: "state.scheme", Usage: "Scheme to use for storing ethereum state ('hash' or 'path')", - Value: rawdb.HashScheme, Category: 
flags.StateCategory, } StateHistoryFlag = &cli.Uint64Flag{ @@ -1721,15 +1720,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(StateHistoryFlag.Name) { cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name) } - // Parse state scheme, abort the process if it's not compatible. - chaindb := tryMakeReadOnlyDatabase(ctx, stack) - scheme, err := ParseStateScheme(ctx, chaindb) - chaindb.Close() - if err != nil { - Fatalf("%v", err) + if ctx.IsSet(StateSchemeFlag.Name) { + cfg.StateScheme = ctx.String(StateSchemeFlag.Name) } - cfg.StateScheme = scheme - // Parse transaction history flag, if user is still using legacy config // file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'. if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit { @@ -2165,7 +2158,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) } - scheme, err := ParseStateScheme(ctx, chainDb) + scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), chainDb) if err != nil { Fatalf("%v", err) } @@ -2224,47 +2217,12 @@ func MakeConsolePreloads(ctx *cli.Context) []string { return preloads } -// ParseStateScheme resolves scheme identifier from CLI flag. If the provided -// state scheme is not compatible with the one of persistent scheme, an error -// will be returned. -// -// - none: use the scheme consistent with persistent state, or fallback -// to hash-based scheme if state is empty. -// - hash: use hash-based scheme or error out if not compatible with -// persistent state scheme. -// - path: use path-based scheme or error out if not compatible with -// persistent state scheme. -func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) { - // If state scheme is not specified, use the scheme consistent - // with persistent state, or fallback to hash mode if database - // is empty. - stored := rawdb.ReadStateScheme(disk) - if !ctx.IsSet(StateSchemeFlag.Name) { - if stored == "" { - // use default scheme for empty database, flip it when - // path mode is chosen as default - log.Info("State schema set to default", "scheme", "hash") - return rawdb.HashScheme, nil - } - log.Info("State scheme set to already existing", "scheme", stored) - return stored, nil // reuse scheme of persistent scheme - } - // If state scheme is specified, ensure it's compatible with - // persistent state. - scheme := ctx.String(StateSchemeFlag.Name) - if stored == "" || scheme == stored { - log.Info("State scheme set by user", "scheme", scheme) - return scheme, nil - } - return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, scheme) -} - // MakeTrieDatabase constructs a trie database based on the configured scheme. 
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database { config := &trie.Config{ Preimages: preimage, } - scheme, err := ParseStateScheme(ctx, disk) + scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk) if err != nil { Fatalf("%v", err) } diff --git a/core/genesis.go b/core/genesis.go index baace3f991..0f1e8baf43 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -120,8 +120,8 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error { return nil } -// deriveHash computes the state root according to the genesis specification. -func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { +// hash computes the state root according to the genesis specification. +func (ga *GenesisAlloc) hash() (common.Hash, error) { // Create an ephemeral in-memory database for computing hash, // all the derived states will be discarded to not pollute disk. db := state.NewDatabase(rawdb.NewMemoryDatabase()) @@ -142,9 +142,9 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { return statedb.Commit(0, false) } -// flush is very similar with deriveHash, but the main difference is -// all the generated states will be persisted into the given database. -// Also, the genesis state specification will be flushed as well. +// flush is very similar to hash, but the main difference is all the generated +// states will be persisted into the given database. Also, the genesis state +// specification will be flushed as well. func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil) if err != nil { @@ -179,39 +179,6 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas return nil } -// CommitGenesisState loads the stored genesis state with the given block -// hash and commits it into the provided trie database. -func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { - var alloc GenesisAlloc - blob := rawdb.ReadGenesisStateSpec(db, blockhash) - if len(blob) != 0 { - if err := alloc.UnmarshalJSON(blob); err != nil { - return err - } - } else { - // Genesis allocation is missing and there are several possibilities: - // the node is legacy which doesn't persist the genesis allocation or - // the persisted allocation is just lost. - // - supported networks(mainnet, testnets), recover with defined allocations - // - private network, can't recover - var genesis *Genesis - switch blockhash { - case params.MainnetGenesisHash: - genesis = DefaultGenesisBlock() - case params.GoerliGenesisHash: - genesis = DefaultGoerliGenesisBlock() - case params.SepoliaGenesisHash: - genesis = DefaultSepoliaGenesisBlock() - } - if genesis != nil { - alloc = genesis.Alloc - } else { - return errors.New("not found") - } - } - return alloc.flush(db, triedb, blockhash) -} - // GenesisAccount is an account in the state of the genesis block. type GenesisAccount struct { Code []byte `json:"code,omitempty"` @@ -444,7 +411,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { // ToBlock returns the genesis block according to genesis specification. 
func (g *Genesis) ToBlock() *types.Block { - root, err := g.Alloc.deriveHash() + root, err := g.Alloc.hash() if err != nil { panic(err) } diff --git a/core/genesis_test.go b/core/genesis_test.go index 6a0f2df085..fac88ff373 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -231,7 +231,7 @@ func TestReadWriteGenesisAlloc(t *testing.T) { {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}}, } - hash, _ = alloc.deriveHash() + hash, _ = alloc.hash() ) blob, _ := json.Marshal(alloc) rawdb.WriteGenesisStateSpec(db, hash, blob) diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index ea437b8114..78f1a70b1c 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -305,3 +305,38 @@ func ReadStateScheme(db ethdb.Reader) string { } return HashScheme } + +// ParseStateScheme checks if the specified state scheme is compatible with +// the stored state. +// +// - If the provided scheme is none, use the scheme consistent with persistent +// state, or fallback to hash-based scheme if state is empty. +// +// - If the provided scheme is hash, use hash-based scheme or error out if not +// compatible with persistent state scheme. +// +// - If the provided scheme is path, use path-based scheme or error out if not +// compatible with persistent state scheme. +func ParseStateScheme(provided string, disk ethdb.Database) (string, error) { + // If state scheme is not specified, use the scheme consistent + // with persistent state, or fallback to hash mode if database + // is empty. + stored := ReadStateScheme(disk) + if provided == "" { + if stored == "" { + // use default scheme for empty database, flip it when + // path mode is chosen as default + log.Info("State scheme set to default", "scheme", "hash") + return HashScheme, nil + } + log.Info("State scheme set to already existing", "scheme", stored) + return stored, nil // reuse scheme of persistent scheme + } + // If state scheme is specified, ensure it's compatible with + // persistent state. + if stored == "" || provided == stored { + log.Info("State scheme set by user", "scheme", provided) + return provided, nil + } + return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, provided) +} diff --git a/eth/backend.go b/eth/backend.go index af03517792..c6787870ca 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -133,8 +133,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } + scheme, err := rawdb.ParseStateScheme(config.StateScheme, chainDb) + if err != nil { + return nil, err + } // Try to recover offline state pruning only in hash-based. - if config.StateScheme == rawdb.HashScheme { + if scheme == rawdb.HashScheme { if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil { log.Error("Failed to recover state", "error", err) } @@ -194,7 +198,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { SnapshotLimit: config.SnapshotCache, Preimages: config.Preimages, StateHistory: config.StateHistory, - StateScheme: config.StateScheme, + StateScheme: scheme, } ) // Override the chain config with provided settings. 
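As a usage sketch of the relocated helper (illustrative only, not part of the patch): once ParseStateScheme lives in core/rawdb, any holder of a read-write key-value store can resolve the effective state scheme before wiring up the trie database. The database path and cache sizing below are made-up placeholders; only rawdb.ParseStateScheme (added above) and rawdb.NewLevelDBDatabase are real go-ethereum APIs.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Open a read-write chain database; the path and cache/handle counts
	// are hypothetical values for this sketch.
	db, err := rawdb.NewLevelDBDatabase("chaindata", 128, 128, "eth/db/chaindata/", false)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// An empty scheme string means: reuse whatever scheme the database
	// already holds, falling back to hash mode for a fresh database.
	scheme, err := rawdb.ParseStateScheme("", db)
	if err != nil {
		// e.g. "incompatible state scheme, stored: hash, provided: path"
		panic(err)
	}
	fmt.Println("resolved state scheme:", scheme)
}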
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 342ff3da9e..bfb1df3fb1 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -27,7 +27,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool/blobpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/eth/downloader" @@ -64,7 +63,6 @@ var Defaults = Config{ TxLookupLimit: 2350000, TransactionHistory: 2350000, StateHistory: params.FullImmutabilityThreshold, - StateScheme: rawdb.HashScheme, LightPeers: 100, DatabaseCache: 512, TrieCleanCache: 154, @@ -105,7 +103,11 @@ type Config struct { TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved. - StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top + + // State scheme represents the scheme used to store ethereum states and trie + // nodes on top. It can be 'hash', 'path', or none which means use the scheme + // consistent with persistent state. + StateScheme string `toml:",omitempty"` // RequiredBlocks is a set of block number -> hash mappings which must be in the // canonical chain of all remote peers. Setting the option makes geth verify the From 0004c6b229b787281760b14fb9460ffd9c2496f1 Mon Sep 17 00:00:00 2001 From: vuittont60 <81072379+vuittont60@users.noreply.github.com> Date: Wed, 11 Oct 2023 16:29:10 +0800 Subject: [PATCH 93/98] accounts, cmd: fix typos (#28300) --- accounts/scwallet/README.md | 2 +- cmd/evm/testdata/9/readme.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/accounts/scwallet/README.md b/accounts/scwallet/README.md index 4313d9c6b2..28079c4743 100644 --- a/accounts/scwallet/README.md +++ b/accounts/scwallet/README.md @@ -8,7 +8,7 @@ ## Preparing the smartcard - **WARNING: FOILLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS** + **WARNING: FOLLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS** You can use status' [keycard-cli](https://github.com/status-im/keycard-cli) and you should get _at least_ version 2.1.1 of their [smartcard application](https://github.com/status-im/status-keycard/releases/download/2.2.1/keycard_v2.2.1.cap) diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md index 5394780288..357e200682 100644 --- a/cmd/evm/testdata/9/readme.md +++ b/cmd/evm/testdata/9/readme.md @@ -1,6 +1,6 @@ ## EIP-1559 testing -This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. +This test contains testcases for EIP-1559, which uses a new transaction type and has a new block parameter. 
### Prestate From d2c0bed9d5c49803562077688bde114d5ca40959 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Thu, 12 Oct 2023 09:54:00 +0200 Subject: [PATCH 94/98] cmd/geth: fix failing test (#28322) --- cmd/geth/genesis_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go index 2506b42d1e..ffe8176b01 100644 --- a/cmd/geth/genesis_test.go +++ b/cmd/geth/genesis_test.go @@ -176,12 +176,12 @@ func TestCustomBackend(t *testing.T) { { // Can't start pebble on top of leveldb initArgs: []string{"--db.engine", "leveldb"}, execArgs: []string{"--db.engine", "pebble"}, - execExpect: `Fatal: Could not open database: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`, + execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`, }, { // Can't start leveldb on top of pebble initArgs: []string{"--db.engine", "pebble"}, execArgs: []string{"--db.engine", "leveldb"}, - execExpect: `Fatal: Could not open database: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`, + execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`, }, { // Reject invalid backend choice initArgs: []string{"--db.engine", "mssql"}, From 0d45d72d703ea16b05cc17366d1f931bace28189 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 12 Oct 2023 14:36:49 +0300 Subject: [PATCH 95/98] params: release Geth v1.13.3 --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index bcffd292a5..130acceef7 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 13 // Minor version component of the current release - VersionPatch = 3 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 13 // Minor version component of the current release + VersionPatch = 3 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. From 6a1f5088d4f62c5211e9d923a62790bb1eba68bb Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Tue, 23 Jan 2024 20:57:13 -0700 Subject: [PATCH 96/98] update sliding time window for new geth metrics --- metrics/sliding_time_window_array_sample.go | 120 ++++---------------- 1 file changed, 19 insertions(+), 101 deletions(-) diff --git a/metrics/sliding_time_window_array_sample.go b/metrics/sliding_time_window_array_sample.go index 8aecf444ff..f50bf5206c 100644 --- a/metrics/sliding_time_window_array_sample.go +++ b/metrics/sliding_time_window_array_sample.go @@ -1,6 +1,7 @@ package metrics import ( + "math" "sync" "time" ) @@ -52,65 +53,6 @@ func (s *SlidingTimeWindowArraySample) Clear() { s.measurements.Clear() } -// Count returns the number of samples recorded, which may exceed the -// reservoir size. 
-func (s *SlidingTimeWindowArraySample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *SlidingTimeWindowArraySample) Max() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - return SampleMax(s.measurements.Values()) -} - -// Mean returns the mean of the values in the sample. -func (s *SlidingTimeWindowArraySample) Mean() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - return SampleMean(s.measurements.Values()) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *SlidingTimeWindowArraySample) Min() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - return SampleMin(s.measurements.Values()) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *SlidingTimeWindowArraySample) Percentile(p float64) float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - return SamplePercentile(s.measurements.Values(), p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *SlidingTimeWindowArraySample) Percentiles(ps []float64) []float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - return SamplePercentiles(s.measurements.Values(), ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *SlidingTimeWindowArraySample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - return s.measurements.Size() -} - // trim requires s.mutex to already be acquired func (s *SlidingTimeWindowArraySample) trim() { now := s.getTick() @@ -139,33 +81,28 @@ func (s *SlidingTimeWindowArraySample) getTick() int64 { } // Snapshot returns a read-only copy of the sample. -func (s *SlidingTimeWindowArraySample) Snapshot() Sample { +func (s *SlidingTimeWindowArraySample) Snapshot() SampleSnapshot { s.mutex.Lock() defer s.mutex.Unlock() s.trim() - origValues := s.measurements.Values() - values := make([]int64, len(origValues)) - copy(values, origValues) - return &SampleSnapshot{ - count: s.count, - values: values, + var ( + samples = s.measurements.Values() + values = make([]int64, len(samples)) + max int64 = math.MinInt64 + min int64 = math.MaxInt64 + sum int64 + ) + for i, v := range samples { + values[i] = v + sum += v + if v > max { + max = v + } + if v < min { + min = v + } } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *SlidingTimeWindowArraySample) StdDev() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - return SampleStdDev(s.measurements.Values()) -} - -// Sum returns the sum of the values in the sample. -func (s *SlidingTimeWindowArraySample) Sum() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - return SampleSum(s.measurements.Values()) + return newSampleSnapshotPrecalculated(s.count, values, min, max, sum) } // Update samples a new value. @@ -184,22 +121,3 @@ func (s *SlidingTimeWindowArraySample) Update(v int64) { } s.measurements.Put(newTick, v) } - -// Values returns a copy of the values in the sample. -func (s *SlidingTimeWindowArraySample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - origValues := s.measurements.Values() - values := make([]int64, len(origValues)) - copy(values, origValues) - return values -} - -// Variance returns the variance of the values in the sample. 
-func (s *SlidingTimeWindowArraySample) Variance() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - s.trim() - return SampleVariance(s.measurements.Values()) -} From e5ecb9d3f4305cd83e458cc99b256ba4dfd05451 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 25 Jan 2024 13:20:51 -0700 Subject: [PATCH 97/98] update github actions and use go 1.20 --- .github/workflows/ci.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd067aa32e..53ebb0f925 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,12 +13,14 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + with: + submodules: true - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.19 + go-version: 1.20.x - name: Test run: make test @@ -27,12 +29,14 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + with: + submodules: true - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.19 + go-version: 1.20.x - name: Test run: make lint From 18a471f98ecac1b0877bb64773f482446aba191b Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Wed, 24 Jan 2024 10:27:55 -0700 Subject: [PATCH 98/98] Cleanup function ordering --- metrics/sample.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/metrics/sample.go b/metrics/sample.go index 23128e7e25..fd1e537c19 100644 --- a/metrics/sample.go +++ b/metrics/sample.go @@ -24,10 +24,6 @@ type SampleSnapshot interface { Variance() float64 } -func NewBoundedHistogramSample() Sample { - return NewSlidingTimeWindowArraySample(time.Minute * 1) -} - // Samples maintain a statistically-significant selection of values from // a stream. type Sample interface { @@ -36,6 +32,10 @@ type Sample interface { Update(int64) } +func NewBoundedHistogramSample() Sample { + return NewSlidingTimeWindowArraySample(time.Minute * 1) +} + // ExpDecaySample is an exponentially-decaying sample using a forward-decaying // priority reservoir. See Cormode et al's "Forward Decay: A Practical Time // Decay Model for Streaming Systems".
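Taken together, patches 96 and 98 leave a slimmer sampling API: NewBoundedHistogramSample hides a one-minute sliding time window behind the Sample interface, and all statistics are read from a precalculated, immutable SampleSnapshot rather than through per-method locking on the live sample. A minimal consumption sketch follows (illustrative only; it assumes this fork's metrics package at its usual import path):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// Sample bounded to the most recent minute of measurements.
	sample := metrics.NewBoundedHistogramSample()
	for i := int64(1); i <= 100; i++ {
		sample.Update(i)
	}
	// Snapshot precomputes count/min/max/sum once; subsequent reads are
	// lock-free on the immutable copy.
	snap := sample.Snapshot()
	fmt.Println("count:", snap.Count())
	fmt.Println("mean:", snap.Mean())
	fmt.Println("p95:", snap.Percentile(0.95))
}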