From 2e6c42a4b3d925ea186c793c746089e0d78fef33 Mon Sep 17 00:00:00 2001
From: IronGauntlets
Date: Tue, 19 Nov 2024 09:30:19 +0000
Subject: [PATCH] Fix too many blocks back condition

---
 rpc/events_test.go        | 44 --------
 rpc/subscriptions.go      |  2 +-
 rpc/subscriptions_test.go | 84 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 85 insertions(+), 45 deletions(-)
 create mode 100644 rpc/subscriptions_test.go

diff --git a/rpc/events_test.go b/rpc/events_test.go
index f7715c1f31..7f8b483987 100644
--- a/rpc/events_test.go
+++ b/rpc/events_test.go
@@ -229,50 +229,6 @@ func (fc *fakeConn) Equal(other jsonrpc.Conn) bool {
 	return fc.w == fc2.w
 }
 
-func TestSubscribeEventsAndUnsubscribe(t *testing.T) {
-	t.Parallel()
-	log := utils.NewNopZapLogger()
-	n := utils.Ptr(utils.Mainnet)
-	client := feeder.NewTestClient(t, n)
-	gw := adaptfeeder.New(client)
-	ctx, cancel := context.WithCancel(context.Background())
-	t.Cleanup(cancel)
-	chain := blockchain.New(pebble.NewMemTest(t), n)
-	syncer := sync.New(chain, gw, log, 0, false)
-	handler := rpc.New(chain, syncer, nil, "", log)
-
-	go func() {
-		require.NoError(t, handler.Run(ctx))
-	}()
-	// Technically, there's a race between goroutine above and the SubscribeNewHeads call down below.
-	// Sleep for a moment just in case.
-	time.Sleep(50 * time.Millisecond)
-
-	serverConn, clientConn := net.Pipe()
-	t.Cleanup(func() {
-		require.NoError(t, serverConn.Close())
-		require.NoError(t, clientConn.Close())
-	})
-
-	t.Run("Too many keys in filter", func(t *testing.T) {
-		keys := make([][]felt.Felt, 1024+1)
-		fromAddr := new(felt.Felt).SetBytes([]byte("from_address"))
-		id, rpcErr := handler.SubscribeEvents(ctx, fromAddr, keys, nil)
-		assert.Zero(t, id)
-		assert.Equal(t, rpc.ErrTooManyKeysInFilter, rpcErr)
-	})
-
-	// Todo: use mocks to fix the tests
-	t.Run("Too many blocks back", func(t *testing.T) {
-		keys := make([][]felt.Felt, 1)
-		fromAddr := new(felt.Felt).SetBytes([]byte("from_address"))
-		blockID := &rpc.BlockID{Number: 0}
-		id, rpcErr := handler.SubscribeEvents(ctx, fromAddr, keys, blockID)
-		assert.Zero(t, id)
-		assert.Equal(t, rpc.ErrTooManyBlocksBack, rpcErr)
-	})
-}
-
 func TestSubscribeNewHeadsAndUnsubscribe(t *testing.T) {
 	t.Parallel()
 	log := utils.NewNopZapLogger()
diff --git a/rpc/subscriptions.go b/rpc/subscriptions.go
index 26c64ab215..2c9fbdf5ae 100644
--- a/rpc/subscriptions.go
+++ b/rpc/subscriptions.go
@@ -45,7 +45,7 @@ func (h *Handler) SubscribeEvents(ctx context.Context, fromAddr *felt.Felt, keys
 		}
 
 		// Todo: should the pending block be included in the head count?
-		if requestedHeader.Number >= maxBlocksBack && requestedHeader.Number <= headHeader.Number-maxBlocksBack {
+		if headHeader.Number >= maxBlocksBack && requestedHeader.Number <= headHeader.Number-maxBlocksBack {
 			return nil, ErrTooManyBlocksBack
 		}
 	}
diff --git a/rpc/subscriptions_test.go b/rpc/subscriptions_test.go
new file mode 100644
index 0000000000..dc635d732b
--- /dev/null
+++ b/rpc/subscriptions_test.go
@@ -0,0 +1,84 @@
+package rpc_test
+
+import (
+	"context"
+	"net"
+	"testing"
+
+	"github.com/NethermindEth/juno/core"
+
+	"github.com/NethermindEth/juno/core/felt"
+	"github.com/NethermindEth/juno/jsonrpc"
+	"github.com/NethermindEth/juno/mocks"
+	"github.com/NethermindEth/juno/rpc"
+	"github.com/NethermindEth/juno/utils"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
+)
+
+func TestSubscribeEventsAndUnsubscribe(t *testing.T) {
+	mockCtrl := gomock.NewController(t)
+	t.Cleanup(mockCtrl.Finish)
+
+	mockChain := mocks.NewMockReader(mockCtrl)
+	mockSyncer := mocks.NewMockSyncReader(mockCtrl)
+
+	log := utils.NewNopZapLogger()
+	handler := rpc.New(mockChain, mockSyncer, nil, "", log)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	t.Run("Too many keys in filter", func(t *testing.T) {
+		keys := make([][]felt.Felt, 1024+1)
+		fromAddr := new(felt.Felt).SetBytes([]byte("from_address"))
+
+		serverConn, clientConn := net.Pipe()
+		t.Cleanup(func() {
+			require.NoError(t, serverConn.Close())
+			require.NoError(t, clientConn.Close())
+		})
+
+		subCtx := context.WithValue(ctx, jsonrpc.ConnKey{}, &fakeConn{w: serverConn})
+
+		id, rpcErr := handler.SubscribeEvents(subCtx, fromAddr, keys, nil)
+		assert.Zero(t, id)
+		assert.Equal(t, rpc.ErrTooManyKeysInFilter, rpcErr)
+	})
+
+	t.Run("Too many blocks back", func(t *testing.T) {
+		keys := make([][]felt.Felt, 1)
+		fromAddr := new(felt.Felt).SetBytes([]byte("from_address"))
+		blockID := &rpc.BlockID{Number: 0}
+
+		serverConn, clientConn := net.Pipe()
+		t.Cleanup(func() {
+			require.NoError(t, serverConn.Close())
+			require.NoError(t, clientConn.Close())
+		})
+
+		subCtx := context.WithValue(ctx, jsonrpc.ConnKey{}, &fakeConn{w: serverConn})
+
+		// Note: the upper end of the window doesn't need to be tested here, because requesting
+		// a block number greater than the head returns a block-not-found error. That behaviour
+		// is already covered by other tests.
+		t.Run("head is 1024", func(t *testing.T) {
+			mockChain.EXPECT().HeadsHeader().Return(&core.Header{Number: 1024}, nil)
+			mockChain.EXPECT().BlockHeaderByNumber(blockID.Number).Return(&core.Header{Number: 0}, nil)
+
+			id, rpcErr := handler.SubscribeEvents(subCtx, fromAddr, keys, blockID)
+			assert.Zero(t, id)
+			assert.Equal(t, rpc.ErrTooManyBlocksBack, rpcErr)
+		})
+
+		t.Run("head is more than 1024", func(t *testing.T) {
+			mockChain.EXPECT().HeadsHeader().Return(&core.Header{Number: 2024}, nil)
+			mockChain.EXPECT().BlockHeaderByNumber(blockID.Number).Return(&core.Header{Number: 0}, nil)
+
+			id, rpcErr := handler.SubscribeEvents(subCtx, fromAddr, keys, blockID)
+			assert.Zero(t, id)
+			assert.Equal(t, rpc.ErrTooManyBlocksBack, rpcErr)
+		})
+	})
+}
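
Why the one-line change in rpc/subscriptions.go matters: the old condition keyed its first
clause on the requested block number, so a subscription starting at any block below
maxBlocksBack (1024) short-circuited the check and was never rejected, even when it sat
thousands of blocks behind the head. That is exactly the case the new tests pin down
(head 1024 and 2024, requested block 0). Keying the clause on the head's number instead
enables the check for low block numbers and also guards the uint64 subtraction
headHeader.Number-maxBlocksBack against underflow on short chains. Below is a minimal,
standalone sketch of the two predicates — the helper names tooFarBackOld/tooFarBackNew are
invented for illustration and are not code from this repository; only the constant value
1024 and the boolean expressions are taken from the diff above.

package main

import "fmt"

const maxBlocksBack = 1024 // same limit as in rpc/subscriptions.go

// tooFarBackOld mirrors the predicate before this patch. Because the first
// clause keys on the *requested* number, any block below 1024 short-circuits
// the check and is never reported as too far back, however far ahead the head is.
func tooFarBackOld(head, requested uint64) bool {
	return requested >= maxBlocksBack && requested <= head-maxBlocksBack
}

// tooFarBackNew mirrors the predicate after this patch. Keying the first
// clause on the *head* number enables the check for low block numbers and
// ensures head-maxBlocksBack is only evaluated when it cannot underflow.
func tooFarBackNew(head, requested uint64) bool {
	return head >= maxBlocksBack && requested <= head-maxBlocksBack
}

func main() {
	// Block 0 with head 2024 is 2024 blocks behind and should be rejected.
	fmt.Println(tooFarBackOld(2024, 0)) // false -> the bug: never rejected
	fmt.Println(tooFarBackNew(2024, 0)) // true  -> rejected as expected

	// On a chain shorter than maxBlocksBack nothing can be too far back;
	// the new guard short-circuits before the subtraction is reached.
	fmt.Println(tooFarBackNew(100, 0)) // false
}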