From 909f644d766b315c86e64c934af750745055dbcc Mon Sep 17 00:00:00 2001 From: Cirrus Gai Date: Wed, 21 Dec 2022 16:32:26 +0800 Subject: [PATCH 01/37] feat: Add new query for the last checkpoint with a given status (#253) --- proto/babylon/checkpointing/query.proto | 50 +- testutil/datagen/raw_checkpoint.go | 10 + .../keeper/grpc_query_checkpoint.go | 48 +- .../keeper/grpc_query_checkpoint_test.go | 43 + x/checkpointing/types/querier.go | 4 + x/checkpointing/types/query.pb.go | 1521 ++++++----------- x/checkpointing/types/query.pb.gw.go | 184 -- x/checkpointing/types/types.go | 4 + 8 files changed, 635 insertions(+), 1229 deletions(-) diff --git a/proto/babylon/checkpointing/query.proto b/proto/babylon/checkpointing/query.proto index 0fb338e3a..c33dbd0f7 100644 --- a/proto/babylon/checkpointing/query.proto +++ b/proto/babylon/checkpointing/query.proto @@ -16,21 +16,12 @@ service Query { rpc RawCheckpointList(QueryRawCheckpointListRequest) returns (QueryRawCheckpointListResponse) { option (google.api.http).get = "/babylon/checkpointing/v1/raw_checkpoints/{status}"; } - // RawCheckpointList queries a list of checkpoints starting from a given epoch number to the current epoch number. - rpc RecentRawCheckpointList(QueryRecentRawCheckpointListRequest) returns (QueryRecentRawCheckpointListResponse) { - option (google.api.http).get = "/babylon/checkpointing/v1/recent_raw_checkpoints/{from_epoch_num}"; - } // RawCheckpoint queries a checkpoints at a given epoch number. rpc RawCheckpoint(QueryRawCheckpointRequest) returns (QueryRawCheckpointResponse) { option (google.api.http).get = "/babylon/checkpointing/v1/raw_checkpoint/{epoch_num}"; } - // LatestCheckpoint queries the checkpoint with the highest epoch num. - rpc LatestCheckpoint(QueryLatestCheckpointRequest) returns (QueryLatestCheckpointResponse) { - option (google.api.http).get = "/babylon/checkpointing/v1/latest_checkpoint"; - } - // BlsPublicKeyList queries a list of bls public keys of the validators at a given epoch number. rpc BlsPublicKeyList(QueryBlsPublicKeyListRequest) returns (QueryBlsPublicKeyListResponse) { option (google.api.http).get = "/babylon/checkpointing/v1/bls_public_keys/{epoch_num}"; @@ -46,6 +37,9 @@ service Query { option (google.api.http).get = "/babylon/checkpointing/v1/epochs:status_count"; } + // LastCheckpointWithStatus queries the last checkpoint with a given status or a more matured status + rpc LastCheckpointWithStatus(QueryLastCheckpointWithStatusRequest) returns (QueryLastCheckpointWithStatusResponse); + // Parameters queries the parameters of the module. rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { option (google.api.http).get = "/babylon/checkpointing/v1/params"; @@ -72,26 +66,6 @@ message QueryRawCheckpointListResponse { cosmos.base.query.v1beta1.PageResponse pagination = 2; } -// QueryRecentRawCheckpointListRequest is the request type for the Query/RecentRawCheckpoints -// RPC method. -message QueryRecentRawCheckpointListRequest { - // from_epoch defines the start epoch of the query, which is inclusive - uint64 from_epoch_num = 1; - - // pagination defines an optional pagination for the request. - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryRecentRawCheckpointListResponse is the response type for the Query/RecentRawCheckpoints -// RPC method. 
-message QueryRecentRawCheckpointListResponse {
-  // the order is going from the newest to oldest based on the epoch number
-  repeated RawCheckpointWithMeta raw_checkpoints = 1;
-
-  // pagination defines the pagination in the response.
-  cosmos.base.query.v1beta1.PageResponse pagination = 2;
-}
-
 // QueryRawCheckpointRequest is the request type for the Query/RawCheckpoint
 // RPC method.
 message QueryRawCheckpointRequest {
@@ -105,16 +79,6 @@ message QueryRawCheckpointResponse {
   RawCheckpointWithMeta raw_checkpoint = 1;
 }
 
-// QueryLatestCheckpointRequest is the request type for the Query/LatestCheckpoint
-// RPC method.
-message QueryLatestCheckpointRequest {}
-
-// QueryLatestCheckpointResponse is the response type for the Query/LatestCheckpoint
-// RPC method.
-message QueryLatestCheckpointResponse {
-  RawCheckpointWithMeta latest_checkpoint = 1;
-}
-
 // QueryBlsPublicKeyListRequest is the request type for the Query/BlsPublicKeys
 // RPC method.
 message QueryBlsPublicKeyListRequest {
@@ -161,6 +125,14 @@ message QueryRecentEpochStatusCountResponse {
   map<string, uint64> status_count = 3;
 }
 
+message QueryLastCheckpointWithStatusRequest {
+  CheckpointStatus status = 1;
+}
+
+message QueryLastCheckpointWithStatusResponse {
+  RawCheckpoint raw_checkpoint = 1;
+}
+
 // QueryParamsRequest is request type for the Query/Params RPC method.
 message QueryParamsRequest {}
 
diff --git a/testutil/datagen/raw_checkpoint.go b/testutil/datagen/raw_checkpoint.go
index 8707ac35f..12db94241 100644
--- a/testutil/datagen/raw_checkpoint.go
+++ b/testutil/datagen/raw_checkpoint.go
@@ -82,6 +82,16 @@ func GenRandomSequenceRawCheckpointsWithMeta() []*types.RawCheckpointWithMeta {
 	return checkpoints
 }
 
+func GenSequenceRawCheckpointsWithMeta(tipEpoch uint64) []*types.RawCheckpointWithMeta {
+	ckpts := make([]*types.RawCheckpointWithMeta, int(tipEpoch))
+	for e := uint64(0); e < tipEpoch; e++ {
+		ckpt := GenRandomRawCheckpointWithMeta()
+		ckpt.Ckpt.EpochNum = e
+		ckpts[int(e)] = ckpt
+	}
+	return ckpts
+}
+
 func GenerateBLSSigs(keys []bls12381.PrivateKey, msg []byte) []bls12381.Signature {
 	var sigs []bls12381.Signature
 	for _, privkey := range keys {
diff --git a/x/checkpointing/keeper/grpc_query_checkpoint.go b/x/checkpointing/keeper/grpc_query_checkpoint.go
index 108fb8bbc..685c63bb0 100644
--- a/x/checkpointing/keeper/grpc_query_checkpoint.go
+++ b/x/checkpointing/keeper/grpc_query_checkpoint.go
@@ -2,6 +2,7 @@ package keeper
 
 import (
 	"context"
+	"fmt"
 
 	"github.com/babylonchain/babylon/x/checkpointing/types"
 	sdk "github.com/cosmos/cosmos-sdk/types"
@@ -80,10 +81,9 @@ func (k Keeper) RecentEpochStatusCount(ctx context.Context, req *types.QueryRece
 	}
 
 	sdkCtx := sdk.UnwrapSDKContext(ctx)
-	// minus 1 is because the current epoch is not finished
-	tipEpoch := k.GetEpoch(sdkCtx).EpochNumber - 1
-	if tipEpoch < 0 { //nolint:staticcheck // uint64 doesn't go below zero but we want to let people know that's an invalid request.
-		return nil, status.Error(codes.InvalidArgument, "invalid request")
+	tipEpoch, err := k.GetLastCheckpointedEpoch(sdkCtx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get the last checkpointed epoch: %w", err)
 	}
 	targetEpoch := tipEpoch - req.EpochCount + 1
 	if targetEpoch < 0 { //nolint:staticcheck // uint64 doesn't go below zero
@@ -109,10 +109,42 @@ func (k Keeper) RecentEpochStatusCount(ctx context.Context, req *types.QueryRece
 	}, nil
 }
 
-func (k Keeper) RecentRawCheckpointList(c context.Context, req *types.QueryRecentRawCheckpointListRequest) (*types.QueryRecentRawCheckpointListResponse, error) {
-	panic("TODO: implement this")
+// LastCheckpointWithStatus returns the last checkpoint with the given status.
+// If no checkpoint with the given status exists, it returns the last
+// checkpoint that is more mature than the given status.
+func (k Keeper) LastCheckpointWithStatus(ctx context.Context, req *types.QueryLastCheckpointWithStatusRequest) (*types.QueryLastCheckpointWithStatusResponse, error) {
+	if req == nil {
+		return nil, status.Error(codes.InvalidArgument, "invalid request")
+	}
+
+	sdkCtx := sdk.UnwrapSDKContext(ctx)
+	tipCheckpointedEpoch, err := k.GetLastCheckpointedEpoch(sdkCtx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get the last checkpointed epoch number: %w", err)
+	}
+	for e := int(tipCheckpointedEpoch); e >= 0; e-- {
+		ckpt, err := k.GetRawCheckpoint(sdkCtx, uint64(e))
+		if err != nil {
+			return nil, fmt.Errorf("failed to get the raw checkpoint at epoch %v: %w", e, err)
+		}
+		if ckpt.Status == req.Status || ckpt.IsMoreMatureThanStatus(req.Status) {
+			return &types.QueryLastCheckpointWithStatusResponse{RawCheckpoint: ckpt.Ckpt}, nil
+		}
+	}
+	return nil, fmt.Errorf("cannot find checkpoint with status %v", req.Status)
 }
 
-func (k Keeper) LatestCheckpoint(c context.Context, req *types.QueryLatestCheckpointRequest) (*types.QueryLatestCheckpointResponse, error) {
-	panic("TODO: implement this")
+// GetLastCheckpointedEpoch returns the last epoch number that is associated with a checkpoint
+func (k Keeper) GetLastCheckpointedEpoch(ctx sdk.Context) (uint64, error) {
+	curEpoch := k.GetEpoch(ctx).EpochNumber
+	if curEpoch <= 0 {
+		return 0, fmt.Errorf("current epoch should be more than 0")
+	}
+	// minus 1 is because the current epoch is not finished
+	tipEpoch := curEpoch - 1
+	_, err := k.GetRawCheckpoint(ctx, tipEpoch)
+	if err != nil {
+		return 0, fmt.Errorf("cannot get raw checkpoint at epoch %v", tipEpoch)
+	}
+	return tipEpoch, nil
 }
diff --git a/x/checkpointing/keeper/grpc_query_checkpoint_test.go b/x/checkpointing/keeper/grpc_query_checkpoint_test.go
index 00c55378a..fb0e98267 100644
--- a/x/checkpointing/keeper/grpc_query_checkpoint_test.go
+++ b/x/checkpointing/keeper/grpc_query_checkpoint_test.go
@@ -82,3 +82,46 @@ func FuzzQueryStatusCount(f *testing.F) {
 		require.Equal(t, expectedResp, resp)
 	})
 }
+
+func FuzzQueryLastCheckpointWithStatus(f *testing.F) {
+	datagen.AddRandomSeedsToFuzzer(f, 10)
+	f.Fuzz(func(t *testing.T, seed int64) {
+		rand.Seed(seed)
+
+		// test querying the last checkpoint with a given status in recent epochs
+		tipEpoch := datagen.RandomInt(100) + 10
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		ek := mocks.NewMockEpochingKeeper(ctrl)
+		ek.EXPECT().GetEpoch(gomock.Any()).Return(&epochingtypes.Epoch{EpochNumber: tipEpoch}).AnyTimes()
+		ckptKeeper, ctx, _ := testkeeper.CheckpointingKeeper(t, ek, nil, client.Context{})
+		checkpoints := datagen.GenSequenceRawCheckpointsWithMeta(tipEpoch)
+		finalizedEpoch := 
datagen.RandomInt(int(tipEpoch)) + for e := uint64(0); e < tipEpoch; e++ { + if e <= finalizedEpoch { + checkpoints[int(e)].Status = types.Finalized + } else { + checkpoints[int(e)].Status = types.Sealed + } + err := ckptKeeper.AddRawCheckpoint(ctx, checkpoints[int(e)]) + require.NoError(t, err) + } + // request the last finalized checkpoint + req := types.NewQueryLastCheckpointWithStatus(types.Finalized) + expectedResp := &types.QueryLastCheckpointWithStatusResponse{ + RawCheckpoint: checkpoints[int(finalizedEpoch)].Ckpt, + } + resp, err := ckptKeeper.LastCheckpointWithStatus(ctx, req) + require.NoError(t, err) + require.Equal(t, expectedResp, resp) + + // request the last confirmed checkpoint + req = types.NewQueryLastCheckpointWithStatus(types.Confirmed) + expectedResp = &types.QueryLastCheckpointWithStatusResponse{ + RawCheckpoint: checkpoints[int(finalizedEpoch)].Ckpt, + } + resp, err = ckptKeeper.LastCheckpointWithStatus(ctx, req) + require.NoError(t, err) + require.Equal(t, expectedResp, resp) + }) +} diff --git a/x/checkpointing/types/querier.go b/x/checkpointing/types/querier.go index 984fcb043..b27ede678 100644 --- a/x/checkpointing/types/querier.go +++ b/x/checkpointing/types/querier.go @@ -22,3 +22,7 @@ func NewQueryEpochStatusRequest(epochNum uint64) *QueryEpochStatusRequest { func NewQueryRecentEpochStatusCountRequest(epochNum uint64) *QueryRecentEpochStatusCountRequest { return &QueryRecentEpochStatusCountRequest{EpochCount: epochNum} } + +func NewQueryLastCheckpointWithStatus(status CheckpointStatus) *QueryLastCheckpointWithStatusRequest { + return &QueryLastCheckpointWithStatusRequest{Status: status} +} diff --git a/x/checkpointing/types/query.pb.go b/x/checkpointing/types/query.pb.go index 428e18dca..06abf8cf7 100644 --- a/x/checkpointing/types/query.pb.go +++ b/x/checkpointing/types/query.pb.go @@ -142,118 +142,6 @@ func (m *QueryRawCheckpointListResponse) GetPagination() *query.PageResponse { return nil } -// QueryRecentRawCheckpointListRequest is the request type for the Query/RecentRawCheckpoints -// RPC method. -type QueryRecentRawCheckpointListRequest struct { - // from_epoch defines the start epoch of the query, which is inclusive - FromEpochNum uint64 `protobuf:"varint,1,opt,name=from_epoch_num,json=fromEpochNum,proto3" json:"from_epoch_num,omitempty"` - // pagination defines an optional pagination for the request. 
- Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryRecentRawCheckpointListRequest) Reset() { *m = QueryRecentRawCheckpointListRequest{} } -func (m *QueryRecentRawCheckpointListRequest) String() string { return proto.CompactTextString(m) } -func (*QueryRecentRawCheckpointListRequest) ProtoMessage() {} -func (*QueryRecentRawCheckpointListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{2} -} -func (m *QueryRecentRawCheckpointListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryRecentRawCheckpointListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryRecentRawCheckpointListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryRecentRawCheckpointListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRecentRawCheckpointListRequest.Merge(m, src) -} -func (m *QueryRecentRawCheckpointListRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryRecentRawCheckpointListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryRecentRawCheckpointListRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryRecentRawCheckpointListRequest proto.InternalMessageInfo - -func (m *QueryRecentRawCheckpointListRequest) GetFromEpochNum() uint64 { - if m != nil { - return m.FromEpochNum - } - return 0 -} - -func (m *QueryRecentRawCheckpointListRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryRecentRawCheckpointListResponse is the response type for the Query/RecentRawCheckpoints -// RPC method. -type QueryRecentRawCheckpointListResponse struct { - // the order is going from the newest to oldest based on the epoch number - RawCheckpoints []*RawCheckpointWithMeta `protobuf:"bytes,1,rep,name=raw_checkpoints,json=rawCheckpoints,proto3" json:"raw_checkpoints,omitempty"` - // pagination defines the pagination in the response. 
- Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryRecentRawCheckpointListResponse) Reset() { *m = QueryRecentRawCheckpointListResponse{} } -func (m *QueryRecentRawCheckpointListResponse) String() string { return proto.CompactTextString(m) } -func (*QueryRecentRawCheckpointListResponse) ProtoMessage() {} -func (*QueryRecentRawCheckpointListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{3} -} -func (m *QueryRecentRawCheckpointListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryRecentRawCheckpointListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryRecentRawCheckpointListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryRecentRawCheckpointListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRecentRawCheckpointListResponse.Merge(m, src) -} -func (m *QueryRecentRawCheckpointListResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryRecentRawCheckpointListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryRecentRawCheckpointListResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryRecentRawCheckpointListResponse proto.InternalMessageInfo - -func (m *QueryRecentRawCheckpointListResponse) GetRawCheckpoints() []*RawCheckpointWithMeta { - if m != nil { - return m.RawCheckpoints - } - return nil -} - -func (m *QueryRecentRawCheckpointListResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - // QueryRawCheckpointRequest is the request type for the Query/RawCheckpoint // RPC method. type QueryRawCheckpointRequest struct { @@ -265,7 +153,7 @@ func (m *QueryRawCheckpointRequest) Reset() { *m = QueryRawCheckpointReq func (m *QueryRawCheckpointRequest) String() string { return proto.CompactTextString(m) } func (*QueryRawCheckpointRequest) ProtoMessage() {} func (*QueryRawCheckpointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{4} + return fileDescriptor_a0fdb8f0f85bb51e, []int{2} } func (m *QueryRawCheckpointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -311,7 +199,7 @@ func (m *QueryRawCheckpointResponse) Reset() { *m = QueryRawCheckpointRe func (m *QueryRawCheckpointResponse) String() string { return proto.CompactTextString(m) } func (*QueryRawCheckpointResponse) ProtoMessage() {} func (*QueryRawCheckpointResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{5} + return fileDescriptor_a0fdb8f0f85bb51e, []int{3} } func (m *QueryRawCheckpointResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -347,90 +235,6 @@ func (m *QueryRawCheckpointResponse) GetRawCheckpoint() *RawCheckpointWithMeta { return nil } -// QueryLatestCheckpointRequest is the request type for the Query/LatestCheckpoint -// RPC method. 
-type QueryLatestCheckpointRequest struct { -} - -func (m *QueryLatestCheckpointRequest) Reset() { *m = QueryLatestCheckpointRequest{} } -func (m *QueryLatestCheckpointRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLatestCheckpointRequest) ProtoMessage() {} -func (*QueryLatestCheckpointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{6} -} -func (m *QueryLatestCheckpointRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLatestCheckpointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLatestCheckpointRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLatestCheckpointRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLatestCheckpointRequest.Merge(m, src) -} -func (m *QueryLatestCheckpointRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLatestCheckpointRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLatestCheckpointRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLatestCheckpointRequest proto.InternalMessageInfo - -// QueryLatestCheckpointResponse is the response type for the Query/LatestCheckpoint -// RPC method. -type QueryLatestCheckpointResponse struct { - LatestCheckpoint *RawCheckpointWithMeta `protobuf:"bytes,1,opt,name=latest_checkpoint,json=latestCheckpoint,proto3" json:"latest_checkpoint,omitempty"` -} - -func (m *QueryLatestCheckpointResponse) Reset() { *m = QueryLatestCheckpointResponse{} } -func (m *QueryLatestCheckpointResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLatestCheckpointResponse) ProtoMessage() {} -func (*QueryLatestCheckpointResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{7} -} -func (m *QueryLatestCheckpointResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLatestCheckpointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLatestCheckpointResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLatestCheckpointResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLatestCheckpointResponse.Merge(m, src) -} -func (m *QueryLatestCheckpointResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLatestCheckpointResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLatestCheckpointResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLatestCheckpointResponse proto.InternalMessageInfo - -func (m *QueryLatestCheckpointResponse) GetLatestCheckpoint() *RawCheckpointWithMeta { - if m != nil { - return m.LatestCheckpoint - } - return nil -} - // QueryBlsPublicKeyListRequest is the request type for the Query/BlsPublicKeys // RPC method. 
type QueryBlsPublicKeyListRequest struct { @@ -444,7 +248,7 @@ func (m *QueryBlsPublicKeyListRequest) Reset() { *m = QueryBlsPublicKeyL func (m *QueryBlsPublicKeyListRequest) String() string { return proto.CompactTextString(m) } func (*QueryBlsPublicKeyListRequest) ProtoMessage() {} func (*QueryBlsPublicKeyListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{8} + return fileDescriptor_a0fdb8f0f85bb51e, []int{4} } func (m *QueryBlsPublicKeyListRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +303,7 @@ func (m *QueryBlsPublicKeyListResponse) Reset() { *m = QueryBlsPublicKey func (m *QueryBlsPublicKeyListResponse) String() string { return proto.CompactTextString(m) } func (*QueryBlsPublicKeyListResponse) ProtoMessage() {} func (*QueryBlsPublicKeyListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{9} + return fileDescriptor_a0fdb8f0f85bb51e, []int{5} } func (m *QueryBlsPublicKeyListResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -552,7 +356,7 @@ func (m *QueryEpochStatusRequest) Reset() { *m = QueryEpochStatusRequest func (m *QueryEpochStatusRequest) String() string { return proto.CompactTextString(m) } func (*QueryEpochStatusRequest) ProtoMessage() {} func (*QueryEpochStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{10} + return fileDescriptor_a0fdb8f0f85bb51e, []int{6} } func (m *QueryEpochStatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -598,7 +402,7 @@ func (m *QueryEpochStatusResponse) Reset() { *m = QueryEpochStatusRespon func (m *QueryEpochStatusResponse) String() string { return proto.CompactTextString(m) } func (*QueryEpochStatusResponse) ProtoMessage() {} func (*QueryEpochStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{11} + return fileDescriptor_a0fdb8f0f85bb51e, []int{7} } func (m *QueryEpochStatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -645,7 +449,7 @@ func (m *QueryRecentEpochStatusCountRequest) Reset() { *m = QueryRecentE func (m *QueryRecentEpochStatusCountRequest) String() string { return proto.CompactTextString(m) } func (*QueryRecentEpochStatusCountRequest) ProtoMessage() {} func (*QueryRecentEpochStatusCountRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{12} + return fileDescriptor_a0fdb8f0f85bb51e, []int{8} } func (m *QueryRecentEpochStatusCountRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -693,7 +497,7 @@ func (m *QueryRecentEpochStatusCountResponse) Reset() { *m = QueryRecent func (m *QueryRecentEpochStatusCountResponse) String() string { return proto.CompactTextString(m) } func (*QueryRecentEpochStatusCountResponse) ProtoMessage() {} func (*QueryRecentEpochStatusCountResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{13} + return fileDescriptor_a0fdb8f0f85bb51e, []int{9} } func (m *QueryRecentEpochStatusCountResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -743,6 +547,94 @@ func (m *QueryRecentEpochStatusCountResponse) GetStatusCount() map[string]uint64 return nil } +type QueryLastCheckpointWithStatusRequest struct { + Status CheckpointStatus `protobuf:"varint,1,opt,name=status,proto3,enum=babylon.checkpointing.v1.CheckpointStatus" json:"status,omitempty"` +} + +func (m *QueryLastCheckpointWithStatusRequest) Reset() { *m = QueryLastCheckpointWithStatusRequest{} } +func (m 
*QueryLastCheckpointWithStatusRequest) String() string { return proto.CompactTextString(m) } +func (*QueryLastCheckpointWithStatusRequest) ProtoMessage() {} +func (*QueryLastCheckpointWithStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0fdb8f0f85bb51e, []int{10} +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLastCheckpointWithStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLastCheckpointWithStatusRequest.Merge(m, src) +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLastCheckpointWithStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLastCheckpointWithStatusRequest proto.InternalMessageInfo + +func (m *QueryLastCheckpointWithStatusRequest) GetStatus() CheckpointStatus { + if m != nil { + return m.Status + } + return Accumulating +} + +type QueryLastCheckpointWithStatusResponse struct { + RawCheckpoint *RawCheckpoint `protobuf:"bytes,1,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` +} + +func (m *QueryLastCheckpointWithStatusResponse) Reset() { *m = QueryLastCheckpointWithStatusResponse{} } +func (m *QueryLastCheckpointWithStatusResponse) String() string { return proto.CompactTextString(m) } +func (*QueryLastCheckpointWithStatusResponse) ProtoMessage() {} +func (*QueryLastCheckpointWithStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0fdb8f0f85bb51e, []int{11} +} +func (m *QueryLastCheckpointWithStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLastCheckpointWithStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLastCheckpointWithStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLastCheckpointWithStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLastCheckpointWithStatusResponse.Merge(m, src) +} +func (m *QueryLastCheckpointWithStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryLastCheckpointWithStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLastCheckpointWithStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLastCheckpointWithStatusResponse proto.InternalMessageInfo + +func (m *QueryLastCheckpointWithStatusResponse) GetRawCheckpoint() *RawCheckpoint { + if m != nil { + return m.RawCheckpoint + } + return nil +} + // QueryParamsRequest is request type for the Query/Params RPC method. 
type QueryParamsRequest struct { } @@ -751,7 +643,7 @@ func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } func (*QueryParamsRequest) ProtoMessage() {} func (*QueryParamsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{14} + return fileDescriptor_a0fdb8f0f85bb51e, []int{12} } func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -790,7 +682,7 @@ func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } func (*QueryParamsResponse) ProtoMessage() {} func (*QueryParamsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{15} + return fileDescriptor_a0fdb8f0f85bb51e, []int{13} } func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -829,12 +721,8 @@ func (m *QueryParamsResponse) GetParams() Params { func init() { proto.RegisterType((*QueryRawCheckpointListRequest)(nil), "babylon.checkpointing.v1.QueryRawCheckpointListRequest") proto.RegisterType((*QueryRawCheckpointListResponse)(nil), "babylon.checkpointing.v1.QueryRawCheckpointListResponse") - proto.RegisterType((*QueryRecentRawCheckpointListRequest)(nil), "babylon.checkpointing.v1.QueryRecentRawCheckpointListRequest") - proto.RegisterType((*QueryRecentRawCheckpointListResponse)(nil), "babylon.checkpointing.v1.QueryRecentRawCheckpointListResponse") proto.RegisterType((*QueryRawCheckpointRequest)(nil), "babylon.checkpointing.v1.QueryRawCheckpointRequest") proto.RegisterType((*QueryRawCheckpointResponse)(nil), "babylon.checkpointing.v1.QueryRawCheckpointResponse") - proto.RegisterType((*QueryLatestCheckpointRequest)(nil), "babylon.checkpointing.v1.QueryLatestCheckpointRequest") - proto.RegisterType((*QueryLatestCheckpointResponse)(nil), "babylon.checkpointing.v1.QueryLatestCheckpointResponse") proto.RegisterType((*QueryBlsPublicKeyListRequest)(nil), "babylon.checkpointing.v1.QueryBlsPublicKeyListRequest") proto.RegisterType((*QueryBlsPublicKeyListResponse)(nil), "babylon.checkpointing.v1.QueryBlsPublicKeyListResponse") proto.RegisterType((*QueryEpochStatusRequest)(nil), "babylon.checkpointing.v1.QueryEpochStatusRequest") @@ -842,6 +730,8 @@ func init() { proto.RegisterType((*QueryRecentEpochStatusCountRequest)(nil), "babylon.checkpointing.v1.QueryRecentEpochStatusCountRequest") proto.RegisterType((*QueryRecentEpochStatusCountResponse)(nil), "babylon.checkpointing.v1.QueryRecentEpochStatusCountResponse") proto.RegisterMapType((map[string]uint64)(nil), "babylon.checkpointing.v1.QueryRecentEpochStatusCountResponse.StatusCountEntry") + proto.RegisterType((*QueryLastCheckpointWithStatusRequest)(nil), "babylon.checkpointing.v1.QueryLastCheckpointWithStatusRequest") + proto.RegisterType((*QueryLastCheckpointWithStatusResponse)(nil), "babylon.checkpointing.v1.QueryLastCheckpointWithStatusResponse") proto.RegisterType((*QueryParamsRequest)(nil), "babylon.checkpointing.v1.QueryParamsRequest") proto.RegisterType((*QueryParamsResponse)(nil), "babylon.checkpointing.v1.QueryParamsResponse") } @@ -849,72 +739,67 @@ func init() { func init() { proto.RegisterFile("babylon/checkpointing/query.proto", fileDescriptor_a0fdb8f0f85bb51e) } var fileDescriptor_a0fdb8f0f85bb51e = []byte{ - // 1032 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x97, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 
0xc7, 0x33, 0x4e, 0x1b, 0x91, 0xe7, 0x34, 0xb8, 0x43, 0x44, 0x82, 0x5b, 0xdc, 0xb0, 0xad, - 0x4a, 0x54, 0xc8, 0xae, 0xec, 0xfc, 0x54, 0x68, 0x22, 0xe1, 0x28, 0x20, 0xd4, 0x52, 0xc2, 0x22, - 0x0a, 0x42, 0x08, 0x6b, 0xec, 0x0e, 0xf6, 0x2a, 0xeb, 0x9d, 0x8d, 0x77, 0xd6, 0xc1, 0xaa, 0x72, - 0x29, 0x7f, 0x00, 0x48, 0x95, 0xf8, 0x27, 0x38, 0x71, 0xe3, 0x0c, 0x97, 0x22, 0x21, 0x54, 0x89, - 0x0b, 0x27, 0x84, 0x12, 0xfe, 0x10, 0xb4, 0xb3, 0xb3, 0xb1, 0x77, 0xd7, 0x13, 0xdb, 0xc1, 0x97, - 0xde, 0x36, 0x6f, 0xdf, 0x9b, 0xf9, 0x7c, 0xbf, 0x3b, 0xf3, 0x5e, 0x0c, 0x6f, 0x54, 0x49, 0xb5, - 0x63, 0x33, 0xc7, 0xa8, 0x35, 0x68, 0xed, 0xc0, 0x65, 0x96, 0xc3, 0x2d, 0xa7, 0x6e, 0x1c, 0xfa, - 0xb4, 0xd5, 0xd1, 0xdd, 0x16, 0xe3, 0x0c, 0x2f, 0xc8, 0x14, 0x3d, 0x96, 0xa2, 0xb7, 0x8b, 0xf9, - 0x9b, 0xfd, 0x8b, 0xab, 0xb6, 0x57, 0x39, 0xa0, 0xb2, 0x3c, 0x7f, 0xa7, 0xc6, 0xbc, 0x26, 0xf3, - 0x8c, 0x2a, 0xf1, 0x68, 0xb8, 0xae, 0xd1, 0x2e, 0x56, 0x29, 0x27, 0x45, 0xc3, 0x25, 0x75, 0xcb, - 0x21, 0xdc, 0x62, 0x8e, 0xcc, 0x9d, 0xab, 0xb3, 0x3a, 0x13, 0x8f, 0x46, 0xf0, 0x24, 0xa3, 0xd7, - 0xeb, 0x8c, 0xd5, 0x6d, 0x6a, 0x10, 0xd7, 0x32, 0x88, 0xe3, 0x30, 0x2e, 0x4a, 0x3c, 0xf9, 0x56, - 0xeb, 0x0f, 0xe1, 0x92, 0x16, 0x69, 0x46, 0x39, 0xb7, 0xfb, 0xe7, 0x74, 0xff, 0x0a, 0xf3, 0xb4, - 0x1f, 0x11, 0xbc, 0xfe, 0x71, 0x80, 0x68, 0x92, 0xa3, 0xdd, 0xb3, 0x97, 0xf7, 0x2d, 0x8f, 0x9b, - 0xf4, 0xd0, 0xa7, 0x1e, 0xc7, 0x65, 0x98, 0xf2, 0x38, 0xe1, 0xbe, 0xb7, 0x80, 0x16, 0xd1, 0xd2, - 0x6c, 0xe9, 0x8e, 0xae, 0x72, 0x47, 0xef, 0x2e, 0xf0, 0x89, 0xa8, 0x30, 0x65, 0x25, 0x7e, 0x0f, - 0xa0, 0xab, 0x7c, 0x21, 0xb3, 0x88, 0x96, 0xb2, 0xa5, 0xdb, 0x7a, 0x68, 0x93, 0x1e, 0xd8, 0xa4, - 0x87, 0xf6, 0x4b, 0x9b, 0xf4, 0x7d, 0x52, 0xa7, 0x72, 0x7f, 0xb3, 0xa7, 0x52, 0xfb, 0x15, 0x41, - 0x41, 0x45, 0xeb, 0xb9, 0xcc, 0xf1, 0x28, 0xfe, 0x1c, 0x5e, 0x6e, 0x91, 0xa3, 0x4a, 0x97, 0x2d, - 0xe0, 0x9e, 0x5c, 0xca, 0x96, 0x0c, 0x35, 0x77, 0x6c, 0xb5, 0xcf, 0x2c, 0xde, 0xf8, 0x90, 0x72, - 0x62, 0xce, 0xb6, 0x7a, 0xc3, 0x1e, 0x7e, 0xbf, 0x8f, 0x88, 0x37, 0x07, 0x8a, 0x08, 0xb1, 0x62, - 0x2a, 0x9e, 0x22, 0xb8, 0x19, 0xaa, 0xa0, 0x35, 0xea, 0x70, 0xa5, 0xf3, 0xb7, 0x60, 0xf6, 0xeb, - 0x16, 0x6b, 0x56, 0xa8, 0xcb, 0x6a, 0x8d, 0x8a, 0xe3, 0x37, 0xc5, 0x17, 0xb8, 0x64, 0xce, 0x04, - 0xd1, 0xbd, 0x20, 0xf8, 0xc0, 0x6f, 0x8e, 0xcd, 0xdb, 0xdf, 0x10, 0xdc, 0x3a, 0x9f, 0xea, 0xc5, - 0x71, 0x78, 0x13, 0x5e, 0x4b, 0x1f, 0x93, 0xc8, 0xd6, 0x6b, 0x30, 0x9d, 0x74, 0xf4, 0x25, 0x2a, - 0xdd, 0xd4, 0x38, 0xe4, 0xfb, 0x55, 0x4a, 0xe9, 0x0f, 0x61, 0x36, 0x2e, 0x5d, 0xd4, 0x5f, 0x40, - 0xf9, 0x95, 0x98, 0x72, 0xad, 0x00, 0xd7, 0xc5, 0xae, 0xf7, 0x09, 0xa7, 0x1e, 0x4f, 0x21, 0x6b, - 0xc7, 0xf2, 0x92, 0xa6, 0xdf, 0x4b, 0xb0, 0x2f, 0xe1, 0xaa, 0x2d, 0xde, 0x8d, 0x81, 0x2d, 0x67, - 0x27, 0x76, 0xd1, 0xbe, 0x45, 0x92, 0xaf, 0x6c, 0x7b, 0xfb, 0x7e, 0xd5, 0xb6, 0x6a, 0xf7, 0x68, - 0xa7, 0xf7, 0xa4, 0x9e, 0x67, 0xe9, 0xd8, 0x0e, 0xe8, 0x1f, 0x51, 0xab, 0x4a, 0x53, 0x48, 0x17, - 0x1e, 0xc1, 0x7c, 0x9b, 0xd8, 0xd6, 0x23, 0xc2, 0x59, 0xab, 0x72, 0x64, 0xf1, 0x46, 0x45, 0x36, - 0xe6, 0xe8, 0x84, 0x2e, 0xab, 0xbd, 0x78, 0x18, 0x15, 0x06, 0x3e, 0x94, 0x6d, 0xef, 0x1e, 0xed, - 0x98, 0x73, 0xed, 0x74, 0x70, 0x8c, 0xa7, 0x74, 0x1d, 0xe6, 0x85, 0x1e, 0x71, 0x95, 0x65, 0xc7, - 0x1c, 0xe6, 0x8c, 0x7e, 0x05, 0x0b, 0xe9, 0x3a, 0x69, 0xc1, 0x18, 0xba, 0xb5, 0xb6, 0x07, 0x5a, - 0x4f, 0x23, 0xe8, 0xd9, 0x65, 0x97, 0xf9, 0xdd, 0x6b, 0x74, 0x03, 0xb2, 0x21, 0x62, 0x2d, 0x88, - 0x4a, 0x48, 0x10, 0x21, 0x91, 0xa7, 0xfd, 0x90, 0x89, 0xb5, 0xb9, 0xf4, 0x3a, 0x12, 0xf9, 0x1a, - 0x4c, 0x73, 0xcb, 0x0d, 0xbb, 
0x5c, 0xa4, 0x95, 0x5b, 0xae, 0xc8, 0x4f, 0xee, 0x92, 0x49, 0xee, - 0x82, 0x0f, 0x61, 0x26, 0xc4, 0x96, 0x19, 0x93, 0xe2, 0x43, 0x3f, 0x50, 0xcb, 0x1e, 0x02, 0x49, - 0xef, 0x89, 0xed, 0x39, 0xbc, 0xd5, 0x31, 0xb3, 0x5e, 0x37, 0x92, 0xdf, 0x81, 0x5c, 0x32, 0x01, - 0xe7, 0x60, 0xf2, 0x80, 0x76, 0x04, 0xfe, 0xb4, 0x19, 0x3c, 0xe2, 0x39, 0xb8, 0xdc, 0x26, 0xb6, - 0x4f, 0x25, 0x73, 0xf8, 0xc7, 0x56, 0x66, 0x13, 0x69, 0x73, 0x80, 0x05, 0xc4, 0xbe, 0x18, 0xd8, - 0xd1, 0x1d, 0xff, 0x14, 0x5e, 0x89, 0x45, 0xa5, 0x3b, 0x3b, 0x30, 0x15, 0x0e, 0x76, 0x79, 0x9d, - 0x17, 0xd5, 0xca, 0xc2, 0xca, 0xf2, 0xa5, 0x67, 0x7f, 0xdf, 0x98, 0x30, 0x65, 0x55, 0xe9, 0xc9, - 0x0c, 0x5c, 0x16, 0xeb, 0xe2, 0x5f, 0x10, 0x5c, 0x4d, 0x75, 0x75, 0xbc, 0x31, 0xc8, 0x29, 0xc5, - 0x74, 0xca, 0x6f, 0x8e, 0x5e, 0x18, 0x4a, 0xd2, 0xb6, 0x9e, 0xfc, 0xf9, 0xef, 0xd3, 0xcc, 0x2a, - 0x2e, 0x19, 0xfd, 0xff, 0x49, 0x69, 0x17, 0x8d, 0xc4, 0x80, 0x31, 0x1e, 0x87, 0xfe, 0x1f, 0xe3, - 0x53, 0x04, 0xf3, 0x8a, 0x01, 0x85, 0xb7, 0x87, 0xfa, 0xe8, 0x4a, 0x41, 0x3b, 0x17, 0x2d, 0x97, - 0xb2, 0x3e, 0x10, 0xb2, 0x76, 0xf1, 0xbb, 0xe7, 0xc8, 0x12, 0x4b, 0x54, 0x52, 0xea, 0xe2, 0x63, - 0xfe, 0x18, 0xff, 0x8c, 0xe0, 0x4a, 0x6c, 0x23, 0xbc, 0x32, 0x8a, 0xdb, 0x91, 0xa2, 0xd5, 0xd1, - 0x8a, 0xa4, 0x8e, 0xbb, 0x42, 0xc7, 0x3a, 0x5e, 0x1d, 0xf6, 0xf3, 0x18, 0x8f, 0xe3, 0xe8, 0xb9, - 0xe4, 0x98, 0xc2, 0xeb, 0x03, 0x40, 0x14, 0x73, 0x2f, 0xbf, 0x31, 0x72, 0x9d, 0xd4, 0xb0, 0x22, - 0x34, 0x2c, 0xe3, 0xb7, 0xd4, 0x1a, 0x52, 0xf3, 0x32, 0xb8, 0x20, 0xb9, 0xe4, 0x6c, 0x19, 0x88, - 0xae, 0x18, 0x89, 0x03, 0xd1, 0x55, 0x43, 0x4c, 0xdb, 0x16, 0xe8, 0x1b, 0x78, 0x4d, 0x8d, 0x1e, - 0x4c, 0x35, 0x57, 0x14, 0x8b, 0xe1, 0x16, 0xf3, 0xff, 0x27, 0x04, 0xd9, 0x9e, 0xbe, 0x86, 0x8b, - 0x03, 0x38, 0xd2, 0xc3, 0x27, 0x5f, 0x1a, 0xa5, 0x44, 0x52, 0xbf, 0x23, 0xa8, 0xd7, 0xf0, 0x8a, - 0x9a, 0x5a, 0x40, 0xc6, 0x60, 0x0d, 0xf9, 0xf3, 0xe0, 0x77, 0x04, 0xaf, 0xf6, 0xef, 0xc8, 0xf8, - 0xee, 0x05, 0x1b, 0x79, 0xa8, 0x64, 0xfb, 0x7f, 0x8d, 0x01, 0x6d, 0x4d, 0x88, 0x32, 0xf0, 0xf2, - 0x20, 0x51, 0x5b, 0xbd, 0x23, 0x08, 0x7f, 0x87, 0x60, 0x2a, 0xec, 0xc5, 0xf8, 0xed, 0x01, 0x00, - 0xb1, 0x11, 0x90, 0x5f, 0x1e, 0x32, 0x5b, 0xe2, 0x2d, 0x09, 0x3c, 0x0d, 0x2f, 0xaa, 0xf1, 0xc2, - 0x21, 0x50, 0xfe, 0xe8, 0xd9, 0x49, 0x01, 0x3d, 0x3f, 0x29, 0xa0, 0x7f, 0x4e, 0x0a, 0xe8, 0xfb, - 0xd3, 0xc2, 0xc4, 0xf3, 0xd3, 0xc2, 0xc4, 0x5f, 0xa7, 0x85, 0x89, 0x2f, 0xd6, 0xea, 0x16, 0x6f, - 0xf8, 0x55, 0xbd, 0xc6, 0x9a, 0xd1, 0x2a, 0xb5, 0x06, 0xb1, 0x9c, 0xb3, 0x25, 0xbf, 0x49, 0x2c, - 0xca, 0x3b, 0x2e, 0xf5, 0xaa, 0x53, 0xe2, 0xd7, 0xe3, 0xca, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x91, 0xee, 0xc3, 0xd0, 0x4d, 0x0f, 0x00, 0x00, + // 959 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xce, 0x38, 0x6d, 0x44, 0x9e, 0x69, 0x08, 0x43, 0x44, 0xcd, 0xb6, 0xb8, 0x61, 0x0b, 0x6d, + 0x54, 0x91, 0x5d, 0xd9, 0xf9, 0xa9, 0xd0, 0x06, 0xc9, 0x55, 0xe0, 0xd0, 0x12, 0xc2, 0x22, 0x0a, + 0xe2, 0x80, 0x35, 0xde, 0x8e, 0xec, 0x25, 0xf6, 0xce, 0xc6, 0x33, 0xeb, 0x60, 0x55, 0xbd, 0xc0, + 0x1f, 0x00, 0x12, 0x12, 0x17, 0xfe, 0x04, 0x4e, 0xdc, 0x38, 0xc3, 0xa5, 0x07, 0x84, 0x2a, 0x71, + 0xe1, 0x84, 0x50, 0xc2, 0xff, 0x01, 0xda, 0x99, 0xd9, 0xd8, 0x6b, 0x7b, 0xbb, 0x76, 0xea, 0xdb, + 0x7a, 0xf6, 0x7d, 0xef, 0x7d, 0xdf, 0x9b, 0xb7, 0xdf, 0x4b, 0xe0, 0x8d, 0x1a, 0xa9, 0x75, 0x9b, + 0xcc, 0xb7, 0xdd, 0x06, 0x75, 0x0f, 0x03, 0xe6, 0xf9, 0xc2, 0xf3, 0xeb, 0xf6, 0x51, 0x48, 0xdb, + 0x5d, 0x2b, 0x68, 0x33, 0xc1, 0x70, 0x41, 0x87, 0x58, 
0x89, 0x10, 0xab, 0x53, 0x32, 0xae, 0x8f, + 0x06, 0xd7, 0x9a, 0xbc, 0x7a, 0x48, 0x35, 0xdc, 0xb8, 0xe5, 0x32, 0xde, 0x62, 0xdc, 0xae, 0x11, + 0x4e, 0x55, 0x5e, 0xbb, 0x53, 0xaa, 0x51, 0x41, 0x4a, 0x76, 0x40, 0xea, 0x9e, 0x4f, 0x84, 0xc7, + 0x7c, 0x1d, 0xbb, 0x54, 0x67, 0x75, 0x26, 0x1f, 0xed, 0xe8, 0x49, 0x9f, 0x5e, 0xad, 0x33, 0x56, + 0x6f, 0x52, 0x9b, 0x04, 0x9e, 0x4d, 0x7c, 0x9f, 0x09, 0x09, 0xe1, 0xfa, 0xad, 0x39, 0x9a, 0x44, + 0x40, 0xda, 0xa4, 0x15, 0xc7, 0xdc, 0x18, 0x1d, 0xd3, 0xfb, 0xa5, 0xe2, 0xcc, 0x9f, 0x10, 0xbc, + 0xfe, 0x51, 0x44, 0xd1, 0x21, 0xc7, 0x77, 0xcf, 0x5e, 0xde, 0xf7, 0xb8, 0x70, 0xe8, 0x51, 0x48, + 0xb9, 0xc0, 0x15, 0x98, 0xe3, 0x82, 0x88, 0x90, 0x17, 0xd0, 0x32, 0x5a, 0x59, 0x28, 0xdf, 0xb2, + 0xd2, 0xba, 0x63, 0xf5, 0x12, 0x7c, 0x2c, 0x11, 0x8e, 0x46, 0xe2, 0xf7, 0x00, 0x7a, 0xca, 0x0b, + 0xb9, 0x65, 0xb4, 0x92, 0x2f, 0xdf, 0xb0, 0x54, 0x9b, 0xac, 0xa8, 0x4d, 0x96, 0x6a, 0xbf, 0x6e, + 0x93, 0x75, 0x40, 0xea, 0x54, 0xd7, 0x77, 0xfa, 0x90, 0xe6, 0x6f, 0x08, 0x8a, 0x69, 0x6c, 0x79, + 0xc0, 0x7c, 0x4e, 0xf1, 0x67, 0xf0, 0x52, 0x9b, 0x1c, 0x57, 0x7b, 0xdc, 0x22, 0xde, 0xb3, 0x2b, + 0xf9, 0xb2, 0x9d, 0xce, 0x3b, 0x91, 0xed, 0x53, 0x4f, 0x34, 0x3e, 0xa0, 0x82, 0x38, 0x0b, 0xed, + 0xfe, 0x63, 0x8e, 0xdf, 0x1f, 0x21, 0xe2, 0x66, 0xa6, 0x08, 0x45, 0x2b, 0xa1, 0x62, 0x1b, 0x5e, + 0x1b, 0x16, 0x11, 0xb7, 0xfb, 0x0a, 0xcc, 0xd3, 0x80, 0xb9, 0x8d, 0xaa, 0x1f, 0xb6, 0x64, 0xc7, + 0x2f, 0x38, 0x2f, 0xc8, 0x83, 0xfd, 0xb0, 0x65, 0x0a, 0x30, 0x46, 0x21, 0xb5, 0xf4, 0x07, 0xb0, + 0x90, 0x94, 0x2e, 0xf1, 0xe7, 0x50, 0x7e, 0x29, 0xa1, 0xdc, 0xfc, 0x06, 0xc1, 0x55, 0x59, 0xb6, + 0xd2, 0xe4, 0x07, 0x61, 0xad, 0xe9, 0xb9, 0xf7, 0x68, 0xb7, 0x7f, 0x44, 0x9e, 0xc5, 0x79, 0x6a, + 0x77, 0xff, 0x47, 0x3c, 0xa9, 0xc3, 0x2c, 0xb4, 0xfe, 0x87, 0x70, 0xb9, 0x43, 0x9a, 0xde, 0x43, + 0x22, 0x58, 0xbb, 0x7a, 0xec, 0x89, 0x46, 0x55, 0x7f, 0x97, 0xf1, 0x08, 0xac, 0xa6, 0x37, 0xe2, + 0x41, 0x0c, 0x8c, 0x9a, 0x50, 0x69, 0xf2, 0x7b, 0xb4, 0xeb, 0x2c, 0x75, 0x86, 0x0f, 0xa7, 0x38, + 0x06, 0x9b, 0x70, 0x59, 0xea, 0xd9, 0x8b, 0x3a, 0xa5, 0x3f, 0x98, 0x71, 0x86, 0xe0, 0x0b, 0x28, + 0x0c, 0xe3, 0x74, 0x0b, 0xa6, 0xf0, 0xb1, 0x9a, 0x7b, 0x60, 0xaa, 0x21, 0xa3, 0x2e, 0xf5, 0x45, + 0x5f, 0x95, 0xbb, 0x2c, 0xec, 0xcd, 0xe9, 0x35, 0xc8, 0x2b, 0x8a, 0x6e, 0x74, 0xaa, 0x49, 0x82, + 0x3c, 0x92, 0x71, 0xe6, 0x0f, 0x39, 0xb8, 0xfe, 0xcc, 0x3c, 0x9a, 0xf2, 0x15, 0x98, 0x17, 0x5e, + 0x50, 0x95, 0xc8, 0x58, 0xab, 0xf0, 0x02, 0x19, 0x3f, 0x58, 0x25, 0x37, 0x58, 0x05, 0x1f, 0xc1, + 0x8b, 0x8a, 0xb6, 0x8e, 0x98, 0x95, 0x17, 0xbd, 0x9f, 0x2e, 0x7b, 0x0c, 0x4a, 0x56, 0xdf, 0xd9, + 0x9e, 0x2f, 0xda, 0x5d, 0x27, 0xcf, 0x7b, 0x27, 0xc6, 0x2e, 0x2c, 0x0e, 0x06, 0xe0, 0x45, 0x98, + 0x3d, 0xa4, 0x5d, 0x49, 0x7f, 0xde, 0x89, 0x1e, 0xf1, 0x12, 0x5c, 0xec, 0x90, 0x66, 0x48, 0x35, + 0x67, 0xf5, 0x63, 0x27, 0xb7, 0x8d, 0xcc, 0x2f, 0xe1, 0x4d, 0x49, 0xe2, 0x3e, 0xe1, 0x22, 0xf9, + 0xf1, 0x25, 0x87, 0x60, 0x1a, 0x77, 0x79, 0x0c, 0x6f, 0x65, 0xd4, 0xd2, 0xb7, 0xb0, 0x9f, 0xe2, + 0x1d, 0x37, 0xc7, 0xf4, 0x8e, 0x41, 0xcf, 0x58, 0x02, 0x2c, 0x0b, 0x1f, 0xc8, 0xa5, 0xa4, 0x25, + 0x99, 0x9f, 0xc0, 0x2b, 0x89, 0x53, 0x5d, 0x7c, 0x17, 0xe6, 0xd4, 0xf2, 0xd2, 0x45, 0x97, 0xd3, + 0x8b, 0x2a, 0x64, 0xe5, 0xc2, 0x93, 0xbf, 0xaf, 0xcd, 0x38, 0x1a, 0x55, 0xfe, 0x6f, 0x1e, 0x2e, + 0xca, 0xbc, 0xf8, 0x57, 0x04, 0x2f, 0x0f, 0xed, 0x06, 0xbc, 0x95, 0x35, 0x0e, 0x29, 0xbb, 0xcf, + 0xd8, 0x9e, 0x1c, 0xa8, 0x24, 0x99, 0x3b, 0x5f, 0xff, 0xf9, 0xef, 0xf7, 0xb9, 0x75, 0x5c, 0xb6, + 0x47, 0x2f, 0xe2, 0x4e, 0xc9, 0x1e, 0x58, 0x53, 0xf6, 0x23, 0x75, 0x67, 0x8f, 
0xf1, 0x2f, 0x08, + 0x2e, 0x25, 0x32, 0xe3, 0xb5, 0x49, 0x78, 0xc4, 0xe4, 0xd7, 0x27, 0x03, 0x69, 0xe2, 0xb7, 0x25, + 0xf1, 0x4d, 0xbc, 0x3e, 0x2e, 0x71, 0xfb, 0xd1, 0x99, 0x55, 0x3d, 0x8e, 0xfa, 0xbf, 0x38, 0xe8, + 0xcf, 0x78, 0x33, 0x83, 0x48, 0xca, 0x5a, 0x31, 0xb6, 0x26, 0xc6, 0x69, 0x0d, 0x77, 0xa4, 0x86, + 0x2d, 0xbc, 0x91, 0xae, 0x21, 0xda, 0x0c, 0x81, 0x04, 0xcb, 0x05, 0x91, 0x10, 0xf1, 0x33, 0x82, + 0x7c, 0x9f, 0x37, 0xe0, 0x52, 0x06, 0x8f, 0x61, 0x03, 0x37, 0xca, 0x93, 0x40, 0x34, 0xeb, 0x77, + 0x24, 0xeb, 0x0d, 0xbc, 0x96, 0xce, 0x5a, 0x92, 0x4c, 0x90, 0xb5, 0xf5, 0x5f, 0x58, 0xbf, 0x23, + 0x78, 0x75, 0xb4, 0xab, 0xe1, 0xdb, 0xe7, 0x34, 0x43, 0xa5, 0xe4, 0xce, 0x73, 0x59, 0xa9, 0xb9, + 0x21, 0x45, 0xd9, 0x78, 0x35, 0x4b, 0xd4, 0x4e, 0xbf, 0x8d, 0xe3, 0x1f, 0x11, 0x14, 0xd2, 0x3c, + 0x0b, 0xef, 0x66, 0x50, 0xca, 0x30, 0x56, 0xe3, 0xdd, 0x73, 0xe3, 0xb5, 0x5f, 0x7d, 0x8b, 0x60, + 0x4e, 0x19, 0x11, 0x7e, 0x3b, 0x23, 0x57, 0xc2, 0xff, 0x8c, 0xd5, 0x31, 0xa3, 0x75, 0xf3, 0x56, + 0x64, 0xf3, 0x4c, 0xbc, 0x9c, 0xde, 0x3c, 0xe5, 0x80, 0x95, 0x0f, 0x9f, 0x9c, 0x14, 0xd1, 0xd3, + 0x93, 0x22, 0xfa, 0xe7, 0xa4, 0x88, 0xbe, 0x3b, 0x2d, 0xce, 0x3c, 0x3d, 0x2d, 0xce, 0xfc, 0x75, + 0x5a, 0x9c, 0xf9, 0x7c, 0xa3, 0xee, 0x89, 0x46, 0x58, 0xb3, 0x5c, 0xd6, 0x8a, 0xb3, 0xb8, 0x0d, + 0xe2, 0xf9, 0x67, 0x29, 0xbf, 0x1a, 0x48, 0x2a, 0xba, 0x01, 0xe5, 0xb5, 0x39, 0xf9, 0xef, 0xc1, + 0xda, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x92, 0xec, 0x6d, 0x2e, 0x0d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -931,18 +816,16 @@ const _ = grpc.SupportPackageIsVersion4 type QueryClient interface { // RawCheckpointList queries all checkpoints that match the given status. RawCheckpointList(ctx context.Context, in *QueryRawCheckpointListRequest, opts ...grpc.CallOption) (*QueryRawCheckpointListResponse, error) - // RawCheckpointList queries a list of checkpoints starting from a given epoch number to the current epoch number. - RecentRawCheckpointList(ctx context.Context, in *QueryRecentRawCheckpointListRequest, opts ...grpc.CallOption) (*QueryRecentRawCheckpointListResponse, error) // RawCheckpoint queries a checkpoints at a given epoch number. RawCheckpoint(ctx context.Context, in *QueryRawCheckpointRequest, opts ...grpc.CallOption) (*QueryRawCheckpointResponse, error) - // LatestCheckpoint queries the checkpoint with the highest epoch num. - LatestCheckpoint(ctx context.Context, in *QueryLatestCheckpointRequest, opts ...grpc.CallOption) (*QueryLatestCheckpointResponse, error) // BlsPublicKeyList queries a list of bls public keys of the validators at a given epoch number. BlsPublicKeyList(ctx context.Context, in *QueryBlsPublicKeyListRequest, opts ...grpc.CallOption) (*QueryBlsPublicKeyListResponse, error) // EpochStatus queries the status of the checkpoint at a given epoch EpochStatus(ctx context.Context, in *QueryEpochStatusRequest, opts ...grpc.CallOption) (*QueryEpochStatusResponse, error) // RecentEpochStatusCount queries the number of epochs with each status in recent epochs RecentEpochStatusCount(ctx context.Context, in *QueryRecentEpochStatusCountRequest, opts ...grpc.CallOption) (*QueryRecentEpochStatusCountResponse, error) + // LastCheckpointWithStatus queries the last checkpoint with a given status or a more matured status + LastCheckpointWithStatus(ctx context.Context, in *QueryLastCheckpointWithStatusRequest, opts ...grpc.CallOption) (*QueryLastCheckpointWithStatusResponse, error) // Parameters queries the parameters of the module. 
Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) } @@ -964,15 +847,6 @@ func (c *queryClient) RawCheckpointList(ctx context.Context, in *QueryRawCheckpo return out, nil } -func (c *queryClient) RecentRawCheckpointList(ctx context.Context, in *QueryRecentRawCheckpointListRequest, opts ...grpc.CallOption) (*QueryRecentRawCheckpointListResponse, error) { - out := new(QueryRecentRawCheckpointListResponse) - err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/RecentRawCheckpointList", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *queryClient) RawCheckpoint(ctx context.Context, in *QueryRawCheckpointRequest, opts ...grpc.CallOption) (*QueryRawCheckpointResponse, error) { out := new(QueryRawCheckpointResponse) err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/RawCheckpoint", in, out, opts...) @@ -982,15 +856,6 @@ func (c *queryClient) RawCheckpoint(ctx context.Context, in *QueryRawCheckpointR return out, nil } -func (c *queryClient) LatestCheckpoint(ctx context.Context, in *QueryLatestCheckpointRequest, opts ...grpc.CallOption) (*QueryLatestCheckpointResponse, error) { - out := new(QueryLatestCheckpointResponse) - err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/LatestCheckpoint", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *queryClient) BlsPublicKeyList(ctx context.Context, in *QueryBlsPublicKeyListRequest, opts ...grpc.CallOption) (*QueryBlsPublicKeyListResponse, error) { out := new(QueryBlsPublicKeyListResponse) err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/BlsPublicKeyList", in, out, opts...) @@ -1018,6 +883,15 @@ func (c *queryClient) RecentEpochStatusCount(ctx context.Context, in *QueryRecen return out, nil } +func (c *queryClient) LastCheckpointWithStatus(ctx context.Context, in *QueryLastCheckpointWithStatusRequest, opts ...grpc.CallOption) (*QueryLastCheckpointWithStatusResponse, error) { + out := new(QueryLastCheckpointWithStatusResponse) + err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/LastCheckpointWithStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { out := new(QueryParamsResponse) err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/Params", in, out, opts...) @@ -1031,18 +905,16 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts . type QueryServer interface { // RawCheckpointList queries all checkpoints that match the given status. RawCheckpointList(context.Context, *QueryRawCheckpointListRequest) (*QueryRawCheckpointListResponse, error) - // RawCheckpointList queries a list of checkpoints starting from a given epoch number to the current epoch number. - RecentRawCheckpointList(context.Context, *QueryRecentRawCheckpointListRequest) (*QueryRecentRawCheckpointListResponse, error) // RawCheckpoint queries a checkpoints at a given epoch number. RawCheckpoint(context.Context, *QueryRawCheckpointRequest) (*QueryRawCheckpointResponse, error) - // LatestCheckpoint queries the checkpoint with the highest epoch num. - LatestCheckpoint(context.Context, *QueryLatestCheckpointRequest) (*QueryLatestCheckpointResponse, error) // BlsPublicKeyList queries a list of bls public keys of the validators at a given epoch number. 
BlsPublicKeyList(context.Context, *QueryBlsPublicKeyListRequest) (*QueryBlsPublicKeyListResponse, error) // EpochStatus queries the status of the checkpoint at a given epoch EpochStatus(context.Context, *QueryEpochStatusRequest) (*QueryEpochStatusResponse, error) // RecentEpochStatusCount queries the number of epochs with each status in recent epochs RecentEpochStatusCount(context.Context, *QueryRecentEpochStatusCountRequest) (*QueryRecentEpochStatusCountResponse, error) + // LastCheckpointWithStatus queries the last checkpoint with a given status or a more matured status + LastCheckpointWithStatus(context.Context, *QueryLastCheckpointWithStatusRequest) (*QueryLastCheckpointWithStatusResponse, error) // Parameters queries the parameters of the module. Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) } @@ -1054,15 +926,9 @@ type UnimplementedQueryServer struct { func (*UnimplementedQueryServer) RawCheckpointList(ctx context.Context, req *QueryRawCheckpointListRequest) (*QueryRawCheckpointListResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RawCheckpointList not implemented") } -func (*UnimplementedQueryServer) RecentRawCheckpointList(ctx context.Context, req *QueryRecentRawCheckpointListRequest) (*QueryRecentRawCheckpointListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RecentRawCheckpointList not implemented") -} func (*UnimplementedQueryServer) RawCheckpoint(ctx context.Context, req *QueryRawCheckpointRequest) (*QueryRawCheckpointResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RawCheckpoint not implemented") } -func (*UnimplementedQueryServer) LatestCheckpoint(ctx context.Context, req *QueryLatestCheckpointRequest) (*QueryLatestCheckpointResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LatestCheckpoint not implemented") -} func (*UnimplementedQueryServer) BlsPublicKeyList(ctx context.Context, req *QueryBlsPublicKeyListRequest) (*QueryBlsPublicKeyListResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BlsPublicKeyList not implemented") } @@ -1072,6 +938,9 @@ func (*UnimplementedQueryServer) EpochStatus(ctx context.Context, req *QueryEpoc func (*UnimplementedQueryServer) RecentEpochStatusCount(ctx context.Context, req *QueryRecentEpochStatusCountRequest) (*QueryRecentEpochStatusCountResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecentEpochStatusCount not implemented") } +func (*UnimplementedQueryServer) LastCheckpointWithStatus(ctx context.Context, req *QueryLastCheckpointWithStatusRequest) (*QueryLastCheckpointWithStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LastCheckpointWithStatus not implemented") +} func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") } @@ -1098,24 +967,6 @@ func _Query_RawCheckpointList_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _Query_RecentRawCheckpointList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryRecentRawCheckpointListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).RecentRawCheckpointList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/babylon.checkpointing.v1.Query/RecentRawCheckpointList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).RecentRawCheckpointList(ctx, req.(*QueryRecentRawCheckpointListRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Query_RawCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryRawCheckpointRequest) if err := dec(in); err != nil { @@ -1134,24 +985,6 @@ func _Query_RawCheckpoint_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } -func _Query_LatestCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLatestCheckpointRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).LatestCheckpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.checkpointing.v1.Query/LatestCheckpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).LatestCheckpoint(ctx, req.(*QueryLatestCheckpointRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Query_BlsPublicKeyList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryBlsPublicKeyListRequest) if err := dec(in); err != nil { @@ -1206,44 +1039,54 @@ func _Query_RecentEpochStatusCount_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } -func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryParamsRequest) +func _Query_LastCheckpointWithStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryLastCheckpointWithStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).Params(ctx, in) + return srv.(QueryServer).LastCheckpointWithStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/babylon.checkpointing.v1.Query/Params", + FullMethod: "/babylon.checkpointing.v1.Query/LastCheckpointWithStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + return srv.(QueryServer).LastCheckpointWithStatus(ctx, req.(*QueryLastCheckpointWithStatusRequest)) } return interceptor(ctx, in, info, handler) } -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "babylon.checkpointing.v1.Query", - HandlerType: (*QueryServer)(nil), +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.checkpointing.v1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, 
in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "babylon.checkpointing.v1.Query", + HandlerType: (*QueryServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "RawCheckpointList", Handler: _Query_RawCheckpointList_Handler, }, - { - MethodName: "RecentRawCheckpointList", - Handler: _Query_RecentRawCheckpointList_Handler, - }, { MethodName: "RawCheckpoint", Handler: _Query_RawCheckpoint_Handler, }, - { - MethodName: "LatestCheckpoint", - Handler: _Query_LatestCheckpoint_Handler, - }, { MethodName: "BlsPublicKeyList", Handler: _Query_BlsPublicKeyList_Handler, @@ -1256,6 +1099,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "RecentEpochStatusCount", Handler: _Query_RecentEpochStatusCount_Handler, }, + { + MethodName: "LastCheckpointWithStatus", + Handler: _Query_LastCheckpointWithStatus_Handler, + }, { MethodName: "Params", Handler: _Query_Params_Handler, @@ -1354,95 +1201,6 @@ func (m *QueryRawCheckpointListResponse) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *QueryRecentRawCheckpointListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryRecentRawCheckpointListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryRecentRawCheckpointListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.FromEpochNum != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.FromEpochNum)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *QueryRecentRawCheckpointListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryRecentRawCheckpointListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryRecentRawCheckpointListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.RawCheckpoints) > 0 { - for iNdEx := len(m.RawCheckpoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.RawCheckpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *QueryRawCheckpointRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1506,64 +1264,6 @@ func (m *QueryRawCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *QueryLatestCheckpointRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*QueryLatestCheckpointRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLatestCheckpointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *QueryLatestCheckpointResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLatestCheckpointResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLatestCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.LatestCheckpoint != nil { - { - size, err := m.LatestCheckpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *QueryBlsPublicKeyListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1787,6 +1487,69 @@ func (m *QueryRecentEpochStatusCountResponse) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, nil } +func (m *QueryLastCheckpointWithStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLastCheckpointWithStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLastCheckpointWithStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryLastCheckpointWithStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLastCheckpointWithStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLastCheckpointWithStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RawCheckpoint != nil { + { + size, err := m.RawCheckpoint.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1889,42 +1652,32 @@ func (m *QueryRawCheckpointListResponse) Size() (n int) { return n } -func (m *QueryRecentRawCheckpointListRequest) Size() (n int) { +func (m *QueryRawCheckpointRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.FromEpochNum != 0 { - n += 1 + sovQuery(uint64(m.FromEpochNum)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) } return n } -func (m *QueryRecentRawCheckpointListResponse) Size() (n int) { +func (m *QueryRawCheckpointResponse) Size() (n int) { if m == nil { 
return 0 } var l int _ = l - if len(m.RawCheckpoints) > 0 { - for _, e := range m.RawCheckpoints { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() + if m.RawCheckpoint != nil { + l = m.RawCheckpoint.Size() n += 1 + l + sovQuery(uint64(l)) } return n } -func (m *QueryRawCheckpointRequest) Size() (n int) { +func (m *QueryBlsPublicKeyListRequest) Size() (n int) { if m == nil { return 0 } @@ -1933,92 +1686,92 @@ func (m *QueryRawCheckpointRequest) Size() (n int) { if m.EpochNum != 0 { n += 1 + sovQuery(uint64(m.EpochNum)) } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } return n } -func (m *QueryRawCheckpointResponse) Size() (n int) { +func (m *QueryBlsPublicKeyListResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.RawCheckpoint != nil { - l = m.RawCheckpoint.Size() + if len(m.ValidatorWithBlsKeys) > 0 { + for _, e := range m.ValidatorWithBlsKeys { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() n += 1 + l + sovQuery(uint64(l)) } return n } -func (m *QueryLatestCheckpointRequest) Size() (n int) { +func (m *QueryEpochStatusRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) + } return n } -func (m *QueryLatestCheckpointResponse) Size() (n int) { +func (m *QueryEpochStatusResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.LatestCheckpoint != nil { - l = m.LatestCheckpoint.Size() - n += 1 + l + sovQuery(uint64(l)) + if m.Status != 0 { + n += 1 + sovQuery(uint64(m.Status)) } return n } -func (m *QueryBlsPublicKeyListRequest) Size() (n int) { +func (m *QueryRecentEpochStatusCountRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.EpochNum != 0 { - n += 1 + sovQuery(uint64(m.EpochNum)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) + if m.EpochCount != 0 { + n += 1 + sovQuery(uint64(m.EpochCount)) } return n } -func (m *QueryBlsPublicKeyListResponse) Size() (n int) { +func (m *QueryRecentEpochStatusCountResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.ValidatorWithBlsKeys) > 0 { - for _, e := range m.ValidatorWithBlsKeys { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) + if m.TipEpoch != 0 { + n += 1 + sovQuery(uint64(m.TipEpoch)) } - return n -} - -func (m *QueryEpochStatusRequest) Size() (n int) { - if m == nil { - return 0 + if m.EpochCount != 0 { + n += 1 + sovQuery(uint64(m.EpochCount)) } - var l int - _ = l - if m.EpochNum != 0 { - n += 1 + sovQuery(uint64(m.EpochNum)) + if len(m.StatusCount) > 0 { + for k, v := range m.StatusCount { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovQuery(uint64(len(k))) + 1 + sovQuery(uint64(v)) + n += mapEntrySize + 1 + sovQuery(uint64(mapEntrySize)) + } } return n } -func (m *QueryEpochStatusResponse) Size() (n int) { +func (m *QueryLastCheckpointWithStatusRequest) Size() (n int) { if m == nil { return 0 } @@ -2030,37 +1783,15 @@ func (m *QueryEpochStatusResponse) Size() (n int) { return n } -func (m *QueryRecentEpochStatusCountRequest) Size() (n int) { +func (m *QueryLastCheckpointWithStatusResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.EpochCount != 0 { - n += 1 + sovQuery(uint64(m.EpochCount)) - } - return n -} - -func (m 
*QueryRecentEpochStatusCountResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TipEpoch != 0 { - n += 1 + sovQuery(uint64(m.TipEpoch)) - } - if m.EpochCount != 0 { - n += 1 + sovQuery(uint64(m.EpochCount)) - } - if len(m.StatusCount) > 0 { - for k, v := range m.StatusCount { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovQuery(uint64(len(k))) + 1 + sovQuery(uint64(v)) - n += mapEntrySize + 1 + sovQuery(uint64(mapEntrySize)) - } + if m.RawCheckpoint != nil { + l = m.RawCheckpoint.Size() + n += 1 + l + sovQuery(uint64(l)) } return n } @@ -2169,352 +1900,7 @@ func (m *QueryRawCheckpointListRequest) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRawCheckpointListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRawCheckpointListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRawCheckpointListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RawCheckpoints = append(m.RawCheckpoints, &RawCheckpointWithMeta{}) - if err := m.RawCheckpoints[len(m.RawCheckpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRecentRawCheckpointListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRecentRawCheckpointListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRecentRawCheckpointListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FromEpochNum", wireType) - } - m.FromEpochNum = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FromEpochNum |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRecentRawCheckpointListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRecentRawCheckpointListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRecentRawCheckpointListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RawCheckpoints = append(m.RawCheckpoints, &RawCheckpointWithMeta{}) - if err := m.RawCheckpoints[len(m.RawCheckpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} + m.Pagination = &query.PageRequest{} } if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2541,7 +1927,7 @@ func (m *QueryRecentRawCheckpointListResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryRawCheckpointRequest) Unmarshal(dAtA []byte) error { +func (m *QueryRawCheckpointListResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2564,17 +1950,17 @@ func (m *QueryRawCheckpointRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryRawCheckpointRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryRawCheckpointListResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRawCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryRawCheckpointListResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoints", wireType) } - m.EpochNum = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -2584,64 +1970,29 @@ func (m *QueryRawCheckpointRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EpochNum |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthQuery } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRawCheckpointResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.RawCheckpoints = append(m.RawCheckpoints, &RawCheckpointWithMeta{}) + if err := 
m.RawCheckpoints[len(m.RawCheckpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRawCheckpointResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRawCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2668,10 +2019,10 @@ func (m *QueryRawCheckpointResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RawCheckpoint == nil { - m.RawCheckpoint = &RawCheckpointWithMeta{} + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} } - if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2696,7 +2047,7 @@ func (m *QueryRawCheckpointResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryLatestCheckpointRequest) Unmarshal(dAtA []byte) error { +func (m *QueryRawCheckpointRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2719,12 +2070,31 @@ func (m *QueryLatestCheckpointRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryLatestCheckpointRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryRawCheckpointRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLatestCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryRawCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + } + m.EpochNum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) @@ -2746,7 +2116,7 @@ func (m *QueryLatestCheckpointRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryLatestCheckpointResponse) Unmarshal(dAtA []byte) error { +func (m *QueryRawCheckpointResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2769,15 +2139,15 @@ func (m *QueryLatestCheckpointResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryLatestCheckpointResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryRawCheckpointResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLatestCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryRawCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LatestCheckpoint", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field RawCheckpoint", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2804,10 +2174,10 @@ func (m *QueryLatestCheckpointResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LatestCheckpoint == nil { - m.LatestCheckpoint = &RawCheckpointWithMeta{} + if m.RawCheckpoint == nil { + m.RawCheckpoint = &RawCheckpointWithMeta{} } - if err := m.LatestCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3465,6 +2835,161 @@ func (m *QueryRecentEpochStatusCountResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryLastCheckpointWithStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLastCheckpointWithStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLastCheckpointWithStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= CheckpointStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLastCheckpointWithStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLastCheckpointWithStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLastCheckpointWithStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RawCheckpoint == nil { + m.RawCheckpoint = &RawCheckpoint{} + 
} + if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/checkpointing/types/query.pb.gw.go b/x/checkpointing/types/query.pb.gw.go index 9483c6025..8d6963380 100644 --- a/x/checkpointing/types/query.pb.gw.go +++ b/x/checkpointing/types/query.pb.gw.go @@ -111,78 +111,6 @@ func local_request_Query_RawCheckpointList_0(ctx context.Context, marshaler runt } -var ( - filter_Query_RecentRawCheckpointList_0 = &utilities.DoubleArray{Encoding: map[string]int{"from_epoch_num": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Query_RecentRawCheckpointList_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryRecentRawCheckpointListRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["from_epoch_num"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "from_epoch_num") - } - - protoReq.FromEpochNum, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "from_epoch_num", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RecentRawCheckpointList_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RecentRawCheckpointList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_RecentRawCheckpointList_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryRecentRawCheckpointListRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["from_epoch_num"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "from_epoch_num") - } - - protoReq.FromEpochNum, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "from_epoch_num", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RecentRawCheckpointList_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.RecentRawCheckpointList(ctx, &protoReq) - return msg, metadata, err - -} - func request_Query_RawCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { var protoReq QueryRawCheckpointRequest var metadata runtime.ServerMetadata @@ -237,24 +165,6 @@ func local_request_Query_RawCheckpoint_0(ctx context.Context, marshaler runtime. } -func request_Query_LatestCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLatestCheckpointRequest - var metadata runtime.ServerMetadata - - msg, err := client.LatestCheckpoint(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_LatestCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLatestCheckpointRequest - var metadata runtime.ServerMetadata - - msg, err := server.LatestCheckpoint(ctx, &protoReq) - return msg, metadata, err - -} - var ( filter_Query_BlsPublicKeyList_0 = &utilities.DoubleArray{Encoding: map[string]int{"epoch_num": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) @@ -464,29 +374,6 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) - mux.Handle("GET", pattern_Query_RecentRawCheckpointList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_RecentRawCheckpointList_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_RecentRawCheckpointList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - mux.Handle("GET", pattern_Query_RawCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -510,29 +397,6 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) - mux.Handle("GET", pattern_Query_LatestCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_LatestCheckpoint_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_LatestCheckpoint_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - mux.Handle("GET", pattern_Query_BlsPublicKeyList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -686,26 +550,6 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) - mux.Handle("GET", pattern_Query_RecentRawCheckpointList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_RecentRawCheckpointList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_RecentRawCheckpointList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - mux.Handle("GET", pattern_Query_RawCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -726,26 +570,6 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) - mux.Handle("GET", pattern_Query_LatestCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_LatestCheckpoint_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_LatestCheckpoint_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - mux.Handle("GET", pattern_Query_BlsPublicKeyList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -832,12 +656,8 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie var ( pattern_Query_RawCheckpointList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "raw_checkpoints", "status"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_RecentRawCheckpointList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "recent_raw_checkpoints", "from_epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_RawCheckpoint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "raw_checkpoint", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_LatestCheckpoint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "checkpointing", "v1", "latest_checkpoint"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_BlsPublicKeyList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "bls_public_keys", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_EpochStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"babylon", "checkpointing", "v1", "epochs", "epoch_num", "status"}, "", runtime.AssumeColonVerbOpt(false))) @@ -850,12 +670,8 @@ var ( var ( forward_Query_RawCheckpointList_0 = runtime.ForwardResponseMessage - forward_Query_RecentRawCheckpointList_0 = runtime.ForwardResponseMessage - forward_Query_RawCheckpoint_0 = runtime.ForwardResponseMessage - forward_Query_LatestCheckpoint_0 = runtime.ForwardResponseMessage - forward_Query_BlsPublicKeyList_0 = runtime.ForwardResponseMessage forward_Query_EpochStatus_0 = runtime.ForwardResponseMessage diff --git a/x/checkpointing/types/types.go b/x/checkpointing/types/types.go index ca4ece4eb..6b3896e29 100644 --- a/x/checkpointing/types/types.go +++ b/x/checkpointing/types/types.go @@ -104,6 +104,10 @@ func (cm *RawCheckpointWithMeta) Accumulate( return true, nil } +func (cm *RawCheckpointWithMeta) IsMoreMatureThanStatus(status CheckpointStatus) bool { + return cm.Status > status +} + func NewLastCommitHashFromHex(s string) (LastCommitHash, error) { bz, err := hex.DecodeString(s) if err != nil { From cdd25892bd637c89d597c0ef27456f9d4a1e3dac Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Thu, 22 Dec 2022 16:10:53 +1100 Subject: [PATCH 02/37] zoneconcierge: ignore out-of-order headers in ZoneConcierge (#255) --- x/zoneconcierge/keeper/chain_info_indexer.go | 11 +---------- x/zoneconcierge/keeper/hooks.go | 9 ++++++++- x/zoneconcierge/module_test.go | 18 ++++++++++++------ x/zoneconcierge/types/types.go | 8 ++++++++ 4 files changed, 29 insertions(+), 17 deletions(-) diff --git a/x/zoneconcierge/keeper/chain_info_indexer.go b/x/zoneconcierge/keeper/chain_info_indexer.go index b4b41d56d..72e6f27d1 100644 --- a/x/zoneconcierge/keeper/chain_info_indexer.go +++ b/x/zoneconcierge/keeper/chain_info_indexer.go @@ -33,20 +33,11 @@ func (k Keeper) GetChainInfo(ctx sdk.Context, chainID string) *types.ChainInfo { return &chainInfo } -func (k 
Keeper) tryToUpdateLatestHeader(ctx sdk.Context, chainID string, header *types.IndexedHeader) error { +func (k Keeper) updateLatestHeader(ctx sdk.Context, chainID string, header *types.IndexedHeader) error { if header == nil { return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil") } - // NOTE: we can accept header without ancestor since IBC connection can be established at any height chainInfo := k.GetChainInfo(ctx, chainID) - if chainInfo.LatestHeader != nil { - // ensure the header is the latest one - // NOTE: submitting an old header is considered acceptable in IBC-Go (see Case_valid_past_update), - // but the chain info indexer will not record such old header since it's not the latest one - if chainInfo.LatestHeader.Height > header.Height { - return nil - } - } chainInfo.LatestHeader = header k.setChainInfo(ctx, chainInfo) return nil diff --git a/x/zoneconcierge/keeper/hooks.go b/x/zoneconcierge/keeper/hooks.go index af8dfa8cd..1e02adc7d 100644 --- a/x/zoneconcierge/keeper/hooks.go +++ b/x/zoneconcierge/keeper/hooks.go @@ -41,12 +41,19 @@ func (h Hooks) AfterHeaderWithValidCommit(ctx sdk.Context, txHash []byte, header panic(err) } } else { + // ensure the header is the latest one, otherwise ignore it + // NOTE: while an old header is considered acceptable in IBC-Go (see Case_valid_past_update), but + // ZoneConcierge should not checkpoint it since Babylon requires monotonic checkpointing + if !h.k.GetChainInfo(ctx, indexedHeader.ChainId).IsLatestHeader(&indexedHeader) { + return + } + // insert header to canonical chain index if err := h.k.insertHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil { panic(err) } // update the latest canonical header in chain info - if err := h.k.tryToUpdateLatestHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil { + if err := h.k.updateLatestHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil { panic(err) } } diff --git a/x/zoneconcierge/module_test.go b/x/zoneconcierge/module_test.go index fb0829f5f..7ab128a8a 100644 --- a/x/zoneconcierge/module_test.go +++ b/x/zoneconcierge/module_test.go @@ -331,17 +331,23 @@ func (suite *ZoneConciergeTestSuite) TestUpdateClientTendermint() { ctx := suite.babylonChain.GetContext() czChainID := suite.czChain.ChainID updateHeaderHeight := uint64(updateHeader.Header.Height) - // updateHeader should be correctly recorded in canonical chain indexer - expUpdateHeader, err := suite.zcKeeper.GetHeader(ctx, czChainID, updateHeaderHeight) - suite.Require().NoError(err) - suite.Require().Equal(expUpdateHeader.Hash, updateHeader.Header.LastCommitHash) - suite.Require().Equal(expUpdateHeader.Height, updateHeaderHeight) + // updateHeader should be correctly recorded in chain info indexer if tc.name != "valid past update" { // we exclude the case of past update since chain info indexer does not record past update + // updateHeader should be correctly recorded in canonical chain indexer + expUpdateHeader, err := suite.zcKeeper.GetHeader(ctx, czChainID, updateHeaderHeight) + suite.Require().NoError(err) + suite.Require().Equal(expUpdateHeader.Hash, updateHeader.Header.LastCommitHash) + suite.Require().Equal(expUpdateHeader.Height, updateHeaderHeight) + // updateHeader should be correctly recorded in chain info indexer chainInfo := suite.zcKeeper.GetChainInfo(ctx, czChainID) suite.Require().Equal(chainInfo.LatestHeader.Hash, updateHeader.Header.LastCommitHash) suite.Require().Equal(chainInfo.LatestHeader.Height, updateHeaderHeight) - } else { // in the test case where Babylon receives a past 
CZ header, the latest header should be the last header + } else { + // there should be no header in updateHeaderHeight + _, err := suite.zcKeeper.GetHeader(ctx, czChainID, updateHeaderHeight) + suite.Require().Error(err) + // the latest header in chain info indexer should be the last header chainInfo := suite.zcKeeper.GetChainInfo(ctx, czChainID) suite.Require().Equal(chainInfo.LatestHeader.Hash, suite.czChain.LastHeader.Header.LastCommitHash) suite.Require().Equal(chainInfo.LatestHeader.Height, uint64(suite.czChain.LastHeader.Header.Height)) diff --git a/x/zoneconcierge/types/types.go b/x/zoneconcierge/types/types.go index ab1254f4c..6e95fb34e 100644 --- a/x/zoneconcierge/types/types.go +++ b/x/zoneconcierge/types/types.go @@ -1 +1,9 @@ package types + +// IsLatestHeader checks if a given header is higher than the latest header in chain info +func (ci *ChainInfo) IsLatestHeader(header *IndexedHeader) bool { + if ci.LatestHeader != nil && ci.LatestHeader.Height > header.Height { + return false + } + return true +} From 5166fa56a712b1daaaf69db01022aedea4a8198e Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Fri, 23 Dec 2022 10:37:52 +1100 Subject: [PATCH 03/37] zoneconcierge: API for listing the last checkpointed headers (#254) --- proto/babylon/zoneconcierge/query.proto | 19 + x/zoneconcierge/keeper/grpc_query.go | 32 ++ x/zoneconcierge/keeper/grpc_query_test.go | 34 ++ x/zoneconcierge/types/query.pb.go | 639 ++++++++++++++++++++-- x/zoneconcierge/types/query.pb.gw.go | 119 ++++ 5 files changed, 784 insertions(+), 59 deletions(-) diff --git a/proto/babylon/zoneconcierge/query.proto b/proto/babylon/zoneconcierge/query.proto index 275c8a112..882449c2a 100644 --- a/proto/babylon/zoneconcierge/query.proto +++ b/proto/babylon/zoneconcierge/query.proto @@ -30,6 +30,10 @@ service Query { rpc ChainInfo(QueryChainInfoRequest) returns (QueryChainInfoResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/chain_info/{chain_id}"; } + // ListHeaders queries the headers of a chain in Babylon's view, with pagination support + rpc ListHeaders(QueryListHeadersRequest) returns (QueryListHeadersResponse) { + option (google.api.http).get = "/babylon/zoneconcierge/v1/headers/{chain_id}"; + } // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs rpc FinalizedChainInfo(QueryFinalizedChainInfoRequest) returns (QueryFinalizedChainInfoResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}"; @@ -64,6 +68,21 @@ message QueryChainInfoResponse { babylon.zoneconcierge.v1.ChainInfo chain_info = 1; } +// QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. +message QueryListHeadersRequest { + string chain_id = 1; + // pagination defines whether to have the pagination in the response + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryListHeadersResponse is response type for the Query/ListHeaders RPC method. +message QueryListHeadersResponse { + // headers is the list of headers + repeated babylon.zoneconcierge.v1.IndexedHeader headers = 1; + // pagination defines the pagination in the response + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + // QueryFinalizedChainInfoRequest is request type for the Query/FinalizedChainInfo RPC method. 
message QueryFinalizedChainInfoRequest { // chain_id is the ID of the CZ diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index 7101248eb..ecd8c8577 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -8,6 +8,7 @@ import ( checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" "github.com/babylonchain/babylon/x/zoneconcierge/types" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -44,6 +45,37 @@ func (k Keeper) ChainInfo(c context.Context, req *types.QueryChainInfoRequest) ( return resp, nil } +// ListHeaders returns all headers of a chain with given ID, with pagination support +func (k Keeper) ListHeaders(c context.Context, req *types.QueryListHeadersRequest) (*types.QueryListHeadersResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + if len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") + } + + ctx := sdk.UnwrapSDKContext(c) + + headers := []*types.IndexedHeader{} + store := k.canonicalChainStore(ctx, req.ChainId) + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { + var header types.IndexedHeader + k.cdc.MustUnmarshal(value, &header) + headers = append(headers, &header) + return nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + resp := &types.QueryListHeadersResponse{ + Headers: headers, + Pagination: pageRes, + } + return resp, nil +} + func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedChainInfoRequest) (*types.QueryFinalizedChainInfoResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go index 16584ad1a..3dcbd79c6 100644 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ b/x/zoneconcierge/keeper/grpc_query_test.go @@ -10,6 +10,7 @@ import ( btcctypes "github.com/babylonchain/babylon/x/btccheckpoint/types" checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types" + "github.com/cosmos/cosmos-sdk/types/query" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" @@ -84,6 +85,39 @@ func FuzzChainInfo(f *testing.F) { }) } +func FuzzListHeaders(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, zcKeeper := SetupTest(t) + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + // invoke the hook a random number of times to simulate a random number of blocks + numHeaders := datagen.RandomInt(100) + 1 + numForkHeaders := datagen.RandomInt(10) + 1 + headers, _ := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, numHeaders, numForkHeaders) + + // a request with randomised pagination + limit := datagen.RandomInt(int(numHeaders)) + 1 + req := &zctypes.QueryListHeadersRequest{ + ChainId: czChain.ChainID, + Pagination: &query.PageRequest{ + Limit: limit, + }, + } + resp, err := zcKeeper.ListHeaders(ctx, req) + require.NoError(t, err) + require.Equal(t, int(limit), len(resp.Headers)) + for i := uint64(0); i < limit; i++ { + require.Equal(t, 
headers[i].Header.LastCommitHash, resp.Headers[i].Hash) + } + }) +} + func FuzzFinalizedChainInfo(f *testing.F) { datagen.AddRandomSeedsToFuzzer(f, 10) diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go index 8533b2361..831d110dd 100644 --- a/x/zoneconcierge/types/query.pb.go +++ b/x/zoneconcierge/types/query.pb.go @@ -9,7 +9,7 @@ import ( types2 "github.com/babylonchain/babylon/x/btccheckpoint/types" types1 "github.com/babylonchain/babylon/x/checkpointing/types" types "github.com/babylonchain/babylon/x/epoching/types" - _ "github.com/cosmos/cosmos-sdk/types/query" + query "github.com/cosmos/cosmos-sdk/types/query" _ "github.com/gogo/protobuf/gogoproto" grpc1 "github.com/gogo/protobuf/grpc" proto "github.com/gogo/protobuf/proto" @@ -291,6 +291,115 @@ func (m *QueryChainInfoResponse) GetChainInfo() *ChainInfo { return nil } +// QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. +type QueryListHeadersRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // pagination defines whether to have the pagination in the response + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryListHeadersRequest) Reset() { *m = QueryListHeadersRequest{} } +func (m *QueryListHeadersRequest) String() string { return proto.CompactTextString(m) } +func (*QueryListHeadersRequest) ProtoMessage() {} +func (*QueryListHeadersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{6} +} +func (m *QueryListHeadersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryListHeadersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryListHeadersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryListHeadersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryListHeadersRequest.Merge(m, src) +} +func (m *QueryListHeadersRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryListHeadersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryListHeadersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryListHeadersRequest proto.InternalMessageInfo + +func (m *QueryListHeadersRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *QueryListHeadersRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryListHeadersResponse is response type for the Query/ListHeaders RPC method. 
+type QueryListHeadersResponse struct { + // headers is the list of headers + Headers []*IndexedHeader `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + // pagination defines the pagination in the response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryListHeadersResponse) Reset() { *m = QueryListHeadersResponse{} } +func (m *QueryListHeadersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryListHeadersResponse) ProtoMessage() {} +func (*QueryListHeadersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{7} +} +func (m *QueryListHeadersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryListHeadersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryListHeadersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryListHeadersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryListHeadersResponse.Merge(m, src) +} +func (m *QueryListHeadersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryListHeadersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryListHeadersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryListHeadersResponse proto.InternalMessageInfo + +func (m *QueryListHeadersResponse) GetHeaders() []*IndexedHeader { + if m != nil { + return m.Headers + } + return nil +} + +func (m *QueryListHeadersResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + // QueryFinalizedChainInfoRequest is request type for the Query/FinalizedChainInfo RPC method. 
type QueryFinalizedChainInfoRequest struct { // chain_id is the ID of the CZ @@ -303,7 +412,7 @@ func (m *QueryFinalizedChainInfoRequest) Reset() { *m = QueryFinalizedCh func (m *QueryFinalizedChainInfoRequest) String() string { return proto.CompactTextString(m) } func (*QueryFinalizedChainInfoRequest) ProtoMessage() {} func (*QueryFinalizedChainInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{6} + return fileDescriptor_2caab7ee15063236, []int{8} } func (m *QueryFinalizedChainInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -371,7 +480,7 @@ func (m *QueryFinalizedChainInfoResponse) Reset() { *m = QueryFinalizedC func (m *QueryFinalizedChainInfoResponse) String() string { return proto.CompactTextString(m) } func (*QueryFinalizedChainInfoResponse) ProtoMessage() {} func (*QueryFinalizedChainInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{7} + return fileDescriptor_2caab7ee15063236, []int{9} } func (m *QueryFinalizedChainInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -463,6 +572,8 @@ func init() { proto.RegisterType((*QueryChainListResponse)(nil), "babylon.zoneconcierge.v1.QueryChainListResponse") proto.RegisterType((*QueryChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryChainInfoRequest") proto.RegisterType((*QueryChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryChainInfoResponse") + proto.RegisterType((*QueryListHeadersRequest)(nil), "babylon.zoneconcierge.v1.QueryListHeadersRequest") + proto.RegisterType((*QueryListHeadersResponse)(nil), "babylon.zoneconcierge.v1.QueryListHeadersResponse") proto.RegisterType((*QueryFinalizedChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoRequest") proto.RegisterType((*QueryFinalizedChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse") } @@ -470,62 +581,69 @@ func init() { func init() { proto.RegisterFile("babylon/zoneconcierge/query.proto", fileDescriptor_2caab7ee15063236) } var fileDescriptor_2caab7ee15063236 = []byte{ - // 877 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0x5d, 0xef, 0x36, 0xdb, 0xcd, 0x54, 0x54, 0x65, 0xba, 0x05, 0x37, 0x80, 0x1b, 0x8c, 0x04, - 0x69, 0x05, 0x36, 0x5e, 0x54, 0xc1, 0x0a, 0x09, 0x89, 0x2d, 0x20, 0x56, 0x45, 0xd0, 0xba, 0xbb, - 0x12, 0x42, 0x20, 0x6b, 0xec, 0x4c, 0x1c, 0x6b, 0x93, 0x19, 0xd7, 0x33, 0x49, 0x93, 0x22, 0x6e, - 0x78, 0x01, 0x90, 0xb8, 0xe1, 0x09, 0xe0, 0x8a, 0xf7, 0xe8, 0x05, 0x17, 0x95, 0xb8, 0xe1, 0x0a, - 0xa1, 0x5d, 0x1e, 0x04, 0xf9, 0x9b, 0xb1, 0x63, 0xe7, 0x87, 0x0d, 0xbd, 0x89, 0xec, 0xf9, 0xce, - 0x39, 0xdf, 0xf9, 0xc6, 0x33, 0x27, 0xe8, 0xd5, 0x90, 0x84, 0xd3, 0x01, 0x67, 0xee, 0x63, 0xce, - 0x68, 0xc4, 0x59, 0x94, 0xd0, 0x2c, 0xa6, 0xee, 0xc3, 0x11, 0xcd, 0xa6, 0x4e, 0x9a, 0x71, 0xc9, - 0xb1, 0xa9, 0x21, 0x4e, 0x0d, 0xe2, 0x8c, 0xbd, 0xd6, 0x6e, 0xcc, 0x63, 0x0e, 0x20, 0x37, 0x7f, - 0x52, 0xf8, 0xd6, 0xcb, 0x31, 0xe7, 0xf1, 0x80, 0xba, 0x24, 0x4d, 0x5c, 0xc2, 0x18, 0x97, 0x44, - 0x26, 0x9c, 0x89, 0xa2, 0x2a, 0x29, 0xeb, 0xd2, 0x6c, 0x98, 0x30, 0xe9, 0xca, 0x69, 0x4a, 0x85, - 0xfa, 0xd5, 0xd5, 0x57, 0x2a, 0xd5, 0x28, 0x9b, 0xa6, 0x92, 0xbb, 0x69, 0xc6, 0x79, 0x4f, 0x97, - 0x6f, 0x45, 0x5c, 0x0c, 0xb9, 0x70, 0x43, 0x22, 0xb4, 0x47, 0x77, 0xec, 0x85, 0x54, 0x12, 0xcf, - 0x4d, 0x49, 0x9c, 0x30, 0xe8, 0xa4, 0xb1, 0x56, 0x31, 0x59, 0x28, 0xa3, 0xa8, 0x4f, 0xa3, 0x93, - 0x94, 0x43, 0xcf, 0x89, 0xae, 0xdf, 0x5c, 
0x5e, 0xaf, 0xbd, 0x69, 0x68, 0xb9, 0x49, 0xb3, 0x4a, - 0xc2, 0xe2, 0xea, 0x26, 0xb5, 0x5e, 0x5f, 0x0e, 0x59, 0x90, 0xb2, 0x0b, 0x1c, 0x4d, 0x79, 0xd4, - 0xcf, 0x21, 0x63, 0xaf, 0x7c, 0x9e, 0xc7, 0xd4, 0xbf, 0x49, 0x4a, 0x32, 0x32, 0x14, 0xf3, 0xee, - 0xeb, 0x98, 0xfa, 0x27, 0x02, 0xa8, 0xbd, 0x8b, 0xf0, 0xfd, 0xdc, 0xe9, 0x3d, 0xe0, 0xfb, 0xf4, - 0xe1, 0x88, 0x0a, 0x69, 0x1f, 0xa3, 0xab, 0xb5, 0x55, 0x91, 0x72, 0x26, 0x28, 0xfe, 0x00, 0x6d, - 0xab, 0x3e, 0xa6, 0xd1, 0x36, 0x3a, 0x97, 0xf6, 0xda, 0xce, 0xaa, 0xaf, 0xef, 0x28, 0xe6, 0xc1, - 0x85, 0x27, 0x7f, 0xdd, 0xd8, 0xf0, 0x35, 0xcb, 0x7e, 0x11, 0x5d, 0x03, 0xd9, 0x3b, 0x7d, 0x92, - 0xb0, 0xcf, 0x12, 0x21, 0x8b, 0x7e, 0xb7, 0xd1, 0x0b, 0xf3, 0x05, 0xdd, 0xf2, 0x25, 0xd4, 0x8c, - 0xf2, 0xc5, 0x20, 0xe9, 0xe6, 0x5d, 0xb7, 0x3a, 0x4d, 0x7f, 0x07, 0x16, 0x0e, 0xbb, 0xc2, 0xde, - 0xab, 0xea, 0x1d, 0xb2, 0x1e, 0xd7, 0x7a, 0xf8, 0x3a, 0xda, 0x29, 0x58, 0x60, 0xb5, 0xe9, 0x5f, - 0xd4, 0x24, 0xfb, 0xeb, 0x6a, 0x2b, 0xc5, 0xd1, 0xad, 0x0e, 0x10, 0xd2, 0x24, 0xd6, 0xe3, 0x7a, - 0xc2, 0xd7, 0x56, 0x4f, 0x38, 0x13, 0x50, 0x0e, 0xf3, 0x47, 0xfb, 0x3e, 0xb2, 0x40, 0xfd, 0x93, - 0x84, 0x91, 0x41, 0xf2, 0x98, 0x76, 0xff, 0x87, 0x35, 0xbc, 0x8b, 0x1a, 0x69, 0xc6, 0xc7, 0xd4, - 0xdc, 0x6c, 0x1b, 0x9d, 0x1d, 0x5f, 0xbd, 0xd8, 0xbf, 0x34, 0xd0, 0x8d, 0x95, 0x9a, 0xda, 0xfa, - 0x31, 0xda, 0xed, 0x15, 0xd5, 0xe0, 0xd9, 0x86, 0xc0, 0xbd, 0x05, 0x79, 0xbc, 0x8f, 0x10, 0x9c, - 0x3e, 0x25, 0xb6, 0x09, 0x62, 0xad, 0x52, 0xac, 0x3c, 0x98, 0x63, 0xcf, 0xf9, 0x38, 0x7f, 0xf6, - 0x9b, 0xb0, 0x04, 0xd4, 0xcf, 0xd1, 0xe5, 0x8c, 0x3c, 0x0a, 0x66, 0x47, 0xdc, 0xdc, 0x02, 0xfa, - 0x1b, 0x25, 0xbd, 0x76, 0x17, 0x72, 0x0d, 0x9f, 0x3c, 0xba, 0x53, 0xae, 0xf9, 0xcf, 0x65, 0xd5, - 0x57, 0x7c, 0x8c, 0x70, 0x28, 0xa3, 0x40, 0x8c, 0xc2, 0x61, 0x22, 0x44, 0xc2, 0x59, 0x70, 0x42, - 0xa7, 0xe6, 0x85, 0x39, 0xcd, 0xfa, 0xfd, 0x1c, 0x7b, 0xce, 0x83, 0x12, 0x7f, 0x97, 0x4e, 0xfd, - 0x2b, 0xa1, 0x8c, 0x6a, 0x2b, 0xf8, 0x23, 0xf4, 0x3c, 0x44, 0x48, 0x20, 0x27, 0x41, 0xc2, 0x82, - 0x70, 0xc0, 0xa3, 0x13, 0xb3, 0x01, 0xaa, 0xd7, 0x9d, 0x59, 0xdc, 0x38, 0x2a, 0x86, 0x8e, 0x26, - 0xf7, 0x72, 0xb0, 0x7f, 0x19, 0x38, 0x47, 0x93, 0x43, 0x76, 0x90, 0x13, 0xf0, 0x5d, 0x74, 0x4d, - 0xa9, 0xf4, 0x29, 0xe9, 0xd2, 0x2c, 0x57, 0x82, 0x9d, 0x30, 0xb7, 0x41, 0xc9, 0xac, 0x2a, 0xa9, - 0xe0, 0x72, 0x94, 0x10, 0x06, 0xda, 0xa7, 0xc0, 0x3a, 0x64, 0xb0, 0x89, 0xf8, 0x4b, 0xa4, 0x56, - 0x95, 0x44, 0x20, 0x28, 0x19, 0xd0, 0xae, 0x79, 0x11, 0x94, 0x6e, 0xfd, 0xc7, 0x85, 0xcb, 0x39, - 0xa0, 0xf0, 0x00, 0x18, 0xfe, 0x95, 0x74, 0x6e, 0x05, 0x7f, 0x53, 0xd8, 0xd4, 0xca, 0xf9, 0x4e, - 0x48, 0x49, 0xbb, 0xe6, 0x4e, 0x7b, 0xab, 0x73, 0x69, 0xef, 0xe6, 0xea, 0x6d, 0x3c, 0xca, 0x08, - 0x13, 0x24, 0xca, 0x03, 0x14, 0x0e, 0xcb, 0xd5, 0x8a, 0x76, 0xa1, 0xb2, 0xf7, 0x5b, 0x03, 0x35, - 0xe0, 0xa0, 0xe2, 0x1f, 0x0c, 0xb4, 0xad, 0x02, 0x00, 0xbf, 0xb9, 0xda, 0xf1, 0x62, 0xee, 0xb4, - 0xde, 0x5a, 0x13, 0xad, 0x8e, 0xbd, 0xdd, 0xf9, 0xfe, 0x8f, 0x7f, 0x7e, 0xda, 0xb4, 0x71, 0xdb, - 0x5d, 0x1e, 0x78, 0x63, 0x4f, 0xe7, 0x22, 0xfe, 0xd9, 0x40, 0xcd, 0x32, 0x5c, 0xb0, 0x7b, 0x4e, - 0x9b, 0xf9, 0x7c, 0x6a, 0xbd, 0xbd, 0x3e, 0x61, 0x7d, 0x6b, 0x70, 0x4f, 0x05, 0xfe, 0xb5, 0xb0, - 0x06, 0xf7, 0x66, 0x2d, 0x6b, 0x95, 0x3c, 0x59, 0xcf, 0x5a, 0x35, 0x2c, 0xec, 0x77, 0xc1, 0x9a, - 0x87, 0xdd, 0x73, 0xac, 0xc1, 0xad, 0x77, 0xbf, 0x2d, 0xd2, 0xea, 0x3b, 0xfc, 0xbb, 0x81, 0xf0, - 0x62, 0x08, 0xe1, 0xf7, 0xce, 0x71, 0xb0, 0x32, 0x0b, 0x5b, 0xfb, 0xcf, 0xc0, 0xd4, 0x43, 0x7c, - 0x08, 0x43, 0xbc, 0x8f, 0xf7, 0x57, 0x0f, 0xb1, 0x2c, 0x11, 0x2b, 
0xe3, 0x1c, 0x7c, 0xf1, 0xe4, - 0xd4, 0x32, 0x9e, 0x9e, 0x5a, 0xc6, 0xdf, 0xa7, 0x96, 0xf1, 0xe3, 0x99, 0xb5, 0xf1, 0xf4, 0xcc, - 0xda, 0xf8, 0xf3, 0xcc, 0xda, 0xf8, 0xea, 0x76, 0x9c, 0xc8, 0xfe, 0x28, 0x74, 0x22, 0x3e, 0x2c, - 0xe4, 0x81, 0x56, 0xf6, 0x9a, 0xcc, 0x75, 0x83, 0x78, 0x08, 0xb7, 0xe1, 0x2f, 0xf5, 0x9d, 0x7f, - 0x03, 0x00, 0x00, 0xff, 0xff, 0x0f, 0x5d, 0x8e, 0xb7, 0x37, 0x09, 0x00, 0x00, + // 987 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xd1, 0x8e, 0xdb, 0x44, + 0x14, 0x5d, 0xb7, 0xdd, 0xec, 0x66, 0x56, 0x54, 0x65, 0xba, 0xa5, 0x6e, 0x80, 0x74, 0x31, 0x52, + 0xbb, 0xad, 0x8a, 0x8d, 0x03, 0x15, 0xac, 0x90, 0x90, 0xba, 0x85, 0x42, 0x54, 0x04, 0x5b, 0x77, + 0x57, 0x42, 0x08, 0x64, 0x8d, 0xed, 0x89, 0x63, 0x6d, 0x32, 0xe3, 0x7a, 0xbc, 0x69, 0x52, 0xe0, + 0x85, 0x1f, 0x00, 0x89, 0x17, 0xbe, 0xa0, 0x48, 0x7c, 0x49, 0x1f, 0x78, 0xa8, 0xc4, 0x0b, 0xe2, + 0x01, 0xa1, 0x5d, 0x3e, 0x04, 0xf9, 0xce, 0x38, 0xb1, 0x93, 0x98, 0xa4, 0xfb, 0x12, 0xd9, 0x33, + 0xf7, 0x9c, 0x7b, 0xee, 0x9d, 0x3b, 0xc7, 0x41, 0x6f, 0x78, 0xc4, 0x1b, 0xf5, 0x38, 0xb3, 0x9e, + 0x70, 0x46, 0x7d, 0xce, 0xfc, 0x88, 0x26, 0x21, 0xb5, 0x1e, 0x1d, 0xd1, 0x64, 0x64, 0xc6, 0x09, + 0x4f, 0x39, 0xd6, 0x55, 0x88, 0x59, 0x0a, 0x31, 0x07, 0x76, 0x63, 0x33, 0xe4, 0x21, 0x87, 0x20, + 0x2b, 0x7b, 0x92, 0xf1, 0x8d, 0xd7, 0x42, 0xce, 0xc3, 0x1e, 0xb5, 0x48, 0x1c, 0x59, 0x84, 0x31, + 0x9e, 0x92, 0x34, 0xe2, 0x4c, 0xe4, 0xbb, 0x29, 0x65, 0x01, 0x4d, 0xfa, 0x11, 0x4b, 0xad, 0x74, + 0x14, 0x53, 0x21, 0x7f, 0xd5, 0xee, 0xeb, 0x85, 0x5d, 0x3f, 0x19, 0xc5, 0x29, 0xb7, 0xe2, 0x84, + 0xf3, 0x8e, 0xda, 0xbe, 0xe9, 0x73, 0xd1, 0xe7, 0xc2, 0xf2, 0x88, 0x50, 0x1a, 0xad, 0x81, 0xed, + 0xd1, 0x94, 0xd8, 0x56, 0x4c, 0xc2, 0x88, 0x41, 0x26, 0x15, 0xdb, 0xcc, 0x2b, 0xf3, 0x52, 0xdf, + 0xef, 0x52, 0xff, 0x30, 0xe6, 0x90, 0x73, 0xa8, 0xf6, 0x6f, 0xcc, 0xdf, 0x2f, 0xbd, 0xa9, 0xd0, + 0x71, 0x93, 0x26, 0x3b, 0x11, 0x0b, 0x8b, 0x4d, 0x6a, 0x5c, 0x9b, 0x1f, 0x32, 0x43, 0x65, 0xe4, + 0x71, 0x34, 0xe6, 0x7e, 0x37, 0x0b, 0x19, 0xd8, 0xe3, 0xe7, 0xe9, 0x98, 0xf2, 0x99, 0xc4, 0x24, + 0x21, 0x7d, 0x31, 0xad, 0xbe, 0x1c, 0x53, 0x3e, 0x22, 0x08, 0x35, 0x36, 0x11, 0x7e, 0x90, 0x29, + 0xdd, 0x03, 0xbc, 0x43, 0x1f, 0x1d, 0x51, 0x91, 0x1a, 0x07, 0xe8, 0x62, 0x69, 0x55, 0xc4, 0x9c, + 0x09, 0x8a, 0x3f, 0x44, 0x35, 0x99, 0x47, 0xd7, 0xb6, 0xb4, 0xed, 0x8d, 0xd6, 0x96, 0x59, 0x75, + 0xfa, 0xa6, 0x44, 0xee, 0x9e, 0x7b, 0xf6, 0xf7, 0xd5, 0x15, 0x47, 0xa1, 0x8c, 0xcb, 0xe8, 0x12, + 0xd0, 0xde, 0xed, 0x92, 0x88, 0x7d, 0x16, 0x89, 0x34, 0xcf, 0x77, 0x1b, 0xbd, 0x32, 0xbd, 0xa1, + 0x52, 0xbe, 0x8a, 0xea, 0x7e, 0xb6, 0xe8, 0x46, 0x41, 0x96, 0xf5, 0xec, 0x76, 0xdd, 0x59, 0x87, + 0x85, 0x76, 0x20, 0x8c, 0x56, 0x91, 0xaf, 0xcd, 0x3a, 0x5c, 0xf1, 0xe1, 0x2b, 0x68, 0x3d, 0x47, + 0x81, 0xd4, 0xba, 0xb3, 0xa6, 0x40, 0xc6, 0xd7, 0xc5, 0x54, 0x12, 0xa3, 0x52, 0xed, 0x22, 0xa4, + 0x40, 0xac, 0xc3, 0x55, 0x85, 0x6f, 0x56, 0x57, 0x38, 0x21, 0x90, 0x0a, 0xb3, 0x47, 0xe3, 0x3b, + 0x74, 0x19, 0xd8, 0xb3, 0x1a, 0x3e, 0xa5, 0x24, 0xa0, 0x89, 0x58, 0xac, 0x09, 0xdf, 0x43, 0x68, + 0x32, 0xa1, 0xfa, 0x19, 0xc8, 0x7c, 0xcd, 0x94, 0xe3, 0x6c, 0x66, 0xe3, 0x6c, 0xca, 0x69, 0x52, + 0xe3, 0x6c, 0xee, 0x91, 0x90, 0x2a, 0x5a, 0xa7, 0x80, 0x34, 0x9e, 0x6a, 0x48, 0x9f, 0x4d, 0xaf, + 0xca, 0xbb, 0x83, 0xd6, 0xba, 0x72, 0x09, 0xfa, 0xb8, 0xd1, 0xba, 0x5e, 0x5d, 0x5b, 0x9b, 0x05, + 0x74, 0x48, 0x03, 0x49, 0xe1, 0xe4, 0x38, 0xfc, 0xc9, 0x1c, 0x9d, 0xd7, 0x17, 0xea, 0x94, 0xf9, + 0x4b, 0x42, 0x1f, 0xa0, 0x26, 0xe8, 0xbc, 0x17, 0x31, 0xd2, 
0x8b, 0x9e, 0xd0, 0xe0, 0x05, 0x4e, + 0x10, 0x6f, 0xa2, 0xd5, 0x38, 0xe1, 0x03, 0x0a, 0x02, 0xd6, 0x1d, 0xf9, 0x62, 0x3c, 0x5d, 0x45, + 0x57, 0x2b, 0x39, 0x55, 0x0b, 0x0e, 0xd0, 0x66, 0x27, 0xdf, 0x75, 0x4f, 0x77, 0xd6, 0xb8, 0x33, + 0x43, 0x8f, 0x77, 0x10, 0x82, 0x4b, 0x2a, 0xc9, 0x64, 0x5b, 0x1a, 0x63, 0xb2, 0xf1, 0xfd, 0x1d, + 0xd8, 0xe6, 0xc7, 0xd9, 0xb3, 0x53, 0x87, 0x25, 0x80, 0x7e, 0x8e, 0xce, 0x27, 0xe4, 0xb1, 0x3b, + 0x71, 0x02, 0xfd, 0xac, 0xea, 0x6a, 0x0e, 0x2f, 0x59, 0x46, 0xc6, 0xe1, 0x90, 0xc7, 0x77, 0xc7, + 0x6b, 0xce, 0x4b, 0x49, 0xf1, 0x15, 0x1f, 0x20, 0xec, 0xa5, 0xbe, 0x2b, 0x8e, 0xbc, 0x7e, 0x24, + 0x44, 0xc4, 0x99, 0x7b, 0x48, 0x47, 0xfa, 0xb9, 0x29, 0xce, 0xb2, 0x8d, 0x0d, 0x6c, 0xf3, 0xe1, + 0x38, 0xfe, 0x3e, 0x1d, 0x39, 0x17, 0xbc, 0xd4, 0x2f, 0xad, 0xe0, 0x8f, 0xd0, 0xcb, 0xe0, 0xb4, + 0x6e, 0x3a, 0x74, 0x23, 0xe6, 0x7a, 0x3d, 0xee, 0x1f, 0xea, 0xab, 0xc0, 0x7a, 0xc5, 0x9c, 0xb8, + 0xb2, 0x29, 0xdd, 0x7a, 0x7f, 0xb8, 0x97, 0x05, 0x3b, 0xe7, 0x01, 0xb3, 0x3f, 0x6c, 0xb3, 0xdd, + 0x0c, 0x80, 0xef, 0xa3, 0x4b, 0x92, 0x45, 0xce, 0x53, 0xc6, 0x04, 0x9d, 0xd0, 0x6b, 0xc0, 0xa4, + 0x17, 0x99, 0xa4, 0xbf, 0x9b, 0x92, 0x08, 0x03, 0x4c, 0x4e, 0x63, 0x9b, 0x41, 0x13, 0xf1, 0x97, + 0x48, 0xae, 0x4a, 0x0a, 0x57, 0x50, 0xd2, 0xa3, 0x81, 0xbe, 0x06, 0x4c, 0x37, 0xff, 0xc7, 0x97, + 0x32, 0x0c, 0x30, 0x3c, 0x04, 0x84, 0x73, 0x21, 0x9e, 0x5a, 0xc1, 0xdf, 0xe4, 0x32, 0x15, 0x73, + 0xd6, 0x89, 0x34, 0xa5, 0x81, 0xbe, 0x0e, 0xd7, 0xe6, 0x46, 0x75, 0x1b, 0xf7, 0x13, 0xc2, 0x04, + 0xf1, 0xb3, 0x11, 0x87, 0x61, 0xb9, 0x58, 0xe0, 0xce, 0x59, 0x5a, 0x7f, 0xd5, 0xd0, 0x2a, 0x0c, + 0x2a, 0xfe, 0x51, 0x43, 0x35, 0xe9, 0x93, 0xf8, 0x56, 0xb5, 0xe2, 0x59, 0x7b, 0x6e, 0xbc, 0xb5, + 0x64, 0xb4, 0x1c, 0x7b, 0x63, 0xfb, 0x87, 0x3f, 0xfe, 0xfd, 0xf9, 0x8c, 0x81, 0xb7, 0xac, 0xf9, + 0xdf, 0x85, 0x81, 0xad, 0x3e, 0x1f, 0xf8, 0x17, 0x0d, 0xd5, 0xc7, 0x1e, 0x8c, 0xad, 0x05, 0x69, + 0xa6, 0x6d, 0xbc, 0xf1, 0xf6, 0xf2, 0x80, 0xe5, 0xa5, 0xc1, 0x3d, 0x15, 0xf8, 0xd7, 0x5c, 0x1a, + 0xdc, 0x9b, 0xa5, 0xa4, 0x15, 0xfc, 0x64, 0x39, 0x69, 0x45, 0xb3, 0x30, 0xde, 0x03, 0x69, 0x36, + 0xb6, 0x16, 0x48, 0x83, 0x5b, 0x6f, 0x7d, 0x9b, 0xbb, 0xd5, 0xf7, 0xf8, 0x37, 0x0d, 0x6d, 0x14, + 0x0c, 0x18, 0xdb, 0x0b, 0x52, 0xcf, 0x7e, 0x2b, 0x1a, 0xad, 0x17, 0x81, 0x28, 0xbd, 0xef, 0x82, + 0x5e, 0x13, 0xdf, 0xaa, 0xd6, 0xab, 0x7c, 0xbc, 0x28, 0xf6, 0x77, 0x0d, 0xe1, 0x59, 0xc7, 0xc4, + 0xef, 0x2f, 0x10, 0x50, 0x69, 0xdc, 0x8d, 0x9d, 0x53, 0x20, 0x55, 0x05, 0x77, 0xa0, 0x82, 0x0f, + 0xf0, 0x4e, 0x75, 0x05, 0xf3, 0xec, 0xbb, 0x50, 0xce, 0xee, 0x17, 0xcf, 0x8e, 0x9b, 0xda, 0xf3, + 0xe3, 0xa6, 0xf6, 0xcf, 0x71, 0x53, 0xfb, 0xe9, 0xa4, 0xb9, 0xf2, 0xfc, 0xa4, 0xb9, 0xf2, 0xe7, + 0x49, 0x73, 0xe5, 0xab, 0xdb, 0x61, 0x94, 0x76, 0x8f, 0x3c, 0xd3, 0xe7, 0xfd, 0x9c, 0x1e, 0x60, + 0xe3, 0x5c, 0xc3, 0xa9, 0x6c, 0xe0, 0x65, 0x5e, 0x0d, 0xfe, 0x26, 0xbd, 0xf3, 0x5f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x41, 0xba, 0x43, 0xe4, 0x0b, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -546,6 +664,8 @@ type QueryClient interface { ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) // ChainInfo queries the latest info of a chain in Babylon's view ChainInfo(ctx context.Context, in *QueryChainInfoRequest, opts ...grpc.CallOption) (*QueryChainInfoResponse, error) + // ListHeaders queries the headers of a chain in Babylon's view, with pagination support + ListHeaders(ctx context.Context, in *QueryListHeadersRequest, opts ...grpc.CallOption) (*QueryListHeadersResponse, error) // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs FinalizedChainInfo(ctx context.Context, in *QueryFinalizedChainInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoResponse, error) } @@ -585,6 +705,15 @@ func (c *queryClient) ChainInfo(ctx context.Context, in *QueryChainInfoRequest, return out, nil } +func (c *queryClient) ListHeaders(ctx context.Context, in *QueryListHeadersRequest, opts ...grpc.CallOption) (*QueryListHeadersResponse, error) { + out := new(QueryListHeadersResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ListHeaders", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *queryClient) FinalizedChainInfo(ctx context.Context, in *QueryFinalizedChainInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoResponse, error) { out := new(QueryFinalizedChainInfoResponse) err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/FinalizedChainInfo", in, out, opts...) @@ -602,6 +731,8 @@ type QueryServer interface { ChainList(context.Context, *QueryChainListRequest) (*QueryChainListResponse, error) // ChainInfo queries the latest info of a chain in Babylon's view ChainInfo(context.Context, *QueryChainInfoRequest) (*QueryChainInfoResponse, error) + // ListHeaders queries the headers of a chain in Babylon's view, with pagination support + ListHeaders(context.Context, *QueryListHeadersRequest) (*QueryListHeadersResponse, error) // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs FinalizedChainInfo(context.Context, *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) } @@ -619,6 +750,9 @@ func (*UnimplementedQueryServer) ChainList(ctx context.Context, req *QueryChainL func (*UnimplementedQueryServer) ChainInfo(ctx context.Context, req *QueryChainInfoRequest) (*QueryChainInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ChainInfo not implemented") } +func (*UnimplementedQueryServer) ListHeaders(ctx context.Context, req *QueryListHeadersRequest) (*QueryListHeadersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListHeaders not implemented") +} func (*UnimplementedQueryServer) FinalizedChainInfo(ctx context.Context, req *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FinalizedChainInfo not implemented") } @@ -681,6 +815,24 @@ func _Query_ChainInfo_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } +func _Query_ListHeaders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryListHeadersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ListHeaders(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/babylon.zoneconcierge.v1.Query/ListHeaders", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ListHeaders(ctx, req.(*QueryListHeadersRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Query_FinalizedChainInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryFinalizedChainInfoRequest) if err := dec(in); err != nil { @@ -715,6 +867,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "ChainInfo", Handler: _Query_ChainInfo_Handler, }, + { + MethodName: "ListHeaders", + Handler: _Query_ListHeaders_Handler, + }, { MethodName: "FinalizedChainInfo", Handler: _Query_FinalizedChainInfo_Handler, @@ -900,6 +1056,97 @@ func (m *QueryChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *QueryListHeadersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryListHeadersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryListHeadersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryListHeadersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryListHeadersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryListHeadersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *QueryFinalizedChainInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1142,6 +1389,42 @@ func (m *QueryChainInfoResponse) Size() (n int) { return n } +func (m *QueryListHeadersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryListHeadersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.Size() + n += 1 + l 
+ sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + func (m *QueryFinalizedChainInfoRequest) Size() (n int) { if m == nil { return 0 @@ -1640,6 +1923,244 @@ func (m *QueryChainInfoResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryListHeadersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryListHeadersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryListHeadersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryListHeadersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryListHeadersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryListHeadersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Headers = append(m.Headers, &IndexedHeader{}) + if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryFinalizedChainInfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/zoneconcierge/types/query.pb.gw.go b/x/zoneconcierge/types/query.pb.gw.go index 3dac1f83e..7797403d4 100644 --- a/x/zoneconcierge/types/query.pb.gw.go +++ b/x/zoneconcierge/types/query.pb.gw.go @@ -123,6 +123,78 @@ func local_request_Query_ChainInfo_0(ctx context.Context, marshaler runtime.Mars } +var ( + filter_Query_ListHeaders_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_ListHeaders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryListHeadersRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ListHeaders_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListHeaders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ListHeaders_0(ctx context.Context, marshaler 
runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryListHeadersRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ListHeaders_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListHeaders(ctx, &protoReq) + return msg, metadata, err + +} + var ( filter_Query_FinalizedChainInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) @@ -270,6 +342,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_ListHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ListHeaders_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ListHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_FinalizedChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -394,6 +489,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_ListHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ListHeaders_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ListHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
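As a minimal sketch of how a client might exercise the new ListHeaders RPC through the generated QueryClient (the gRPC endpoint address, page limit, and chain ID below are placeholder assumptions; the RPC name and the ChainId/Pagination/Headers fields follow the generated types in this patch):

package main

import (
	"context"
	"fmt"

	sdkquery "github.com/cosmos/cosmos-sdk/types/query"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types"
)

func main() {
	// Placeholder endpoint and chain ID; any running babylond gRPC port works here.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := zctypes.NewQueryClient(conn)
	// Request the first page of indexed CZ headers for the given chain ID.
	res, err := client.ListHeaders(context.Background(), &zctypes.QueryListHeadersRequest{
		ChainId:    "test-chain",
		Pagination: &sdkquery.PageRequest{Limit: 10},
	})
	if err != nil {
		panic(err)
	}
	for _, h := range res.Headers {
		fmt.Printf("indexed CZ header at height %d\n", h.Height)
	}
}

Subsequent pages would be fetched by feeding res.Pagination.NextKey back into the PageRequest, mirroring the other paginated queries in this module.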
+ + }) + mux.Handle("GET", pattern_Query_FinalizedChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -424,6 +539,8 @@ var ( pattern_Query_ChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_ListHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "headers", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_FinalizedChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) ) @@ -434,5 +551,7 @@ var ( forward_Query_ChainInfo_0 = runtime.ForwardResponseMessage + forward_Query_ListHeaders_0 = runtime.ForwardResponseMessage + forward_Query_FinalizedChainInfo_0 = runtime.ForwardResponseMessage ) From 2b8e93ba08b035165974762054c6010a157f09e8 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Fri, 23 Dec 2022 16:49:00 +1100 Subject: [PATCH 04/37] zoneconcierge: fix flaky test `FuzzProofEpochSealed_BLSSig` (#256) --- x/zoneconcierge/keeper/proof_epoch_sealed_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/x/zoneconcierge/keeper/proof_epoch_sealed_test.go b/x/zoneconcierge/keeper/proof_epoch_sealed_test.go index a3ef1038b..a357bff78 100644 --- a/x/zoneconcierge/keeper/proof_epoch_sealed_test.go +++ b/x/zoneconcierge/keeper/proof_epoch_sealed_test.go @@ -90,12 +90,12 @@ func FuzzProofEpochSealed_BLSSig(f *testing.F) { // verify err = zckeeper.VerifyEpochSealed(epoch, rawCkpt, proof) - if numSubSet <= numVals*1/3 { // BLS sig does not reach a quorum - require.LessOrEqual(t, subsetPower, uint64(numVals*1/3)) + if subsetPower <= valSet.GetTotalPower()*1/3 { // BLS sig does not reach a quorum + require.LessOrEqual(t, numSubSet, numVals*1/3) require.Error(t, err) require.NotErrorIs(t, err, zctypes.ErrInvalidMerkleProof) } else { // BLS sig has a valid quorum - require.Greater(t, subsetPower, valSet.GetTotalPower()*1/3) + require.Greater(t, numSubSet, numVals*1/3) require.Error(t, err) require.ErrorIs(t, err, zctypes.ErrInvalidMerkleProof) } From 72398bd8f9fd401a65dddba2b00831d013354a8e Mon Sep 17 00:00:00 2001 From: KonradStaniec Date: Thu, 29 Dec 2022 15:18:42 +0100 Subject: [PATCH 05/37] Bump golang to 1.19 (#257) * Bump golang to 1.19 --- .circleci/config.yml | 4 ++-- README.md | 6 +++--- contrib/images/babylond-dlv/Dockerfile | 2 +- contrib/images/babylond-env/Dockerfile | 2 +- go.mod | 2 +- proto/scripts/protocgen.sh | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1ae67cd65..f6a1fc8ba 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ jobs: # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub. 
# See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor machine: - image: ubuntu-2204:2022.07.1 + image: ubuntu-2204:2022.10.1 resource_class: large # Add steps to the job # See: https://circleci.com/docs/2.0/configuration-reference/#steps @@ -31,7 +31,7 @@ jobs: - run: name: Lint command: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.50.1 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.50.1 ./bin/golangci-lint run - run: name: Run tests diff --git a/README.md b/README.md index 6009254fc..4f6321038 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,11 @@ ## Requirements -- Go 1.18 +- Go 1.19 ## Development requirements -- Go 1.18 +- Go 1.19 - Docker ## Building @@ -92,7 +92,7 @@ A brief description of the contents: ### Running the node ```console -babylond start --home ./.testnet/node0/babylond +babylond start --home ./.testnet/node0/babylond ``` ### Logs diff --git a/contrib/images/babylond-dlv/Dockerfile b/contrib/images/babylond-dlv/Dockerfile index 6291ca776..4b85bfa5e 100644 --- a/contrib/images/babylond-dlv/Dockerfile +++ b/contrib/images/babylond-dlv/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18-alpine AS build +FROM golang:1.19-alpine AS build RUN apk add build-base git linux-headers libc-dev RUN go install github.com/go-delve/delve/cmd/dlv@latest WORKDIR /work diff --git a/contrib/images/babylond-env/Dockerfile b/contrib/images/babylond-env/Dockerfile index 5bc6299b4..c6774dd59 100644 --- a/contrib/images/babylond-env/Dockerfile +++ b/contrib/images/babylond-env/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18-alpine AS build +FROM golang:1.19-alpine AS build RUN apk add build-base git linux-headers WORKDIR /work COPY go.mod go.sum /work/ diff --git a/go.mod b/go.mod index 27d75ae63..5adcbccec 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -go 1.18 +go 1.19 module github.com/babylonchain/babylon diff --git a/proto/scripts/protocgen.sh b/proto/scripts/protocgen.sh index d4ff67a7b..84e793d13 100755 --- a/proto/scripts/protocgen.sh +++ b/proto/scripts/protocgen.sh @@ -24,4 +24,4 @@ cd .. cp -r github.com/babylonchain/babylon/* ./ rm -rf github.com -go mod tidy -compat=1.18 +go mod tidy -compat=1.19 From 7c544bfb5a4bb988ef5bee311f673c6d5249f17e Mon Sep 17 00:00:00 2001 From: KonradStaniec Date: Mon, 2 Jan 2023 15:32:35 +0100 Subject: [PATCH 06/37] add e2e test to CI (#259) * Fix e2e tests wrapper script * Add e2e tests to CI --- .circleci/config.yml | 6 ++++++ test/e2e/containers/containers.go | 5 ++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f6a1fc8ba..ee3bc7769 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -43,6 +43,12 @@ jobs: make localnet-start-test sudo -E env "PATH=$PATH" make test-babylon-integration make localnet-stop + # TODO: If CI tests will take to long consider having only this e2e test + # instead of separate integration tests and e2e tests. 
+ - run: + name: Run e2e tests + command: | + make test-e2e # Invoke jobs via workflows diff --git a/test/e2e/containers/containers.go b/test/e2e/containers/containers.go index 622326f67..3878d3ddd 100644 --- a/test/e2e/containers/containers.go +++ b/test/e2e/containers/containers.go @@ -206,8 +206,11 @@ func (m *Manager) RunNodeResource(chainId string, containerName, valCondifDir st NetworkID: m.network.Network.ID, User: "root:root", Cmd: []string{"start"}, + Env: []string{ + "HOME=/babylondata", + }, Mounts: []string{ - fmt.Sprintf("%s/:/data/node0/babylond", valCondifDir), + fmt.Sprintf("%s/:/babylondata", valCondifDir), }, } From e89d8e053ba64d90721fda301a55a9c16388ac4e Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Fri, 6 Jan 2023 10:58:37 +1100 Subject: [PATCH 07/37] zoneconcierge: API for querying the chain info of a given epoch (#260) --- client/docs/swagger-ui/swagger.yaml | 3093 +++++++++++------ proto/babylon/zoneconcierge/query.proto | 16 + .../keeper/epoch_chain_info_indexer_test.go | 2 +- x/zoneconcierge/keeper/fork_indexer_test.go | 2 +- x/zoneconcierge/keeper/grpc_query.go | 21 + x/zoneconcierge/keeper/grpc_query_test.go | 52 +- x/zoneconcierge/keeper/keeper_test.go | 6 +- x/zoneconcierge/types/query.pb.go | 564 ++- x/zoneconcierge/types/query.pb.gw.go | 123 + 9 files changed, 2725 insertions(+), 1154 deletions(-) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index 55d132ecc..58561db06 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -4035,105 +4035,6 @@ paths: format: uint64 tags: - Query - /babylon/checkpointing/v1/latest_checkpoint: - get: - summary: LatestCheckpoint queries the checkpoint with the highest epoch num. - operationId: LatestCheckpoint - responses: - '200': - description: A successful response. - schema: - type: object - properties: - latest_checkpoint: - type: object - properties: - ckpt: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: >- - epoch_num defines the epoch number the raw checkpoint - is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers - of the BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated - from individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. - - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. 
- title: status defines the status of the checkpoint - bls_aggr_pk: - type: string - format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: - type: string - format: uint64 - title: >- - power_sum defines the accumulated voting power for the - checkpoint - description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. - description: >- - QueryLatestCheckpointResponse is the response type for the - Query/LatestCheckpoint - - RPC method. - default: - description: An unexpected error response. - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - tags: - - Query /babylon/checkpointing/v1/params: get: summary: Parameters queries the parameters of the module. @@ -4473,253 +4374,63 @@ paths: type: boolean tags: - Query - /babylon/checkpointing/v1/recent_raw_checkpoints/{from_epoch_num}: + /babylon/zoneconcierge/v1/chain_info/{chain_id}: get: - summary: >- - RawCheckpointList queries a list of checkpoints starting from a given - epoch number to the current epoch number. - operationId: RecentRawCheckpointList + summary: ChainInfo queries the latest info of a chain in Babylon's view + operationId: ChainInfo responses: '200': description: A successful response. schema: type: object properties: - raw_checkpoints: - type: array - items: - type: object - properties: - ckpt: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: >- - epoch_num defines the epoch number the raw - checkpoint is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers - of the BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is - aggregated from individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. - - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - title: status defines the status of the checkpoint - bls_aggr_pk: - type: string - format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: - type: string - format: uint64 - title: >- - power_sum defines the accumulated voting power for the - checkpoint - description: >- - RawCheckpointWithMeta wraps the raw checkpoint with meta - data. - title: >- - the order is going from the newest to oldest based on the - epoch number - pagination: - description: pagination defines the pagination in the response. 
+ chain_info: + title: chain_info is the info of the CZ type: object properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: + chain_id: type: string - format: uint64 + title: chain_id is the ID of the chain + latest_header: title: >- - total is total number of results available if - PageRequest.count_total + latest_header is the latest header in the canonical chain + of CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger - was set, its value is undefined otherwise - description: >- - QueryRecentRawCheckpointListResponse is the response type for the - Query/RecentRawCheckpoints - - RPC method. - default: - description: An unexpected error response. - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - parameters: - - name: from_epoch_num - description: from_epoch defines the start epoch of the query, which is inclusive - in: path - required: true - type: string - format: uint64 - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. - in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. - in: query - required: false - type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. - in: query - required: false - type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - - - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean - tags: - - Query - /babylon/zoneconcierge/v1/chain_info/{chain_id}: - get: - summary: ChainInfo queries the latest info of a chain in Babylon's view - operationId: ChainInfo - responses: - '200': - description: A successful response. 
- schema: - type: object - properties: - chain_info: - title: chain_info is the info of the CZ - type: object - properties: - chain_id: - type: string - title: chain_id is the ID of the chain - latest_header: - title: >- - latest_header is the latest header in the canonical chain - of CZ - type: object - properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: - type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the - header on CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that - includes this CZ header - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for - processing a block in the blockchain, + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, including all blockchain data structures and the rules of the application's @@ -5369,20 +5080,20 @@ paths: } tags: - Query - /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}: + /babylon/zoneconcierge/v1/epochs/{epoch_num}/chain_info/{chain_id}: get: summary: >- - FinalizedChainInfo queries the BTC-finalised info of a chain, with - proofs - operationId: FinalizedChainInfo + EpochChainInfo queries the latest info of a chain in a given epoch of + Babylon's view + operationId: EpochChainInfo responses: '200': description: A successful response. schema: type: object properties: - finalized_chain_info: - title: finalized_chain_info is the info of the CZ + chain_info: + title: chain_info is the info of the CZ type: object properties: chain_id: @@ -5664,472 +5375,1344 @@ paths: the subsequent headers cannot be verified without knowing the validator set in the previous header. - epoch_info: - title: epoch_info is the metadata of the last BTC-finalised epoch - type: object - properties: - epoch_number: - type: string - format: uint64 - current_epoch_interval: - type: string - format: uint64 - first_block_height: - type: string - format: uint64 - last_block_header: - description: >- - last_block_header is the header of the last block in this - epoch. + description: >- + QueryEpochChainInfoResponse is response type for the + Query/EpochChainInfo RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized - Babylon needs to remember the last header of each epoch to - complete unbonding validators/delegations when a previous - epoch's checkpoint is finalised. + protocol buffer message. 
This string must contain at + least - The last_block_header field is nil in the epoch's - beginning, and is set upon the end of this epoch. - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing - a block in the blockchain, + one "/" character. The last segment of the URL's path + must represent - including all blockchain data structures and the rules - of the application's + the fully qualified name of the type (as in - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - app_hash_root: - type: string - format: byte - title: >- - app_hash_root is the Merkle root of all AppHashs in this - epoch + `path/google.protobuf.Duration`). The name should be in + a canonical form - It will be used for proving a block is in an epoch - sealer_header: - title: >- - sealer_header is the 2nd header of the next epoch + (e.g., leading "." is not accepted). - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing - a block in the blockchain, - including all blockchain data structures and the rules - of the application's + In practice, teams usually precompile into the binary + all types that they - state transition machine. + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. 
+ value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: epoch_num + in: path + required: true + type: string + format: uint64 + - name: chain_id + in: path + required: true + type: string + tags: + - Query + /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}: + get: + summary: >- + FinalizedChainInfo queries the BTC-finalised info of a chain, with + proofs + operationId: FinalizedChainInfo + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + finalized_chain_info: + title: finalized_chain_info is the info of the CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: >- + latest_header is the latest header in the canonical chain + of CZ + type: object + properties: chain_id: type: string - height: + title: chain_id is the unique ID of the chain + hash: type: string - format: int64 - time: + format: byte + title: hash is the hash of this header + height: type: string - format: date-time - last_block_id: - title: prev block info + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header type: object properties: - hash: - type: string - format: byte - part_set_header: + version: + title: basic block info type: object properties: - total: - type: integer - format: int64 - hash: + block: type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - raw_checkpoint: - title: raw_checkpoint is the raw checkpoint of this epoch - type: object - properties: - epoch_num: - type: string - format: uint64 - title: >- - epoch_num defines the epoch number the raw checkpoint is - for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of - the BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated - from individual BLS sigs - btc_submission_key: - title: >- - btc_submission_key is position of two BTC txs that include the - raw checkpoint of this epoch - type: object - properties: - key: - type: array - items: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified - by hash of block in + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, - which transaction was included and transaction index in - the block - proof_tx_in_block: - title: >- - proof_tx_in_block is the proof that tx that carries the header - is included in a certain Babylon block - type: object - properties: - root_hash: - type: string - format: byte - data: - type: string - format: byte - proof: - type: object - properties: - total: - type: string - format: int64 - index: + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: type: string - format: int64 - leaf_hash: + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: type: string format: byte - aunts: - type: array - items: - type: string - format: byte - description: >- - TxProof represents a Merkle proof of the presence of a - transaction in the Merkle tree. - proof_header_in_epoch: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - title: >- - proof_header_in_epoch is the proof that the Babylon header is - in a certain epoch - proof_epoch_sealed: - title: proof_epoch_sealed is the proof that the epoch is sealed - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting - power, and its bls public key - title: >- - validator_set is the validator set of the sealed epoch + title: >- + babylon_tx_hash is the hash of the tx that includes + this header - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - proof_epoch_info: + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon ledger + latest_forks: title: >- - proof_epoch_info is the Merkle proof that the epoch's - metadata is committed to `app_hash` of the sealer header + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) type: object properties: - ops: + headers: type: array items: type: object properties: - type: + chain_id: type: string - key: + title: chain_id is the unique ID of the chain + hash: type: string format: byte - data: + title: hash is the hash of this header + height: type: string - format: byte - title: >- - ProofOp defines an operation used for calculating - Merkle root - - The data could be arbitrary format, providing - nessecary data + format: uint64 + title: >- + height is the height of this header on CZ ledger - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's - validator set is committed to `app_hash` of the sealer - header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: - type: string - key: + 
(hash, height) jointly provides the position of + the header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon + block that includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and + the rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + babylon_epoch: type: string - format: byte - data: + format: uint64 + title: >- + epoch is the epoch number of this header on + Babylon ledger + babylon_tx_hash: type: string format: byte - title: >- - ProofOp defines an operation used for calculating - Merkle root - - The data could be arbitrary format, providing - nessecary data + title: >- + babylon_tx_hash is the hash of the tx that + includes this header - for example neighbouring node hash - proof_epoch_submitted: - type: array - items: - type: object - properties: - key: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified - by hash of block in + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon + ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at + the same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the + same height. - which transaction was included and transaction index in - the block - description: >- - key is the position (txIdx, blockHash) of this tx on BTC - blockchain + For example, assuming the following blockchain - Although it is already a part of SubmissionKey, we store - it here again + ``` - to make TransactionInfo self-contained. + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - For example, storing the key allows TransactionInfo to - not relay on + Then the fork will be {[D1, D2]} where each item is in + struct `IndexedBlock`. - the fact that TransactionInfo will be ordered in the - same order as - TransactionKeys in SubmissionKey. 
- transaction: - type: string - format: byte - title: transaction is the full transaction in bytes - proof: - type: string - format: byte - title: >- - proof is the Merkle proof that this tx is included in - the position in `key` + Note that each `IndexedHeader` in the fork should have a + valid quorum certificate. - TODO: maybe it could use here better format as we - already processed and + Such forks exist since Babylon considers CZs might have + dishonest majority. - valideated the proof? - title: >- - TransactionInfo is the info of a tx that contains Babylon - checkpoint, including + Also note that the IBC-Go implementation will only + consider the first header in a fork valid, since - - the position of the tx on BTC blockchain + the subsequent headers cannot be verified without knowing + the validator set in the previous header. + epoch_info: + title: epoch_info is the metadata of the last BTC-finalised epoch + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this + epoch. - - the full tx content + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous + epoch's checkpoint is finalised. - - the Merkle proof that this tx is on the above position - title: >- - proof_epoch_submitted is the proof that the epoch's checkpoint - is included in BTC ledger + The last_block_header field is nil in the epoch's + beginning, and is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, - It is the two TransactionInfo in the best (i.e., earliest) - checkpoint submission - description: >- - QueryFinalizedChainInfoResponse is response type for the - Query/FinalizedChainInfo RPC method. - default: - description: An unexpected error response. - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized + including all blockchain data structures and the rules + of the application's - protocol buffer message. This string must contain at - least + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: >- + app_hash_root is the Merkle root of all AppHashs in this + epoch + + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + + including all blockchain data structures and the rules + of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ raw_checkpoint: + title: raw_checkpoint is the raw checkpoint of this epoch + type: object + properties: + epoch_num: + type: string + format: uint64 + title: >- + epoch_num defines the epoch number the raw checkpoint is + for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that + individual BLS sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of + the BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated + from individual BLS sigs + btc_submission_key: + title: >- + btc_submission_key is position of two BTC txs that include the + raw checkpoint of this epoch + type: object + properties: + key: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified + by hash of block in + + which transaction was included and transaction index in + the block + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header + is included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a + transaction in the Merkle tree. + proof_header_in_epoch: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + title: >- + proof_header_in_epoch is the proof that the Babylon header is + in a certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting + power, and its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + metadata is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating + Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating + Merkle root + + 
The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified + by hash of block in + + which transaction was included and transaction index in + the block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain + + Although it is already a part of SubmissionKey, we store + it here again + + to make TransactionInfo self-contained. + + For example, storing the key allows TransactionInfo to + not relay on + + the fact that TransactionInfo will be ordered in the + same order as + + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in + the position in `key` + + TODO: maybe it could use here better format as we + already processed and + + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including + + - the position of the tx on BTC blockchain + + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint + is included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission + description: >- + QueryFinalizedChainInfoResponse is response type for the + Query/FinalizedChainInfo RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. 
+ value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: chain_id + description: chain_id is the ID of the CZ + in: path + required: true + type: string + - name: prove + description: >- + prove indicates whether the querier wants to get proofs of this + timestamp. + in: query + required: false + type: boolean + tags: + - Query + /babylon/zoneconcierge/v1/headers/{chain_id}: + get: + summary: >- + ListHeaders queries the headers of a chain in Babylon's view, with + pagination support + operationId: ListHeaders + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: headers is the list of headers + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + description: >- + QueryListHeadersResponse is response type for the + Query/ListHeaders RPC method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least one "/" character. The last segment of the URL's path must represent @@ -6299,14 +6882,62 @@ paths: } parameters: - name: chain_id - description: chain_id is the ID of the CZ in: path required: true type: string - - name: prove + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset description: >- - prove indicates whether the querier wants to get proofs of this - timestamp. + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean @@ -9991,70 +10622,35 @@ definitions: description: |- QueryEpochStatusResponse is the response type for the Query/EpochStatus RPC method. - babylon.checkpointing.v1.QueryLatestCheckpointResponse: + babylon.checkpointing.v1.QueryLastCheckpointWithStatusResponse: type: object properties: - latest_checkpoint: + raw_checkpoint: type: object properties: - ckpt: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: epoch_num defines the epoch number the raw checkpoint is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that individual - BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of the - BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated from - individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: + epoch_num: type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. - - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. 
- - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - title: status defines the status of the checkpoint - bls_aggr_pk: + format: uint64 + title: epoch_num defines the epoch number the raw checkpoint is for + last_commit_hash: type: string format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: + title: >- + last_commit_hash defines the 'LastCommitHash' that individual BLS + sigs are signed on + bitmap: type: string - format: uint64 - title: power_sum defines the accumulated voting power for the checkpoint - description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. - description: >- - QueryLatestCheckpointResponse is the response type for the - Query/LatestCheckpoint - - RPC method. + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of the BLS + multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated from + individual BLS sigs + title: RawCheckpoint wraps the BLS multi sig with meta data babylon.checkpointing.v1.QueryParamsResponse: type: object properties: @@ -10224,101 +10820,13 @@ definitions: type: string format: uint64 status_count: - type: object - additionalProperties: - type: string - format: uint64 - description: >- - QueryRecentEpochStatusCountResponse is the response type for the - Query/EpochStatusCount - - RPC method. - babylon.checkpointing.v1.QueryRecentRawCheckpointListResponse: - type: object - properties: - raw_checkpoints: - type: array - items: - type: object - properties: - ckpt: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: epoch_num defines the epoch number the raw checkpoint is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of the - BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated from - individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. - - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - title: status defines the status of the checkpoint - bls_aggr_pk: - type: string - format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: - type: string - format: uint64 - title: >- - power_sum defines the accumulated voting power for the - checkpoint - description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. - title: the order is going from the newest to oldest based on the epoch number - pagination: - description: pagination defines the pagination in the response. 
- type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise + type: object + additionalProperties: + type: string + format: uint64 description: >- - QueryRecentRawCheckpointListResponse is the response type for the - Query/RecentRawCheckpoints + QueryRecentEpochStatusCountResponse is the response type for the + Query/EpochStatusCount RPC method. babylon.checkpointing.v1.RawCheckpoint: @@ -10908,180 +11416,468 @@ definitions: block: type: string format: uint64 - app: + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in + the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this header + + (babylon_block_height, babylon_tx_hash) jointly provides the position + of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + babylon.zoneconcierge.v1.Params: + type: object + description: Params defines the parameters for the module. 
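
The regenerated `IndexedHeader` definition above pairs two coordinate systems for the same CZ header: (hash, height) positions it on the CZ ledger, while (babylon_epoch, babylon_tx_hash) positions the Babylon transaction that carried it. Below is a simplified, hand-written mirror of those fields, not the generated `types.IndexedHeader`, showing how a consumer might key canonical headers by their CZ position; the `headerKey` helper and the sample values are illustrative only.

```go
package main

import "fmt"

// indexedHeader is a trimmed, hand-written stand-in for the IndexedHeader
// described above; the generated type lives in x/zoneconcierge/types.
type indexedHeader struct {
	ChainID       string // chain_id: unique ID of the CZ
	Hash          []byte // hash of this header
	Height        uint64 // height of this header on the CZ ledger
	BabylonEpoch  uint64 // epoch number of this header on the Babylon ledger
	BabylonTxHash []byte // hash of the Babylon tx that includes this header
}

// headerKey is a hypothetical composite key: (chain_id, height) uniquely
// positions a canonical header on the CZ ledger.
func headerKey(chainID string, height uint64) string {
	return fmt.Sprintf("%s/%d", chainID, height)
}

func main() {
	index := map[string]indexedHeader{}
	h := indexedHeader{ChainID: "test-cz", Height: 42, BabylonEpoch: 7}
	index[headerKey(h.ChainID, h.Height)] = h
	// The Babylon-side coordinates then say where this header was recorded on Babylon.
	fmt.Println(index[headerKey("test-cz", 42)].BabylonEpoch) // 7
}
```
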
+ babylon.zoneconcierge.v1.ProofEpochSealed: + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, and its + bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on `last_commit_hash` + of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: |- + ProofOp defines an operation used for calculating Merkle root + The data could be arbitrary format, providing nessecary data + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's validator set is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: |- + ProofOp defines an operation used for calculating Merkle root + The data could be arbitrary format, providing nessecary data + for example neighbouring node hash + title: >- + ProofEpochSealed is the proof that an epoch is sealed by the sealer + header, i.e., the 2nd header of the next epoch + + With the access of metadata + + - Metadata of this epoch, which includes the sealer header + + - Raw checkpoint of this epoch + + The verifier can perform the following verification rules: + + - The raw checkpoint's `last_commit_hash` is same as in the sealer header + + - More than 1/3 (in voting power) validators in the validator set of this + epoch have signed `last_commit_hash` of the sealer header + + - The epoch medatata is committed to the `app_hash` of the sealer header + + - The validator set is committed to the `app_hash` of the sealer header + babylon.zoneconcierge.v1.QueryChainInfoResponse: + type: object + properties: + chain_info: + title: chain_info is the info of the CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in the canonical chain of CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on + CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block in - the blockchain, - - including all blockchain data structures and the rules of the - application's + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) type: object properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - babylon_epoch: - type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: - type: string - format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this header + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger - (babylon_block_height, babylon_tx_hash) jointly provides the position - of the header on Babylon ledger - title: IndexedHeader is the metadata of a CZ header - babylon.zoneconcierge.v1.Params: - type: object - description: Params defines the parameters for the module. 
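
The `ProofEpochSealed` definition above states its verification rules in prose: the raw checkpoint's `last_commit_hash` matches the sealer header, more than 1/3 of the epoch's voting power has signed it via the BLS multisig, and both the epoch metadata and the validator set are committed to the sealer header's `app_hash`. The sketch below only sequences those four checks; the local types are trimmed stand-ins and `verifyBLSMultisig`/`verifyMerkleProof` are hypothetical placeholders, not the module's actual verifier.

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// Trimmed local stand-ins for the objects named in the ProofEpochSealed
// description; only the fields touched by the four rules are kept.
type header struct{ LastCommitHash, AppHash []byte }
type epochInfo struct{ SealerHeader header }
type rawCheckpoint struct{ LastCommitHash, Bitmap, BlsMultiSig []byte }
type validatorWithBlsKey struct {
	ValidatorAddress string
	BlsPubKey        []byte
	VotingPower      uint64
}
type proofEpochSealed struct {
	ValidatorSet     []validatorWithBlsKey
	ProofEpochInfo   []byte // placeholder for the Merkle proof ops
	ProofEpochValSet []byte // placeholder for the Merkle proof ops
}

// Placeholder checks: a real verifier would perform BLS multisig verification
// and Tendermint Merkle proof verification here.
func verifyBLSMultisig(vals []validatorWithBlsKey, bitmap, sig, msg []byte) error { return nil }
func verifyMerkleProof(proof, appHash []byte, leaf interface{}) error             { return nil }

// verifyEpochSealed sequences the four rules listed in the ProofEpochSealed
// description; it is an illustrative sketch only.
func verifyEpochSealed(ep epochInfo, ckpt rawCheckpoint, proof proofEpochSealed) error {
	sealer := ep.SealerHeader
	// Rule 1: the raw checkpoint's last_commit_hash equals the sealer header's.
	if !bytes.Equal(ckpt.LastCommitHash, sealer.LastCommitHash) {
		return errors.New("last_commit_hash mismatch")
	}
	// Rule 2: >1/3 of the epoch's voting power has signed it via the BLS multisig.
	if err := verifyBLSMultisig(proof.ValidatorSet, ckpt.Bitmap, ckpt.BlsMultiSig, ckpt.LastCommitHash); err != nil {
		return err
	}
	// Rule 3: the epoch metadata is committed to the sealer header's app_hash.
	if err := verifyMerkleProof(proof.ProofEpochInfo, sealer.AppHash, ep); err != nil {
		return err
	}
	// Rule 4: the validator set is committed to the sealer header's app_hash.
	return verifyMerkleProof(proof.ProofEpochValSet, sealer.AppHash, proof.ValidatorSet)
}

func main() {
	h := header{LastCommitHash: []byte("lch"), AppHash: []byte("app")}
	err := verifyEpochSealed(epochInfo{SealerHeader: h}, rawCheckpoint{LastCommitHash: []byte("lch")}, proofEpochSealed{})
	fmt.Println("sealed epoch verified:", err == nil)
}
```

In the module itself, these checks consume the `proof_epoch_sealed` field returned by `Query/FinalizedChainInfo`, as documented in the regenerated response schema above.
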
- babylon.zoneconcierge.v1.ProofEpochSealed: - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting power, and its - bls public key - title: >- - validator_set is the validator set of the sealed epoch + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. 
+ babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - This validator set has generated a BLS multisig on `last_commit_hash` - of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's metadata is - committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: - type: string - key: - type: string - format: byte - data: - type: string - format: byte - title: |- - ProofOp defines an operation used for calculating Merkle root - The data could be arbitrary format, providing nessecary data - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's validator set is - committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: - type: string - key: - type: string - format: byte - data: - type: string - format: byte - title: |- - ProofOp defines an operation used for calculating Merkle root - The data could be arbitrary format, providing nessecary data - for example neighbouring node hash - title: >- - ProofEpochSealed is the proof that an epoch is sealed by the sealer - header, i.e., the 2nd header of the next epoch + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the + same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same + height. - With the access of metadata + For example, assuming the following blockchain - - Metadata of this epoch, which includes the sealer header + ``` - - Raw checkpoint of this epoch + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - The verifier can perform the following verification rules: + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - - The raw checkpoint's `last_commit_hash` is same as in the sealer header - - More than 1/3 (in voting power) validators in the validator set of this - epoch have signed `last_commit_hash` of the sealer header + Note that each `IndexedHeader` in the fork should have a valid + quorum certificate. - - The epoch medatata is committed to the `app_hash` of the sealer header + Such forks exist since Babylon considers CZs might have dishonest + majority. - - The validator set is committed to the `app_hash` of the sealer header - babylon.zoneconcierge.v1.QueryChainInfoResponse: + Also note that the IBC-Go implementation will only consider the + first header in a fork valid, since + + the subsequent headers cannot be verified without knowing the + validator set in the previous header. + description: >- + QueryChainInfoResponse is response type for the Query/ChainInfo RPC + method. + babylon.zoneconcierge.v1.QueryChainListResponse: + type: object + properties: + chain_ids: + type: array + items: + type: string + title: QueryChainListResponse is response type for the Query/ChainList RPC method + babylon.zoneconcierge.v1.QueryEpochChainInfoResponse: type: object properties: chain_info: @@ -11359,16 +12155,8 @@ definitions: the subsequent headers cannot be verified without knowing the validator set in the previous header. 
description: >- - QueryChainInfoResponse is response type for the Query/ChainInfo RPC - method. - babylon.zoneconcierge.v1.QueryChainListResponse: - type: object - properties: - chain_ids: - type: array - items: - type: string - title: QueryChainListResponse is response type for the Query/ChainList RPC method + QueryEpochChainInfoResponse is response type for the Query/EpochChainInfo + RPC method. babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse: type: object properties: @@ -12079,6 +12867,153 @@ definitions: description: >- QueryFinalizedChainInfoResponse is response type for the Query/FinalizedChainInfo RPC method. + babylon.zoneconcierge.v1.QueryListHeadersResponse: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes + this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this header + + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: headers is the list of headers + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + description: >- + QueryListHeadersResponse is response type for the Query/ListHeaders RPC + method. babylon.zoneconcierge.v1.QueryParamsResponse: type: object properties: diff --git a/proto/babylon/zoneconcierge/query.proto b/proto/babylon/zoneconcierge/query.proto index 882449c2a..67342d490 100644 --- a/proto/babylon/zoneconcierge/query.proto +++ b/proto/babylon/zoneconcierge/query.proto @@ -30,6 +30,10 @@ service Query { rpc ChainInfo(QueryChainInfoRequest) returns (QueryChainInfoResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/chain_info/{chain_id}"; } + // EpochChainInfo queries the latest info of a chain in a given epoch of Babylon's view + rpc EpochChainInfo(QueryEpochChainInfoRequest) returns (QueryEpochChainInfoResponse) { + option (google.api.http).get = "/babylon/zoneconcierge/v1/chain_info/{chain_id}/epochs/{epoch_num}"; + } // ListHeaders queries the headers of a chain in Babylon's view, with pagination support rpc ListHeaders(QueryListHeadersRequest) returns (QueryListHeadersResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/headers/{chain_id}"; @@ -68,6 +72,18 @@ message QueryChainInfoResponse { babylon.zoneconcierge.v1.ChainInfo chain_info = 1; } +// QueryEpochChainInfoRequest is request type for the Query/EpochChainInfo RPC method. +message QueryEpochChainInfoRequest { + uint64 epoch_num = 1; + string chain_id = 2; +} + +// QueryEpochChainInfoResponse is response type for the Query/EpochChainInfo RPC method. +message QueryEpochChainInfoResponse { + // chain_info is the info of the CZ + babylon.zoneconcierge.v1.ChainInfo chain_info = 1; +} + // QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. 
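
Given the `EpochChainInfo` RPC and messages added above, a client can read a CZ's state as of a finished epoch either through the REST route `/babylon/zoneconcierge/v1/chain_info/{chain_id}/epochs/{epoch_num}` or through gRPC. The sketch below uses the generated query client; the endpoint `localhost:9090`, the chain ID, and the epoch number are assumptions, not values from this patch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed: a Babylon node serving gRPC on localhost:9090.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := zctypes.NewQueryClient(conn)
	resp, err := client.EpochChainInfo(context.Background(), &zctypes.QueryEpochChainInfoRequest{
		EpochNum: 10,        // epoch of interest
		ChainId:  "test-cz", // ID of the CZ
	})
	if err != nil {
		log.Fatal(err)
	}
	// chain_info holds the CZ's latest header and latest forks as of that epoch.
	fmt.Println(resp.ChainInfo)
}
```
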
message QueryListHeadersRequest { string chain_id = 1; diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go index d0c68532e..dcc5bdd49 100644 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go +++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go @@ -22,7 +22,7 @@ func FuzzEpochChainInfoIndexer(f *testing.F) { // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 - SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, numHeaders, numForkHeaders) + SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) // simulate the scenario that a random epoch has ended epochNum := datagen.RandomInt(10) diff --git a/x/zoneconcierge/keeper/fork_indexer_test.go b/x/zoneconcierge/keeper/fork_indexer_test.go index bc6158853..42c65f29e 100644 --- a/x/zoneconcierge/keeper/fork_indexer_test.go +++ b/x/zoneconcierge/keeper/fork_indexer_test.go @@ -22,7 +22,7 @@ func FuzzForkIndexer(f *testing.F) { // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 - _, forkHeaders := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, numHeaders, numForkHeaders) + _, forkHeaders := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) // check if the fork is updated or not forks := zcKeeper.GetForks(ctx, czChain.ChainID, numHeaders-1) diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index ecd8c8577..205453dc9 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -45,6 +45,27 @@ func (k Keeper) ChainInfo(c context.Context, req *types.QueryChainInfoRequest) ( return resp, nil } +// EpochChainInfo returns the info of a chain with given ID in a given epoch +func (k Keeper) EpochChainInfo(c context.Context, req *types.QueryEpochChainInfoRequest) (*types.QueryEpochChainInfoResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + if len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") + } + + ctx := sdk.UnwrapSDKContext(c) + + // find the chain info of the given epoch + chainInfo, err := k.GetEpochChainInfo(ctx, req.ChainId, req.EpochNum) + if err != nil { + return nil, err + } + resp := &types.QueryEpochChainInfoResponse{ChainInfo: chainInfo} + return resp, nil +} + // ListHeaders returns all headers of a chain with given ID, with pagination support func (k Keeper) ListHeaders(c context.Context, req *types.QueryListHeadersRequest) (*types.QueryListHeadersResponse, error) { if req == nil { diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go index 3dcbd79c6..ad13d2a3f 100644 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ b/x/zoneconcierge/keeper/grpc_query_test.go @@ -74,7 +74,7 @@ func FuzzChainInfo(f *testing.F) { // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 - SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, numHeaders, numForkHeaders) + SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) // check if the chain info of is recorded 
or not resp, err := zcKeeper.ChainInfo(ctx, &zctypes.QueryChainInfoRequest{ChainId: czChain.ChainID}) @@ -85,6 +85,52 @@ func FuzzChainInfo(f *testing.F) { }) } +func FuzzEpochChainInfo(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, zcKeeper := SetupTest(t) + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + numReqs := datagen.RandomInt(5) + 1 + + epochNumList := []uint64{datagen.RandomInt(10) + 1} + nextHeightList := []uint64{0} + numHeadersList := []uint64{} + numForkHeadersList := []uint64{} + + // we test the scenario of ending an epoch for multiple times, in order to ensure that + // consecutive epoch infos do not affect each other. + for i := uint64(0); i < numReqs; i++ { + // generate a random number of headers and fork headers + numHeadersList = append(numHeadersList, datagen.RandomInt(100)+1) + numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(10)+1) + // trigger hooks to append these headers and fork headers + SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i]) + // prepare nextHeight for the next request + nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i]) + + // simulate the scenario that a random epoch has ended + hooks.AfterEpochEnds(ctx, epochNumList[i]) + // prepare epochNum for the next request + epochNumList = append(epochNumList, epochNumList[i]+datagen.RandomInt(10)+1) + } + + // attest the correctness of epoch info for each tested epoch + for i := uint64(0); i < numReqs; i++ { + resp, err := zcKeeper.EpochChainInfo(ctx, &zctypes.QueryEpochChainInfoRequest{EpochNum: epochNumList[i], ChainId: czChain.ChainID}) + require.NoError(t, err) + chainInfo := resp.ChainInfo + require.Equal(t, nextHeightList[i+1]-1, chainInfo.LatestHeader.Height) + require.Equal(t, numForkHeadersList[i], uint64(len(chainInfo.LatestForks.Headers))) + } + }) +} + func FuzzListHeaders(f *testing.F) { datagen.AddRandomSeedsToFuzzer(f, 10) @@ -99,7 +145,7 @@ func FuzzListHeaders(f *testing.F) { // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 - headers, _ := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, numHeaders, numForkHeaders) + headers, _ := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) // a request with randomised pagination limit := datagen.RandomInt(int(numHeaders)) + 1 @@ -170,7 +216,7 @@ func FuzzFinalizedChainInfo(f *testing.F) { // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 - SimulateHeadersAndForksViaHook(ctx, hooks, czChainID, numHeaders, numForkHeaders) + SimulateHeadersAndForksViaHook(ctx, hooks, czChainID, 0, numHeaders, numForkHeaders) hooks.AfterEpochEnds(ctx, epoch.EpochNumber) err := hooks.AfterRawCheckpointFinalized(ctx, epoch.EpochNumber) diff --git a/x/zoneconcierge/keeper/keeper_test.go b/x/zoneconcierge/keeper/keeper_test.go index 937754427..ff370bb88 100644 --- a/x/zoneconcierge/keeper/keeper_test.go +++ b/x/zoneconcierge/keeper/keeper_test.go @@ -46,11 +46,11 @@ func SimulateHeadersViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID strin } // SimulateHeadersViaHook generates a random non-zero number of canonical headers and fork headers via the hook 
-func SimulateHeadersAndForksViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID string, numHeaders uint64, numForkHeaders uint64) ([]*ibctmtypes.Header, []*ibctmtypes.Header) { +func SimulateHeadersAndForksViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID string, startHeight uint64, numHeaders uint64, numForkHeaders uint64) ([]*ibctmtypes.Header, []*ibctmtypes.Header) { headers := []*ibctmtypes.Header{} // invoke the hook a number of times to simulate a number of blocks for i := uint64(0); i < numHeaders; i++ { - header := datagen.GenRandomIBCTMHeader(chainID, i) + header := datagen.GenRandomIBCTMHeader(chainID, startHeight+i) hooks.AfterHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(32), header, false) headers = append(headers, header) } @@ -58,7 +58,7 @@ func SimulateHeadersAndForksViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chain // generate a number of fork headers forkHeaders := []*ibctmtypes.Header{} for i := uint64(0); i < numForkHeaders; i++ { - header := datagen.GenRandomIBCTMHeader(chainID, numHeaders-1) + header := datagen.GenRandomIBCTMHeader(chainID, startHeight+numHeaders-1) hooks.AfterHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(32), header, true) forkHeaders = append(forkHeaders, header) } diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go index 831d110dd..32252084d 100644 --- a/x/zoneconcierge/types/query.pb.go +++ b/x/zoneconcierge/types/query.pb.go @@ -291,6 +291,105 @@ func (m *QueryChainInfoResponse) GetChainInfo() *ChainInfo { return nil } +// QueryEpochChainInfoRequest is request type for the Query/EpochChainInfo RPC method. +type QueryEpochChainInfoRequest struct { + EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *QueryEpochChainInfoRequest) Reset() { *m = QueryEpochChainInfoRequest{} } +func (m *QueryEpochChainInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryEpochChainInfoRequest) ProtoMessage() {} +func (*QueryEpochChainInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{6} +} +func (m *QueryEpochChainInfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochChainInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochChainInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochChainInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochChainInfoRequest.Merge(m, src) +} +func (m *QueryEpochChainInfoRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochChainInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochChainInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochChainInfoRequest proto.InternalMessageInfo + +func (m *QueryEpochChainInfoRequest) GetEpochNum() uint64 { + if m != nil { + return m.EpochNum + } + return 0 +} + +func (m *QueryEpochChainInfoRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// QueryEpochChainInfoResponse is response type for the Query/EpochChainInfo RPC method. 
+type QueryEpochChainInfoResponse struct { + // chain_info is the info of the CZ + ChainInfo *ChainInfo `protobuf:"bytes,1,opt,name=chain_info,json=chainInfo,proto3" json:"chain_info,omitempty"` +} + +func (m *QueryEpochChainInfoResponse) Reset() { *m = QueryEpochChainInfoResponse{} } +func (m *QueryEpochChainInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryEpochChainInfoResponse) ProtoMessage() {} +func (*QueryEpochChainInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{7} +} +func (m *QueryEpochChainInfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochChainInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochChainInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochChainInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochChainInfoResponse.Merge(m, src) +} +func (m *QueryEpochChainInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochChainInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochChainInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochChainInfoResponse proto.InternalMessageInfo + +func (m *QueryEpochChainInfoResponse) GetChainInfo() *ChainInfo { + if m != nil { + return m.ChainInfo + } + return nil +} + // QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. type QueryListHeadersRequest struct { ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` @@ -302,7 +401,7 @@ func (m *QueryListHeadersRequest) Reset() { *m = QueryListHeadersRequest func (m *QueryListHeadersRequest) String() string { return proto.CompactTextString(m) } func (*QueryListHeadersRequest) ProtoMessage() {} func (*QueryListHeadersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{6} + return fileDescriptor_2caab7ee15063236, []int{8} } func (m *QueryListHeadersRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +456,7 @@ func (m *QueryListHeadersResponse) Reset() { *m = QueryListHeadersRespon func (m *QueryListHeadersResponse) String() string { return proto.CompactTextString(m) } func (*QueryListHeadersResponse) ProtoMessage() {} func (*QueryListHeadersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{7} + return fileDescriptor_2caab7ee15063236, []int{9} } func (m *QueryListHeadersResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -412,7 +511,7 @@ func (m *QueryFinalizedChainInfoRequest) Reset() { *m = QueryFinalizedCh func (m *QueryFinalizedChainInfoRequest) String() string { return proto.CompactTextString(m) } func (*QueryFinalizedChainInfoRequest) ProtoMessage() {} func (*QueryFinalizedChainInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{8} + return fileDescriptor_2caab7ee15063236, []int{10} } func (m *QueryFinalizedChainInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -480,7 +579,7 @@ func (m *QueryFinalizedChainInfoResponse) Reset() { *m = QueryFinalizedC func (m *QueryFinalizedChainInfoResponse) String() string { return proto.CompactTextString(m) } func (*QueryFinalizedChainInfoResponse) ProtoMessage() {} func (*QueryFinalizedChainInfoResponse) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_2caab7ee15063236, []int{9} + return fileDescriptor_2caab7ee15063236, []int{11} } func (m *QueryFinalizedChainInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -572,6 +671,8 @@ func init() { proto.RegisterType((*QueryChainListResponse)(nil), "babylon.zoneconcierge.v1.QueryChainListResponse") proto.RegisterType((*QueryChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryChainInfoRequest") proto.RegisterType((*QueryChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryChainInfoResponse") + proto.RegisterType((*QueryEpochChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryEpochChainInfoRequest") + proto.RegisterType((*QueryEpochChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryEpochChainInfoResponse") proto.RegisterType((*QueryListHeadersRequest)(nil), "babylon.zoneconcierge.v1.QueryListHeadersRequest") proto.RegisterType((*QueryListHeadersResponse)(nil), "babylon.zoneconcierge.v1.QueryListHeadersResponse") proto.RegisterType((*QueryFinalizedChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoRequest") @@ -581,69 +682,74 @@ func init() { func init() { proto.RegisterFile("babylon/zoneconcierge/query.proto", fileDescriptor_2caab7ee15063236) } var fileDescriptor_2caab7ee15063236 = []byte{ - // 987 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xd1, 0x8e, 0xdb, 0x44, - 0x14, 0x5d, 0xb7, 0xdd, 0xec, 0x66, 0x56, 0x54, 0x65, 0xba, 0xa5, 0x6e, 0x80, 0x74, 0x31, 0x52, - 0xbb, 0xad, 0x8a, 0x8d, 0x03, 0x15, 0xac, 0x90, 0x90, 0xba, 0x85, 0x42, 0x54, 0x04, 0x5b, 0x77, - 0x57, 0x42, 0x08, 0x64, 0x8d, 0xed, 0x89, 0x63, 0x6d, 0x32, 0xe3, 0x7a, 0xbc, 0x69, 0x52, 0xe0, - 0x85, 0x1f, 0x00, 0x89, 0x17, 0xbe, 0xa0, 0x48, 0x7c, 0x49, 0x1f, 0x78, 0xa8, 0xc4, 0x0b, 0xe2, - 0x01, 0xa1, 0x5d, 0x3e, 0x04, 0xf9, 0xce, 0x38, 0xb1, 0x93, 0x98, 0xa4, 0xfb, 0x12, 0xd9, 0x33, - 0xf7, 0x9c, 0x7b, 0xee, 0x9d, 0x3b, 0xc7, 0x41, 0x6f, 0x78, 0xc4, 0x1b, 0xf5, 0x38, 0xb3, 0x9e, - 0x70, 0x46, 0x7d, 0xce, 0xfc, 0x88, 0x26, 0x21, 0xb5, 0x1e, 0x1d, 0xd1, 0x64, 0x64, 0xc6, 0x09, - 0x4f, 0x39, 0xd6, 0x55, 0x88, 0x59, 0x0a, 0x31, 0x07, 0x76, 0x63, 0x33, 0xe4, 0x21, 0x87, 0x20, - 0x2b, 0x7b, 0x92, 0xf1, 0x8d, 0xd7, 0x42, 0xce, 0xc3, 0x1e, 0xb5, 0x48, 0x1c, 0x59, 0x84, 0x31, - 0x9e, 0x92, 0x34, 0xe2, 0x4c, 0xe4, 0xbb, 0x29, 0x65, 0x01, 0x4d, 0xfa, 0x11, 0x4b, 0xad, 0x74, - 0x14, 0x53, 0x21, 0x7f, 0xd5, 0xee, 0xeb, 0x85, 0x5d, 0x3f, 0x19, 0xc5, 0x29, 0xb7, 0xe2, 0x84, - 0xf3, 0x8e, 0xda, 0xbe, 0xe9, 0x73, 0xd1, 0xe7, 0xc2, 0xf2, 0x88, 0x50, 0x1a, 0xad, 0x81, 0xed, - 0xd1, 0x94, 0xd8, 0x56, 0x4c, 0xc2, 0x88, 0x41, 0x26, 0x15, 0xdb, 0xcc, 0x2b, 0xf3, 0x52, 0xdf, - 0xef, 0x52, 0xff, 0x30, 0xe6, 0x90, 0x73, 0xa8, 0xf6, 0x6f, 0xcc, 0xdf, 0x2f, 0xbd, 0xa9, 0xd0, - 0x71, 0x93, 0x26, 0x3b, 0x11, 0x0b, 0x8b, 0x4d, 0x6a, 0x5c, 0x9b, 0x1f, 0x32, 0x43, 0x65, 0xe4, - 0x71, 0x34, 0xe6, 0x7e, 0x37, 0x0b, 0x19, 0xd8, 0xe3, 0xe7, 0xe9, 0x98, 0xf2, 0x99, 0xc4, 0x24, - 0x21, 0x7d, 0x31, 0xad, 0xbe, 0x1c, 0x53, 0x3e, 0x22, 0x08, 0x35, 0x36, 0x11, 0x7e, 0x90, 0x29, - 0xdd, 0x03, 0xbc, 0x43, 0x1f, 0x1d, 0x51, 0x91, 0x1a, 0x07, 0xe8, 0x62, 0x69, 0x55, 0xc4, 0x9c, - 0x09, 0x8a, 0x3f, 0x44, 0x35, 0x99, 0x47, 0xd7, 0xb6, 0xb4, 0xed, 0x8d, 0xd6, 0x96, 0x59, 0x75, - 0xfa, 0xa6, 0x44, 0xee, 0x9e, 0x7b, 0xf6, 0xf7, 0xd5, 0x15, 0x47, 0xa1, 0x8c, 0xcb, 0xe8, 0x12, - 0xd0, 0xde, 0xed, 0x92, 0x88, 0x7d, 0x16, 0x89, 0x34, 0xcf, 0x77, 0x1b, 0xbd, 0x32, 0xbd, 0xa1, - 0x52, 0xbe, 0x8a, 0xea, 0x7e, 0xb6, 0xe8, 0x46, 0x41, 0x96, 
0xf5, 0xec, 0x76, 0xdd, 0x59, 0x87, - 0x85, 0x76, 0x20, 0x8c, 0x56, 0x91, 0xaf, 0xcd, 0x3a, 0x5c, 0xf1, 0xe1, 0x2b, 0x68, 0x3d, 0x47, - 0x81, 0xd4, 0xba, 0xb3, 0xa6, 0x40, 0xc6, 0xd7, 0xc5, 0x54, 0x12, 0xa3, 0x52, 0xed, 0x22, 0xa4, - 0x40, 0xac, 0xc3, 0x55, 0x85, 0x6f, 0x56, 0x57, 0x38, 0x21, 0x90, 0x0a, 0xb3, 0x47, 0xe3, 0x3b, - 0x74, 0x19, 0xd8, 0xb3, 0x1a, 0x3e, 0xa5, 0x24, 0xa0, 0x89, 0x58, 0xac, 0x09, 0xdf, 0x43, 0x68, - 0x32, 0xa1, 0xfa, 0x19, 0xc8, 0x7c, 0xcd, 0x94, 0xe3, 0x6c, 0x66, 0xe3, 0x6c, 0xca, 0x69, 0x52, - 0xe3, 0x6c, 0xee, 0x91, 0x90, 0x2a, 0x5a, 0xa7, 0x80, 0x34, 0x9e, 0x6a, 0x48, 0x9f, 0x4d, 0xaf, - 0xca, 0xbb, 0x83, 0xd6, 0xba, 0x72, 0x09, 0xfa, 0xb8, 0xd1, 0xba, 0x5e, 0x5d, 0x5b, 0x9b, 0x05, - 0x74, 0x48, 0x03, 0x49, 0xe1, 0xe4, 0x38, 0xfc, 0xc9, 0x1c, 0x9d, 0xd7, 0x17, 0xea, 0x94, 0xf9, - 0x4b, 0x42, 0x1f, 0xa0, 0x26, 0xe8, 0xbc, 0x17, 0x31, 0xd2, 0x8b, 0x9e, 0xd0, 0xe0, 0x05, 0x4e, - 0x10, 0x6f, 0xa2, 0xd5, 0x38, 0xe1, 0x03, 0x0a, 0x02, 0xd6, 0x1d, 0xf9, 0x62, 0x3c, 0x5d, 0x45, - 0x57, 0x2b, 0x39, 0x55, 0x0b, 0x0e, 0xd0, 0x66, 0x27, 0xdf, 0x75, 0x4f, 0x77, 0xd6, 0xb8, 0x33, - 0x43, 0x8f, 0x77, 0x10, 0x82, 0x4b, 0x2a, 0xc9, 0x64, 0x5b, 0x1a, 0x63, 0xb2, 0xf1, 0xfd, 0x1d, - 0xd8, 0xe6, 0xc7, 0xd9, 0xb3, 0x53, 0x87, 0x25, 0x80, 0x7e, 0x8e, 0xce, 0x27, 0xe4, 0xb1, 0x3b, - 0x71, 0x02, 0xfd, 0xac, 0xea, 0x6a, 0x0e, 0x2f, 0x59, 0x46, 0xc6, 0xe1, 0x90, 0xc7, 0x77, 0xc7, - 0x6b, 0xce, 0x4b, 0x49, 0xf1, 0x15, 0x1f, 0x20, 0xec, 0xa5, 0xbe, 0x2b, 0x8e, 0xbc, 0x7e, 0x24, - 0x44, 0xc4, 0x99, 0x7b, 0x48, 0x47, 0xfa, 0xb9, 0x29, 0xce, 0xb2, 0x8d, 0x0d, 0x6c, 0xf3, 0xe1, - 0x38, 0xfe, 0x3e, 0x1d, 0x39, 0x17, 0xbc, 0xd4, 0x2f, 0xad, 0xe0, 0x8f, 0xd0, 0xcb, 0xe0, 0xb4, - 0x6e, 0x3a, 0x74, 0x23, 0xe6, 0x7a, 0x3d, 0xee, 0x1f, 0xea, 0xab, 0xc0, 0x7a, 0xc5, 0x9c, 0xb8, - 0xb2, 0x29, 0xdd, 0x7a, 0x7f, 0xb8, 0x97, 0x05, 0x3b, 0xe7, 0x01, 0xb3, 0x3f, 0x6c, 0xb3, 0xdd, - 0x0c, 0x80, 0xef, 0xa3, 0x4b, 0x92, 0x45, 0xce, 0x53, 0xc6, 0x04, 0x9d, 0xd0, 0x6b, 0xc0, 0xa4, - 0x17, 0x99, 0xa4, 0xbf, 0x9b, 0x92, 0x08, 0x03, 0x4c, 0x4e, 0x63, 0x9b, 0x41, 0x13, 0xf1, 0x97, - 0x48, 0xae, 0x4a, 0x0a, 0x57, 0x50, 0xd2, 0xa3, 0x81, 0xbe, 0x06, 0x4c, 0x37, 0xff, 0xc7, 0x97, - 0x32, 0x0c, 0x30, 0x3c, 0x04, 0x84, 0x73, 0x21, 0x9e, 0x5a, 0xc1, 0xdf, 0xe4, 0x32, 0x15, 0x73, - 0xd6, 0x89, 0x34, 0xa5, 0x81, 0xbe, 0x0e, 0xd7, 0xe6, 0x46, 0x75, 0x1b, 0xf7, 0x13, 0xc2, 0x04, - 0xf1, 0xb3, 0x11, 0x87, 0x61, 0xb9, 0x58, 0xe0, 0xce, 0x59, 0x5a, 0x7f, 0xd5, 0xd0, 0x2a, 0x0c, - 0x2a, 0xfe, 0x51, 0x43, 0x35, 0xe9, 0x93, 0xf8, 0x56, 0xb5, 0xe2, 0x59, 0x7b, 0x6e, 0xbc, 0xb5, - 0x64, 0xb4, 0x1c, 0x7b, 0x63, 0xfb, 0x87, 0x3f, 0xfe, 0xfd, 0xf9, 0x8c, 0x81, 0xb7, 0xac, 0xf9, - 0xdf, 0x85, 0x81, 0xad, 0x3e, 0x1f, 0xf8, 0x17, 0x0d, 0xd5, 0xc7, 0x1e, 0x8c, 0xad, 0x05, 0x69, - 0xa6, 0x6d, 0xbc, 0xf1, 0xf6, 0xf2, 0x80, 0xe5, 0xa5, 0xc1, 0x3d, 0x15, 0xf8, 0xd7, 0x5c, 0x1a, - 0xdc, 0x9b, 0xa5, 0xa4, 0x15, 0xfc, 0x64, 0x39, 0x69, 0x45, 0xb3, 0x30, 0xde, 0x03, 0x69, 0x36, - 0xb6, 0x16, 0x48, 0x83, 0x5b, 0x6f, 0x7d, 0x9b, 0xbb, 0xd5, 0xf7, 0xf8, 0x37, 0x0d, 0x6d, 0x14, - 0x0c, 0x18, 0xdb, 0x0b, 0x52, 0xcf, 0x7e, 0x2b, 0x1a, 0xad, 0x17, 0x81, 0x28, 0xbd, 0xef, 0x82, - 0x5e, 0x13, 0xdf, 0xaa, 0xd6, 0xab, 0x7c, 0xbc, 0x28, 0xf6, 0x77, 0x0d, 0xe1, 0x59, 0xc7, 0xc4, - 0xef, 0x2f, 0x10, 0x50, 0x69, 0xdc, 0x8d, 0x9d, 0x53, 0x20, 0x55, 0x05, 0x77, 0xa0, 0x82, 0x0f, - 0xf0, 0x4e, 0x75, 0x05, 0xf3, 0xec, 0xbb, 0x50, 0xce, 0xee, 0x17, 0xcf, 0x8e, 0x9b, 0xda, 0xf3, - 0xe3, 0xa6, 0xf6, 0xcf, 0x71, 0x53, 0xfb, 0xe9, 0xa4, 0xb9, 0xf2, 0xfc, 0xa4, 0xb9, 
0xf2, 0xe7, - 0x49, 0x73, 0xe5, 0xab, 0xdb, 0x61, 0x94, 0x76, 0x8f, 0x3c, 0xd3, 0xe7, 0xfd, 0x9c, 0x1e, 0x60, - 0xe3, 0x5c, 0xc3, 0xa9, 0x6c, 0xe0, 0x65, 0x5e, 0x0d, 0xfe, 0x26, 0xbd, 0xf3, 0x5f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x41, 0xba, 0x43, 0xe4, 0x0b, 0x0b, 0x00, 0x00, + // 1064 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0xce, 0xb6, 0x4d, 0x62, 0x4f, 0x44, 0x54, 0xa6, 0x29, 0xdd, 0x6e, 0xc0, 0x0d, 0x8b, 0xd4, + 0xa6, 0x55, 0xd9, 0xc5, 0xa6, 0x11, 0x44, 0x48, 0x48, 0x75, 0x4b, 0xc1, 0x14, 0x95, 0x74, 0x9b, + 0x48, 0x08, 0x81, 0x56, 0xb3, 0xeb, 0xb1, 0xbd, 0x8a, 0x3d, 0xb3, 0xdd, 0x59, 0xbb, 0x76, 0x4b, + 0x6f, 0x78, 0x01, 0x90, 0xb8, 0xe1, 0x09, 0x82, 0xc4, 0x93, 0x14, 0x89, 0x8b, 0x4a, 0xdc, 0x70, + 0x85, 0x50, 0xc2, 0x83, 0xa0, 0x3d, 0x33, 0x6b, 0xef, 0xfa, 0x07, 0x3b, 0x15, 0x37, 0x91, 0x77, + 0xe6, 0x7c, 0xdf, 0xf9, 0xce, 0x99, 0xf3, 0x13, 0xf4, 0xb6, 0x47, 0xbc, 0x41, 0x9b, 0x33, 0xfb, + 0x29, 0x67, 0xd4, 0xe7, 0xcc, 0x0f, 0x68, 0xd4, 0xa4, 0xf6, 0xe3, 0x2e, 0x8d, 0x06, 0x56, 0x18, + 0xf1, 0x98, 0x63, 0x5d, 0x99, 0x58, 0x39, 0x13, 0xab, 0x57, 0x36, 0x36, 0x9a, 0xbc, 0xc9, 0xc1, + 0xc8, 0x4e, 0x7e, 0x49, 0x7b, 0xe3, 0xcd, 0x26, 0xe7, 0xcd, 0x36, 0xb5, 0x49, 0x18, 0xd8, 0x84, + 0x31, 0x1e, 0x93, 0x38, 0xe0, 0x4c, 0xa4, 0xb7, 0x31, 0x65, 0x75, 0x1a, 0x75, 0x02, 0x16, 0xdb, + 0xf1, 0x20, 0xa4, 0x42, 0xfe, 0x55, 0xb7, 0x6f, 0x65, 0x6e, 0xfd, 0x68, 0x10, 0xc6, 0xdc, 0x0e, + 0x23, 0xce, 0x1b, 0xea, 0xfa, 0x86, 0xcf, 0x45, 0x87, 0x0b, 0xdb, 0x23, 0x42, 0x69, 0xb4, 0x7b, + 0x65, 0x8f, 0xc6, 0xa4, 0x6c, 0x87, 0xa4, 0x19, 0x30, 0xf0, 0xa4, 0x6c, 0x4b, 0x69, 0x64, 0x5e, + 0xec, 0xfb, 0x2d, 0xea, 0x1f, 0x86, 0x1c, 0x7c, 0xf6, 0xd5, 0xfd, 0xf5, 0xe9, 0xf7, 0xb9, 0x2f, + 0x65, 0x3a, 0x4c, 0xd2, 0xe8, 0x26, 0x60, 0xcd, 0x6c, 0x92, 0x8c, 0xab, 0xd3, 0x4d, 0x26, 0xa8, + 0xcc, 0xd4, 0x8e, 0x86, 0xdc, 0x6f, 0x25, 0x26, 0xbd, 0xf2, 0xf0, 0xf7, 0xb8, 0x4d, 0xfe, 0x4d, + 0x42, 0x12, 0x91, 0x8e, 0x18, 0x57, 0x9f, 0xb7, 0xc9, 0x3f, 0x11, 0x98, 0x9a, 0x1b, 0x08, 0x3f, + 0x4c, 0x94, 0xee, 0x01, 0xde, 0xa1, 0x8f, 0xbb, 0x54, 0xc4, 0xe6, 0x01, 0xba, 0x90, 0x3b, 0x15, + 0x21, 0x67, 0x82, 0xe2, 0x8f, 0xd1, 0x8a, 0xf4, 0xa3, 0x6b, 0x5b, 0xda, 0xf6, 0x5a, 0x65, 0xcb, + 0x9a, 0xf5, 0xfa, 0x96, 0x44, 0x56, 0xcf, 0xbd, 0xf8, 0xeb, 0xca, 0x92, 0xa3, 0x50, 0xe6, 0x25, + 0x74, 0x11, 0x68, 0xef, 0xb4, 0x48, 0xc0, 0xbe, 0x08, 0x44, 0x9c, 0xfa, 0xdb, 0x41, 0x6f, 0x8c, + 0x5f, 0x28, 0x97, 0x9b, 0xa8, 0xe8, 0x27, 0x87, 0x6e, 0x50, 0x4f, 0xbc, 0x9e, 0xdd, 0x2e, 0x3a, + 0x05, 0x38, 0xa8, 0xd5, 0x85, 0x59, 0xc9, 0xf2, 0xd5, 0x58, 0x83, 0x2b, 0x3e, 0x7c, 0x19, 0x15, + 0x52, 0x14, 0x48, 0x2d, 0x3a, 0xab, 0x0a, 0x64, 0x7e, 0x93, 0x75, 0x25, 0x31, 0xca, 0x55, 0x15, + 0x21, 0x05, 0x62, 0x0d, 0xae, 0x22, 0x7c, 0x67, 0x76, 0x84, 0x23, 0x02, 0xa9, 0x30, 0xf9, 0x69, + 0xee, 0x23, 0x03, 0xd8, 0x3f, 0x49, 0x1e, 0x6d, 0x42, 0xd6, 0x26, 0x2a, 0xc2, 0x6b, 0xba, 0xac, + 0xdb, 0x01, 0x07, 0xe7, 0x9c, 0x02, 0x1c, 0x3c, 0xe8, 0x76, 0x72, 0x9a, 0xcf, 0xe4, 0x35, 0x13, + 0xb4, 0x39, 0x95, 0xf5, 0x7f, 0x14, 0xfe, 0x1d, 0xba, 0x04, 0x2e, 0x92, 0xe4, 0x7f, 0x46, 0x49, + 0x9d, 0x46, 0x62, 0x7e, 0x32, 0xf1, 0x3d, 0x84, 0x46, 0xad, 0x05, 0xaa, 0xd7, 0x2a, 0x57, 0x2d, + 0xd9, 0x87, 0x56, 0xd2, 0x87, 0x96, 0x6c, 0x03, 0xd5, 0x87, 0xd6, 0x1e, 0x69, 0x52, 0x45, 0xeb, + 0x64, 0x90, 0xe6, 0x91, 0x86, 0xf4, 0x49, 0xf7, 0x2a, 0xbc, 0xdb, 0x68, 0xb5, 0x25, 0x8f, 0xa0, + 0x00, 0xd6, 0x2a, 0xd7, 0x66, 0xc7, 0x56, 0x63, 0x75, 0xda, 0xa7, 0x75, 0x49, 0xe1, 0xa4, 
0x38, + 0xfc, 0xe9, 0x14, 0x9d, 0xd7, 0xe6, 0xea, 0x94, 0xfe, 0x73, 0x42, 0x1f, 0xa2, 0x12, 0xe8, 0xbc, + 0x17, 0x30, 0xd2, 0x0e, 0x9e, 0xd2, 0xfa, 0x29, 0x4a, 0x0f, 0x6f, 0xa0, 0xe5, 0x30, 0xe2, 0x3d, + 0x0a, 0x02, 0x0a, 0x8e, 0xfc, 0x30, 0x8f, 0x96, 0xd1, 0x95, 0x99, 0x9c, 0x2a, 0x05, 0x07, 0x68, + 0xa3, 0x91, 0xde, 0xba, 0xaf, 0xf6, 0xd6, 0xb8, 0x31, 0x41, 0x8f, 0x77, 0x11, 0x92, 0xf5, 0x08, + 0x64, 0x32, 0x2d, 0xc6, 0x90, 0x6c, 0x38, 0x78, 0x7a, 0x65, 0x0b, 0x2a, 0xcf, 0x91, 0xd5, 0x0b, + 0xd0, 0x07, 0x68, 0x3d, 0x22, 0x4f, 0xdc, 0xd1, 0x08, 0xd3, 0xcf, 0xaa, 0xac, 0xa6, 0xf0, 0xdc, + 0xac, 0x4b, 0x38, 0x1c, 0xf2, 0xe4, 0xce, 0xf0, 0xcc, 0x79, 0x2d, 0xca, 0x7e, 0xe2, 0x03, 0x84, + 0xbd, 0xd8, 0x77, 0x45, 0xd7, 0xeb, 0x04, 0x42, 0x04, 0x9c, 0xb9, 0x87, 0x74, 0xa0, 0x9f, 0x1b, + 0xe3, 0xcc, 0xcf, 0xdf, 0x5e, 0xd9, 0x7a, 0x34, 0xb4, 0xbf, 0x4f, 0x07, 0xce, 0x79, 0x2f, 0xf6, + 0x73, 0x27, 0xf8, 0x2e, 0x7a, 0x1d, 0x56, 0x84, 0x1b, 0xf7, 0xdd, 0x80, 0xb9, 0x5e, 0x9b, 0xfb, + 0x87, 0xfa, 0x32, 0xb0, 0x5e, 0xb6, 0x46, 0xeb, 0xc4, 0x92, 0x6b, 0x66, 0xbf, 0xbf, 0x97, 0x18, + 0x3b, 0xeb, 0x80, 0xd9, 0xef, 0xd7, 0x58, 0x35, 0x01, 0xe0, 0xfb, 0xe8, 0xa2, 0x64, 0x91, 0xf5, + 0x94, 0x30, 0x41, 0x26, 0xf4, 0x15, 0x60, 0xd2, 0xb3, 0x4c, 0x72, 0x31, 0x59, 0x92, 0x08, 0x03, + 0x4c, 0x56, 0x63, 0x8d, 0x41, 0x12, 0xf1, 0x57, 0x48, 0x9e, 0x4a, 0x0a, 0x57, 0x50, 0xd2, 0xa6, + 0x75, 0x7d, 0x15, 0x98, 0x6e, 0xfc, 0xc7, 0x40, 0x4d, 0x30, 0xc0, 0xf0, 0x08, 0x10, 0xce, 0xf9, + 0x70, 0xec, 0x04, 0x7f, 0x9b, 0xca, 0x54, 0xcc, 0x49, 0x26, 0xe2, 0x98, 0xd6, 0xf5, 0x02, 0xb4, + 0xcd, 0xf5, 0xd9, 0x69, 0xdc, 0x8f, 0x08, 0x13, 0xc4, 0x4f, 0x4a, 0x1c, 0x8a, 0xe5, 0x42, 0x86, + 0x3b, 0x65, 0xa9, 0x1c, 0x15, 0xd0, 0x32, 0x14, 0x2a, 0xfe, 0x41, 0x43, 0x2b, 0x72, 0xc0, 0xe3, + 0x9b, 0xb3, 0x15, 0x4f, 0xee, 0x15, 0xe3, 0xdd, 0x05, 0xad, 0x65, 0xd9, 0x9b, 0xdb, 0xdf, 0xff, + 0xf1, 0xcf, 0x4f, 0x67, 0x4c, 0xbc, 0x65, 0x4f, 0x5f, 0x68, 0xbd, 0xb2, 0xda, 0x7b, 0xf8, 0x67, + 0x0d, 0x15, 0x87, 0xcb, 0x03, 0xdb, 0x73, 0xdc, 0x8c, 0xef, 0x1f, 0xe3, 0xbd, 0xc5, 0x01, 0x8b, + 0x4b, 0x83, 0x3e, 0x15, 0xf8, 0x97, 0x54, 0x1a, 0xf4, 0xcd, 0x42, 0xd2, 0x32, 0xf3, 0x64, 0x31, + 0x69, 0xd9, 0x61, 0x61, 0x7e, 0x00, 0xd2, 0xca, 0xd8, 0x9e, 0x23, 0x0d, 0xba, 0xde, 0x7e, 0x96, + 0x4e, 0xab, 0xe7, 0xf8, 0x37, 0x0d, 0xad, 0xe7, 0x57, 0x0c, 0xbe, 0x35, 0xc7, 0xfb, 0xd4, 0x3d, + 0x67, 0xec, 0x9c, 0x12, 0xa5, 0x84, 0x7f, 0x0e, 0xc2, 0xef, 0xe2, 0xea, 0x29, 0x85, 0xcb, 0x7f, + 0x91, 0x84, 0xfd, 0x6c, 0xb8, 0x5c, 0x9f, 0xe3, 0x5f, 0x35, 0xb4, 0x96, 0x59, 0x26, 0xb8, 0x3c, + 0x47, 0xd2, 0xe4, 0xde, 0x33, 0x2a, 0xa7, 0x81, 0xa8, 0x10, 0x6e, 0x41, 0x08, 0x16, 0xbe, 0x39, + 0x3b, 0x04, 0xb5, 0x93, 0xb2, 0x89, 0xff, 0x5d, 0x43, 0x78, 0x72, 0xfa, 0xe3, 0x0f, 0xe7, 0x08, + 0x98, 0xb9, 0x84, 0x8c, 0xdd, 0x57, 0x40, 0xaa, 0x08, 0x6e, 0x43, 0x04, 0x1f, 0xe1, 0xdd, 0xd9, + 0x11, 0x4c, 0x5b, 0x45, 0x99, 0x70, 0xaa, 0x5f, 0xbe, 0x38, 0x2e, 0x69, 0x2f, 0x8f, 0x4b, 0xda, + 0xdf, 0xc7, 0x25, 0xed, 0xc7, 0x93, 0xd2, 0xd2, 0xcb, 0x93, 0xd2, 0xd2, 0x9f, 0x27, 0xa5, 0xa5, + 0xaf, 0x77, 0x9a, 0x41, 0xdc, 0xea, 0x7a, 0x96, 0xcf, 0x3b, 0x29, 0x3d, 0xc0, 0x86, 0xbe, 0xfa, + 0x63, 0xde, 0x60, 0x2e, 0x7b, 0x2b, 0xf0, 0xbf, 0xea, 0xfb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0x39, 0xca, 0xb6, 0x51, 0x90, 0x0c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -664,6 +770,8 @@ type QueryClient interface { ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) // ChainInfo queries the latest info of a chain in Babylon's view ChainInfo(ctx context.Context, in *QueryChainInfoRequest, opts ...grpc.CallOption) (*QueryChainInfoResponse, error) + // EpochChainInfo queries the latest info of a chain in a given epoch of Babylon's view + EpochChainInfo(ctx context.Context, in *QueryEpochChainInfoRequest, opts ...grpc.CallOption) (*QueryEpochChainInfoResponse, error) // ListHeaders queries the headers of a chain in Babylon's view, with pagination support ListHeaders(ctx context.Context, in *QueryListHeadersRequest, opts ...grpc.CallOption) (*QueryListHeadersResponse, error) // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs @@ -705,6 +813,15 @@ func (c *queryClient) ChainInfo(ctx context.Context, in *QueryChainInfoRequest, return out, nil } +func (c *queryClient) EpochChainInfo(ctx context.Context, in *QueryEpochChainInfoRequest, opts ...grpc.CallOption) (*QueryEpochChainInfoResponse, error) { + out := new(QueryEpochChainInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/EpochChainInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *queryClient) ListHeaders(ctx context.Context, in *QueryListHeadersRequest, opts ...grpc.CallOption) (*QueryListHeadersResponse, error) { out := new(QueryListHeadersResponse) err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ListHeaders", in, out, opts...) @@ -731,6 +848,8 @@ type QueryServer interface { ChainList(context.Context, *QueryChainListRequest) (*QueryChainListResponse, error) // ChainInfo queries the latest info of a chain in Babylon's view ChainInfo(context.Context, *QueryChainInfoRequest) (*QueryChainInfoResponse, error) + // EpochChainInfo queries the latest info of a chain in a given epoch of Babylon's view + EpochChainInfo(context.Context, *QueryEpochChainInfoRequest) (*QueryEpochChainInfoResponse, error) // ListHeaders queries the headers of a chain in Babylon's view, with pagination support ListHeaders(context.Context, *QueryListHeadersRequest) (*QueryListHeadersResponse, error) // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs @@ -750,6 +869,9 @@ func (*UnimplementedQueryServer) ChainList(ctx context.Context, req *QueryChainL func (*UnimplementedQueryServer) ChainInfo(ctx context.Context, req *QueryChainInfoRequest) (*QueryChainInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ChainInfo not implemented") } +func (*UnimplementedQueryServer) EpochChainInfo(ctx context.Context, req *QueryEpochChainInfoRequest) (*QueryEpochChainInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EpochChainInfo not implemented") +} func (*UnimplementedQueryServer) ListHeaders(ctx context.Context, req *QueryListHeadersRequest) (*QueryListHeadersResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListHeaders not implemented") } @@ -815,6 +937,24 @@ func _Query_ChainInfo_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } +func _Query_EpochChainInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryEpochChainInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(QueryServer).EpochChainInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/EpochChainInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).EpochChainInfo(ctx, req.(*QueryEpochChainInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Query_ListHeaders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryListHeadersRequest) if err := dec(in); err != nil { @@ -867,6 +1007,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "ChainInfo", Handler: _Query_ChainInfo_Handler, }, + { + MethodName: "EpochChainInfo", + Handler: _Query_EpochChainInfo_Handler, + }, { MethodName: "ListHeaders", Handler: _Query_ListHeaders_Handler, @@ -1056,6 +1200,76 @@ func (m *QueryChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *QueryEpochChainInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochChainInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochChainInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + if m.EpochNum != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EpochNum)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryEpochChainInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochChainInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ChainInfo != nil { + { + size, err := m.ChainInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *QueryListHeadersRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1389,6 +1603,35 @@ func (m *QueryChainInfoResponse) Size() (n int) { return n } +func (m *QueryEpochChainInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryEpochChainInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChainInfo != nil { + l = m.ChainInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + func (m *QueryListHeadersRequest) Size() (n int) { if m == nil { return 0 @@ -1923,6 +2166,193 @@ func (m *QueryChainInfoResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryEpochChainInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < 
l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochChainInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochChainInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + } + m.EpochNum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryEpochChainInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochChainInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochChainInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ChainInfo == nil { + m.ChainInfo = &ChainInfo{} + } + if err := m.ChainInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryListHeadersRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/zoneconcierge/types/query.pb.gw.go b/x/zoneconcierge/types/query.pb.gw.go index 7797403d4..96e69913c 100644 --- a/x/zoneconcierge/types/query.pb.gw.go +++ b/x/zoneconcierge/types/query.pb.gw.go @@ -123,6 +123,82 @@ func local_request_Query_ChainInfo_0(ctx context.Context, marshaler runtime.Mars } +func request_Query_EpochChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochChainInfoRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := client.EpochChainInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_EpochChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochChainInfoRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := server.EpochChainInfo(ctx, &protoReq) + return msg, metadata, err + +} + var ( filter_Query_ListHeaders_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) @@ -342,6 +418,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_EpochChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := 
runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_EpochChainInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EpochChainInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_ListHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -489,6 +588,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_EpochChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_EpochChainInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EpochChainInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_ListHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -539,6 +658,8 @@ var ( pattern_Query_ChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_EpochChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "chain_id", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_ListHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "headers", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_FinalizedChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) @@ -551,6 +672,8 @@ var ( forward_Query_ChainInfo_0 = runtime.ForwardResponseMessage + forward_Query_EpochChainInfo_0 = runtime.ForwardResponseMessage + forward_Query_ListHeaders_0 = runtime.ForwardResponseMessage forward_Query_FinalizedChainInfo_0 = runtime.ForwardResponseMessage From 84de8f6343f9113becd35e48f06e023070b1d6a8 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Fri, 6 Jan 2023 12:16:51 +1100 Subject: [PATCH 08/37] zoneconcierge: API for querying headers in a given epoch (#261) --- client/docs/swagger-ui/swagger.yaml | 464 ++++++++++++++ 
proto/babylon/zoneconcierge/query.proto | 16 + .../keeper/canonical_chain_indexer_test.go | 3 +- .../keeper/epoch_chain_info_indexer.go | 40 ++ .../keeper/epoch_chain_info_indexer_test.go | 69 ++- x/zoneconcierge/keeper/fork_indexer_test.go | 3 +- x/zoneconcierge/keeper/grpc_query.go | 26 +- x/zoneconcierge/keeper/grpc_query_test.go | 85 ++- x/zoneconcierge/keeper/keeper_test.go | 8 +- .../keeper/proof_tx_in_block_test.go | 4 +- x/zoneconcierge/keeper/query_kvstore_test.go | 4 +- x/zoneconcierge/types/errors.go | 7 +- x/zoneconcierge/types/query.pb.go | 570 +++++++++++++++--- x/zoneconcierge/types/query.pb.gw.go | 123 ++++ 14 files changed, 1335 insertions(+), 87 deletions(-) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index 58561db06..b9301446d 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -11590,6 +11590,203 @@ definitions: - The validator set is committed to the `app_hash` of the sealer header babylon.zoneconcierge.v1.QueryChainInfoResponse: + type: object + properties: + chain_info: + title: chain_info is the info of the CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in the canonical chain of CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in + the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this header + + (babylon_block_height, babylon_tx_hash) jointly provides the position + of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + babylon.zoneconcierge.v1.Params: + type: object + description: Params defines the parameters for the module. 
+ babylon.zoneconcierge.v1.ProofEpochSealed: + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, and its + bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on `last_commit_hash` + of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: |- + ProofOp defines an operation used for calculating Merkle root + The data could be arbitrary format, providing nessecary data + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's validator set is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: |- + ProofOp defines an operation used for calculating Merkle root + The data could be arbitrary format, providing nessecary data + for example neighbouring node hash + title: >- + ProofEpochSealed is the proof that an epoch is sealed by the sealer + header, i.e., the 2nd header of the next epoch + + With the access of metadata + + - Metadata of this epoch, which includes the sealer header + + - Raw checkpoint of this epoch + + The verifier can perform the following verification rules: + + - The raw checkpoint's `last_commit_hash` is same as in the sealer header + + - More than 1/3 (in voting power) validators in the validator set of this + epoch have signed `last_commit_hash` of the sealer header + + - The epoch medatata is committed to the `app_hash` of the sealer header + + - The validator set is committed to the `app_hash` of the sealer header + babylon.zoneconcierge.v1.QueryChainInfoResponse: type: object properties: chain_info: @@ -12867,6 +13064,273 @@ definitions: description: >- QueryFinalizedChainInfoResponse is response type for the Query/FinalizedChainInfo RPC method. + babylon.zoneconcierge.v1.QueryListEpochHeadersResponse: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes + this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this header + + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: headers is the list of headers + description: >- + QueryListEpochHeadersResponse is response type for the + Query/ListEpochHeaders RPC method. + babylon.zoneconcierge.v1.QueryListHeadersResponse: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes + this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this header + + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: headers is the list of headers + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + description: >- + QueryFinalizedChainInfoResponse is response type for the + Query/FinalizedChainInfo RPC method. babylon.zoneconcierge.v1.QueryListHeadersResponse: type: object properties: diff --git a/proto/babylon/zoneconcierge/query.proto b/proto/babylon/zoneconcierge/query.proto index 67342d490..374943457 100644 --- a/proto/babylon/zoneconcierge/query.proto +++ b/proto/babylon/zoneconcierge/query.proto @@ -38,6 +38,10 @@ service Query { rpc ListHeaders(QueryListHeadersRequest) returns (QueryListHeadersResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/headers/{chain_id}"; } + // ListEpochHeaders queries the headers of a chain timestamped in a given epoch of Babylon, with pagination support + rpc ListEpochHeaders(QueryListEpochHeadersRequest) returns (QueryListEpochHeadersResponse) { + option (google.api.http).get = "/babylon/zoneconcierge/v1/headers/{chain_id}/epochs/{epoch_num}"; + } // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs rpc FinalizedChainInfo(QueryFinalizedChainInfoRequest) returns (QueryFinalizedChainInfoResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}"; @@ -99,6 +103,18 @@ message QueryListHeadersResponse { cosmos.base.query.v1beta1.PageResponse pagination = 2; } +// QueryListEpochHeadersRequest is request type for the Query/ListEpochHeaders RPC method. +message QueryListEpochHeadersRequest { + uint64 epoch_num = 1; + string chain_id = 2; +} + +// QueryListEpochHeadersResponse is response type for the Query/ListEpochHeaders RPC method. +message QueryListEpochHeadersResponse { + // headers is the list of headers + repeated babylon.zoneconcierge.v1.IndexedHeader headers = 1; +} + // QueryFinalizedChainInfoRequest is request type for the Query/FinalizedChainInfo RPC method. 
message QueryFinalizedChainInfoRequest { // chain_id is the ID of the CZ diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go index d6041daf6..7f7d8f53a 100644 --- a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go +++ b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go @@ -14,7 +14,8 @@ func FuzzCanonicalChainIndexer(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, czChain, zcKeeper := SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go index 6d1d550a0..121c8bcef 100644 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go +++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go @@ -1,6 +1,7 @@ package keeper import ( + bbn "github.com/babylonchain/babylon/types" "github.com/babylonchain/babylon/x/zoneconcierge/types" "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" @@ -19,6 +20,45 @@ func (k Keeper) GetEpochChainInfo(ctx sdk.Context, chainID string, epochNumber u return &chainInfo, nil } +// GetEpochHeaders gets the headers timestamped in a given epoch, in the ascending order +func (k Keeper) GetEpochHeaders(ctx sdk.Context, chainID string, epochNumber uint64) ([]*types.IndexedHeader, error) { + headers := []*types.IndexedHeader{} + + // find the last timestamped header of this chain in the epoch + epochChainInfo, err := k.GetEpochChainInfo(ctx, chainID, epochNumber) + if err != nil { + return nil, err + } + // it's possible that this epoch's snapshot is not updated for many epochs + // this implies that this epoch does not timestamp any header for this chain at all + if epochChainInfo.LatestHeader.BabylonEpoch < epochNumber { + return nil, types.ErrEpochHeadersNotFound + } + // now we have the last header in this epoch + headers = append(headers, epochChainInfo.LatestHeader) + + // append all previous headers until reaching the previous epoch + canonicalChainStore := k.canonicalChainStore(ctx, chainID) + lastHeaderKey := sdk.Uint64ToBigEndian(epochChainInfo.LatestHeader.Height) + // NOTE: even in ReverseIterator, start and end should still be specified in ascending order + canonicalChainIter := canonicalChainStore.ReverseIterator(nil, lastHeaderKey) + defer canonicalChainIter.Close() + for ; canonicalChainIter.Valid(); canonicalChainIter.Next() { + var prevHeader types.IndexedHeader + k.cdc.MustUnmarshal(canonicalChainIter.Value(), &prevHeader) + if prevHeader.BabylonEpoch < epochNumber { + // we have reached the previous epoch, break the loop + break + } + headers = append(headers, &prevHeader) + } + + // reverse the list so that it remains ascending order + bbn.Reverse(headers) + + return headers, nil +} + // recordEpochChainInfo records the chain info for a given epoch number of given chain ID // where the latest chain info is retrieved from the chain info indexer func (k Keeper) recordEpochChainInfo(ctx sdk.Context, chainID string, epochNumber uint64) { diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go index dcc5bdd49..82ced134b 100644 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go +++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go @@ -5,6 +5,7 @@ import ( "testing" 
"github.com/babylonchain/babylon/testutil/datagen" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" "github.com/stretchr/testify/require" ) @@ -14,7 +15,8 @@ func FuzzEpochChainInfoIndexer(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, czChain, zcKeeper := SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() @@ -35,3 +37,68 @@ func FuzzEpochChainInfoIndexer(f *testing.F) { require.Equal(t, numForkHeaders, uint64(len(chainInfo.LatestForks.Headers))) }) } + +func FuzzGetEpochHeaders(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + epochingKeeper := babylonApp.EpochingKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + numReqs := datagen.RandomInt(5) + 1 + + epochNumList := []uint64{datagen.RandomInt(10) + 1} + nextHeightList := []uint64{0} + numHeadersList := []uint64{} + expectedHeadersMap := map[uint64][]*ibctmtypes.Header{} + numForkHeadersList := []uint64{} + + // we test the scenario of ending an epoch for multiple times, in order to ensure that + // consecutive epoch infos do not affect each other. + for i := uint64(0); i < numReqs; i++ { + epochNum := epochNumList[i] + // enter a random epoch + if i == 0 { + for j := uint64(0); j < epochNum; j++ { + epochingKeeper.IncEpoch(ctx) + } + } else { + for j := uint64(0); j < epochNum-epochNumList[i-1]; j++ { + epochingKeeper.IncEpoch(ctx) + } + } + + // generate a random number of headers and fork headers + numHeadersList = append(numHeadersList, datagen.RandomInt(100)+1) + numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(10)+1) + // trigger hooks to append these headers and fork headers + expectedHeaders, _ := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i]) + expectedHeadersMap[epochNum] = expectedHeaders + // prepare nextHeight for the next request + nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i]) + + // simulate the scenario that a random epoch has ended + hooks.AfterEpochEnds(ctx, epochNum) + // prepare epochNum for the next request + epochNumList = append(epochNumList, epochNum+datagen.RandomInt(10)+1) + } + + // attest the correctness of epoch info for each tested epoch + for i := uint64(0); i < numReqs; i++ { + epochNum := epochNumList[i] + // check if the headers are same as expected + headers, err := zcKeeper.GetEpochHeaders(ctx, czChain.ChainID, epochNum) + require.NoError(t, err) + require.Equal(t, len(expectedHeadersMap[epochNum]), len(headers)) + for j := 0; j < len(expectedHeadersMap[epochNum]); j++ { + require.Equal(t, expectedHeadersMap[epochNum][j].Header.LastCommitHash, headers[j].Hash) + } + } + }) +} diff --git a/x/zoneconcierge/keeper/fork_indexer_test.go b/x/zoneconcierge/keeper/fork_indexer_test.go index 42c65f29e..1f48a4420 100644 --- a/x/zoneconcierge/keeper/fork_indexer_test.go +++ b/x/zoneconcierge/keeper/fork_indexer_test.go @@ -14,7 +14,8 @@ func FuzzForkIndexer(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, czChain, zcKeeper := SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := 
babylonChain.GetContext() hooks := zcKeeper.Hooks() diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index 205453dc9..ea8190fe4 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -97,6 +97,30 @@ func (k Keeper) ListHeaders(c context.Context, req *types.QueryListHeadersReques return resp, nil } +// ListEpochHeaders returns all headers of a chain with given ID +// TODO: support pagination in this RPC +func (k Keeper) ListEpochHeaders(c context.Context, req *types.QueryListEpochHeadersRequest) (*types.QueryListEpochHeadersResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + if len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") + } + + ctx := sdk.UnwrapSDKContext(c) + + headers, err := k.GetEpochHeaders(ctx, req.ChainId, req.EpochNum) + if err != nil { + return nil, err + } + + resp := &types.QueryListEpochHeadersResponse{ + Headers: headers, + } + return resp, nil +} + func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedChainInfoRequest) (*types.QueryFinalizedChainInfoResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") @@ -122,7 +146,7 @@ func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedC // It's possible that the chain info's epoch is way before the last finalised epoch // e.g., when there is no relayer for many epochs - // NOTE: if an epoch is finalisedm then all of its previous epochs are also finalised + // NOTE: if an epoch is finalised then all of its previous epochs are also finalised if chainInfo.LatestHeader.BabylonEpoch < finalizedEpoch { finalizedEpoch = chainInfo.LatestHeader.BabylonEpoch } diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go index ad13d2a3f..8b13a9f5f 100644 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ b/x/zoneconcierge/keeper/grpc_query_test.go @@ -11,6 +11,7 @@ import ( checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types" "github.com/cosmos/cosmos-sdk/types/query" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" @@ -24,7 +25,8 @@ func FuzzChainList(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, _, zcKeeper := SetupTest(t) + _, babylonChain, _, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() @@ -66,7 +68,8 @@ func FuzzChainInfo(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, czChain, zcKeeper := SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() @@ -91,7 +94,8 @@ func FuzzEpochChainInfo(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, czChain, zcKeeper := SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() @@ -137,7 +141,8 @@ func FuzzListHeaders(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { 
rand.Seed(seed) - _, babylonChain, czChain, zcKeeper := SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() @@ -164,6 +169,78 @@ func FuzzListHeaders(f *testing.F) { }) } +func FuzzListEpochHeaders(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + epochingKeeper := babylonApp.EpochingKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + numReqs := datagen.RandomInt(5) + 1 + + epochNumList := []uint64{datagen.RandomInt(10) + 1} + nextHeightList := []uint64{0} + numHeadersList := []uint64{} + expectedHeadersMap := map[uint64][]*ibctmtypes.Header{} + numForkHeadersList := []uint64{} + + // we test the scenario of ending an epoch for multiple times, in order to ensure that + // consecutive epoch infos do not affect each other. + for i := uint64(0); i < numReqs; i++ { + epochNum := epochNumList[i] + // enter a random epoch + if i == 0 { + for j := uint64(0); j < epochNum; j++ { + epochingKeeper.IncEpoch(ctx) + } + } else { + for j := uint64(0); j < epochNum-epochNumList[i-1]; j++ { + epochingKeeper.IncEpoch(ctx) + } + } + + // generate a random number of headers and fork headers + numHeadersList = append(numHeadersList, datagen.RandomInt(100)+1) + numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(10)+1) + // trigger hooks to append these headers and fork headers + expectedHeaders, _ := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i]) + expectedHeadersMap[epochNum] = expectedHeaders + // prepare nextHeight for the next request + nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i]) + + // simulate the scenario that a random epoch has ended + hooks.AfterEpochEnds(ctx, epochNum) + // prepare epochNum for the next request + epochNumList = append(epochNumList, epochNum+datagen.RandomInt(10)+1) + } + + // attest the correctness of epoch info for each tested epoch + for i := uint64(0); i < numReqs; i++ { + epochNum := epochNumList[i] + // make request + req := &zctypes.QueryListEpochHeadersRequest{ + ChainId: czChain.ChainID, + EpochNum: epochNum, + } + resp, err := zcKeeper.ListEpochHeaders(ctx, req) + require.NoError(t, err) + + // check if the headers are same as expected + headers := resp.Headers + require.Equal(t, len(expectedHeadersMap[epochNum]), len(headers)) + for j := 0; j < len(expectedHeadersMap[epochNum]); j++ { + require.Equal(t, expectedHeadersMap[epochNum][j].Header.LastCommitHash, headers[j].Hash) + } + } + }) +} + func FuzzFinalizedChainInfo(f *testing.F) { datagen.AddRandomSeedsToFuzzer(f, 10) diff --git a/x/zoneconcierge/keeper/keeper_test.go b/x/zoneconcierge/keeper/keeper_test.go index ff370bb88..1152d6cc8 100644 --- a/x/zoneconcierge/keeper/keeper_test.go +++ b/x/zoneconcierge/keeper/keeper_test.go @@ -13,13 +13,13 @@ import ( ) // SetupTest creates a coordinator with 2 test chains, and a ZoneConcierge keeper. 
-func SetupTest(t *testing.T) (*ibctesting.Coordinator, *ibctesting.TestChain, *ibctesting.TestChain, zckeeper.Keeper) { - var zcKeeper zckeeper.Keeper +func SetupTest(t *testing.T) (*ibctesting.Coordinator, *ibctesting.TestChain, *ibctesting.TestChain, *app.BabylonApp) { + var bbnApp *app.BabylonApp coordinator := ibctesting.NewCoordinator(t, 2) // replace the first test chain with a Babylon chain ibctesting.DefaultTestingAppInit = func() (ibctesting.TestingApp, map[string]json.RawMessage) { babylonApp := app.Setup(t, false) - zcKeeper = babylonApp.ZoneConciergeKeeper + bbnApp = babylonApp encCdc := app.MakeTestEncodingConfig() genesis := app.NewDefaultGenesisState(encCdc.Marshaler) return babylonApp, genesis @@ -30,7 +30,7 @@ func SetupTest(t *testing.T) (*ibctesting.Coordinator, *ibctesting.TestChain, *i babylonChain := coordinator.GetChain(ibctesting.GetChainID(1)) czChain := coordinator.GetChain(ibctesting.GetChainID(2)) - return coordinator, babylonChain, czChain, zcKeeper + return coordinator, babylonChain, czChain, bbnApp } // SimulateHeadersViaHook generates a non-zero number of canonical headers via the hook diff --git a/x/zoneconcierge/keeper/proof_tx_in_block_test.go b/x/zoneconcierge/keeper/proof_tx_in_block_test.go index 92e91e7a1..11944081a 100644 --- a/x/zoneconcierge/keeper/proof_tx_in_block_test.go +++ b/x/zoneconcierge/keeper/proof_tx_in_block_test.go @@ -31,7 +31,9 @@ func TestProveTxInBlock(t *testing.T) { err = testNetwork.WaitForNextBlock() require.NoError(t, err) - _, babylonChain, _, zcKeeper := SetupTest(t) + _, babylonChain, _, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + ctx := babylonChain.GetContext() // construct client context diff --git a/x/zoneconcierge/keeper/query_kvstore_test.go b/x/zoneconcierge/keeper/query_kvstore_test.go index aaa730ebe..06f619831 100644 --- a/x/zoneconcierge/keeper/query_kvstore_test.go +++ b/x/zoneconcierge/keeper/query_kvstore_test.go @@ -17,7 +17,9 @@ func FuzzQueryStore(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, _, zcKeeper := SetupTest(t) + _, babylonChain, _, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + babylonChain.NextBlock() babylonChain.NextBlock() diff --git a/x/zoneconcierge/types/errors.go b/x/zoneconcierge/types/errors.go index 71cba5771..416c02409 100644 --- a/x/zoneconcierge/types/errors.go +++ b/x/zoneconcierge/types/errors.go @@ -17,7 +17,8 @@ var ( ErrForkNotFound = sdkerrors.Register(ModuleName, 1106, "cannot find fork") ErrInvalidForks = sdkerrors.Register(ModuleName, 1107, "input forks is invalid") ErrEpochChainInfoNotFound = sdkerrors.Register(ModuleName, 1108, "no chain info exists at this epoch") - ErrFinalizedEpochNotFound = sdkerrors.Register(ModuleName, 1109, "cannot find a finalized epoch") - ErrInvalidProofEpochSealed = sdkerrors.Register(ModuleName, 1110, "invalid ProofEpochSealed") - ErrInvalidMerkleProof = sdkerrors.Register(ModuleName, 1111, "invalid Merkle inclusion proof") + ErrEpochHeadersNotFound = sdkerrors.Register(ModuleName, 1109, "no timestamped header exists at this epoch") + ErrFinalizedEpochNotFound = sdkerrors.Register(ModuleName, 1110, "cannot find a finalized epoch") + ErrInvalidProofEpochSealed = sdkerrors.Register(ModuleName, 1111, "invalid ProofEpochSealed") + ErrInvalidMerkleProof = sdkerrors.Register(ModuleName, 1112, "invalid Merkle inclusion proof") ) diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go index 32252084d..1e6a5e6ce 100644 --- 
a/x/zoneconcierge/types/query.pb.go +++ b/x/zoneconcierge/types/query.pb.go @@ -499,6 +499,105 @@ func (m *QueryListHeadersResponse) GetPagination() *query.PageResponse { return nil } +// QueryListEpochHeadersRequest is request type for the Query/ListEpochHeaders RPC method. +type QueryListEpochHeadersRequest struct { + EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *QueryListEpochHeadersRequest) Reset() { *m = QueryListEpochHeadersRequest{} } +func (m *QueryListEpochHeadersRequest) String() string { return proto.CompactTextString(m) } +func (*QueryListEpochHeadersRequest) ProtoMessage() {} +func (*QueryListEpochHeadersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{10} +} +func (m *QueryListEpochHeadersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryListEpochHeadersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryListEpochHeadersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryListEpochHeadersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryListEpochHeadersRequest.Merge(m, src) +} +func (m *QueryListEpochHeadersRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryListEpochHeadersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryListEpochHeadersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryListEpochHeadersRequest proto.InternalMessageInfo + +func (m *QueryListEpochHeadersRequest) GetEpochNum() uint64 { + if m != nil { + return m.EpochNum + } + return 0 +} + +func (m *QueryListEpochHeadersRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// QueryListEpochHeadersResponse is response type for the Query/ListEpochHeaders RPC method. 
+type QueryListEpochHeadersResponse struct { + // headers is the list of headers + Headers []*IndexedHeader `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (m *QueryListEpochHeadersResponse) Reset() { *m = QueryListEpochHeadersResponse{} } +func (m *QueryListEpochHeadersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryListEpochHeadersResponse) ProtoMessage() {} +func (*QueryListEpochHeadersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{11} +} +func (m *QueryListEpochHeadersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryListEpochHeadersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryListEpochHeadersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryListEpochHeadersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryListEpochHeadersResponse.Merge(m, src) +} +func (m *QueryListEpochHeadersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryListEpochHeadersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryListEpochHeadersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryListEpochHeadersResponse proto.InternalMessageInfo + +func (m *QueryListEpochHeadersResponse) GetHeaders() []*IndexedHeader { + if m != nil { + return m.Headers + } + return nil +} + // QueryFinalizedChainInfoRequest is request type for the Query/FinalizedChainInfo RPC method. type QueryFinalizedChainInfoRequest struct { // chain_id is the ID of the CZ @@ -511,7 +610,7 @@ func (m *QueryFinalizedChainInfoRequest) Reset() { *m = QueryFinalizedCh func (m *QueryFinalizedChainInfoRequest) String() string { return proto.CompactTextString(m) } func (*QueryFinalizedChainInfoRequest) ProtoMessage() {} func (*QueryFinalizedChainInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{10} + return fileDescriptor_2caab7ee15063236, []int{12} } func (m *QueryFinalizedChainInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -579,7 +678,7 @@ func (m *QueryFinalizedChainInfoResponse) Reset() { *m = QueryFinalizedC func (m *QueryFinalizedChainInfoResponse) String() string { return proto.CompactTextString(m) } func (*QueryFinalizedChainInfoResponse) ProtoMessage() {} func (*QueryFinalizedChainInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{11} + return fileDescriptor_2caab7ee15063236, []int{13} } func (m *QueryFinalizedChainInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -675,6 +774,8 @@ func init() { proto.RegisterType((*QueryEpochChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryEpochChainInfoResponse") proto.RegisterType((*QueryListHeadersRequest)(nil), "babylon.zoneconcierge.v1.QueryListHeadersRequest") proto.RegisterType((*QueryListHeadersResponse)(nil), "babylon.zoneconcierge.v1.QueryListHeadersResponse") + proto.RegisterType((*QueryListEpochHeadersRequest)(nil), "babylon.zoneconcierge.v1.QueryListEpochHeadersRequest") + proto.RegisterType((*QueryListEpochHeadersResponse)(nil), "babylon.zoneconcierge.v1.QueryListEpochHeadersResponse") proto.RegisterType((*QueryFinalizedChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoRequest") proto.RegisterType((*QueryFinalizedChainInfoResponse)(nil), 
"babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse") } @@ -682,74 +783,77 @@ func init() { func init() { proto.RegisterFile("babylon/zoneconcierge/query.proto", fileDescriptor_2caab7ee15063236) } var fileDescriptor_2caab7ee15063236 = []byte{ - // 1064 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0x1b, 0x45, - 0x14, 0xce, 0xb6, 0x4d, 0x62, 0x4f, 0x44, 0x54, 0xa6, 0x29, 0xdd, 0x6e, 0xc0, 0x0d, 0x8b, 0xd4, - 0xa6, 0x55, 0xd9, 0xc5, 0xa6, 0x11, 0x44, 0x48, 0x48, 0x75, 0x4b, 0xc1, 0x14, 0x95, 0x74, 0x9b, - 0x48, 0x08, 0x81, 0x56, 0xb3, 0xeb, 0xb1, 0xbd, 0x8a, 0x3d, 0xb3, 0xdd, 0x59, 0xbb, 0x76, 0x4b, - 0x6f, 0x78, 0x01, 0x90, 0xb8, 0xe1, 0x09, 0x82, 0xc4, 0x93, 0x14, 0x89, 0x8b, 0x4a, 0xdc, 0x70, - 0x85, 0x50, 0xc2, 0x83, 0xa0, 0x3d, 0x33, 0x6b, 0xef, 0xfa, 0x07, 0x3b, 0x15, 0x37, 0x91, 0x77, - 0xe6, 0x7c, 0xdf, 0xf9, 0xce, 0x99, 0xf3, 0x13, 0xf4, 0xb6, 0x47, 0xbc, 0x41, 0x9b, 0x33, 0xfb, - 0x29, 0x67, 0xd4, 0xe7, 0xcc, 0x0f, 0x68, 0xd4, 0xa4, 0xf6, 0xe3, 0x2e, 0x8d, 0x06, 0x56, 0x18, - 0xf1, 0x98, 0x63, 0x5d, 0x99, 0x58, 0x39, 0x13, 0xab, 0x57, 0x36, 0x36, 0x9a, 0xbc, 0xc9, 0xc1, - 0xc8, 0x4e, 0x7e, 0x49, 0x7b, 0xe3, 0xcd, 0x26, 0xe7, 0xcd, 0x36, 0xb5, 0x49, 0x18, 0xd8, 0x84, - 0x31, 0x1e, 0x93, 0x38, 0xe0, 0x4c, 0xa4, 0xb7, 0x31, 0x65, 0x75, 0x1a, 0x75, 0x02, 0x16, 0xdb, - 0xf1, 0x20, 0xa4, 0x42, 0xfe, 0x55, 0xb7, 0x6f, 0x65, 0x6e, 0xfd, 0x68, 0x10, 0xc6, 0xdc, 0x0e, - 0x23, 0xce, 0x1b, 0xea, 0xfa, 0x86, 0xcf, 0x45, 0x87, 0x0b, 0xdb, 0x23, 0x42, 0x69, 0xb4, 0x7b, - 0x65, 0x8f, 0xc6, 0xa4, 0x6c, 0x87, 0xa4, 0x19, 0x30, 0xf0, 0xa4, 0x6c, 0x4b, 0x69, 0x64, 0x5e, - 0xec, 0xfb, 0x2d, 0xea, 0x1f, 0x86, 0x1c, 0x7c, 0xf6, 0xd5, 0xfd, 0xf5, 0xe9, 0xf7, 0xb9, 0x2f, - 0x65, 0x3a, 0x4c, 0xd2, 0xe8, 0x26, 0x60, 0xcd, 0x6c, 0x92, 0x8c, 0xab, 0xd3, 0x4d, 0x26, 0xa8, - 0xcc, 0xd4, 0x8e, 0x86, 0xdc, 0x6f, 0x25, 0x26, 0xbd, 0xf2, 0xf0, 0xf7, 0xb8, 0x4d, 0xfe, 0x4d, - 0x42, 0x12, 0x91, 0x8e, 0x18, 0x57, 0x9f, 0xb7, 0xc9, 0x3f, 0x11, 0x98, 0x9a, 0x1b, 0x08, 0x3f, - 0x4c, 0x94, 0xee, 0x01, 0xde, 0xa1, 0x8f, 0xbb, 0x54, 0xc4, 0xe6, 0x01, 0xba, 0x90, 0x3b, 0x15, - 0x21, 0x67, 0x82, 0xe2, 0x8f, 0xd1, 0x8a, 0xf4, 0xa3, 0x6b, 0x5b, 0xda, 0xf6, 0x5a, 0x65, 0xcb, - 0x9a, 0xf5, 0xfa, 0x96, 0x44, 0x56, 0xcf, 0xbd, 0xf8, 0xeb, 0xca, 0x92, 0xa3, 0x50, 0xe6, 0x25, - 0x74, 0x11, 0x68, 0xef, 0xb4, 0x48, 0xc0, 0xbe, 0x08, 0x44, 0x9c, 0xfa, 0xdb, 0x41, 0x6f, 0x8c, - 0x5f, 0x28, 0x97, 0x9b, 0xa8, 0xe8, 0x27, 0x87, 0x6e, 0x50, 0x4f, 0xbc, 0x9e, 0xdd, 0x2e, 0x3a, - 0x05, 0x38, 0xa8, 0xd5, 0x85, 0x59, 0xc9, 0xf2, 0xd5, 0x58, 0x83, 0x2b, 0x3e, 0x7c, 0x19, 0x15, - 0x52, 0x14, 0x48, 0x2d, 0x3a, 0xab, 0x0a, 0x64, 0x7e, 0x93, 0x75, 0x25, 0x31, 0xca, 0x55, 0x15, - 0x21, 0x05, 0x62, 0x0d, 0xae, 0x22, 0x7c, 0x67, 0x76, 0x84, 0x23, 0x02, 0xa9, 0x30, 0xf9, 0x69, - 0xee, 0x23, 0x03, 0xd8, 0x3f, 0x49, 0x1e, 0x6d, 0x42, 0xd6, 0x26, 0x2a, 0xc2, 0x6b, 0xba, 0xac, - 0xdb, 0x01, 0x07, 0xe7, 0x9c, 0x02, 0x1c, 0x3c, 0xe8, 0x76, 0x72, 0x9a, 0xcf, 0xe4, 0x35, 0x13, - 0xb4, 0x39, 0x95, 0xf5, 0x7f, 0x14, 0xfe, 0x1d, 0xba, 0x04, 0x2e, 0x92, 0xe4, 0x7f, 0x46, 0x49, - 0x9d, 0x46, 0x62, 0x7e, 0x32, 0xf1, 0x3d, 0x84, 0x46, 0xad, 0x05, 0xaa, 0xd7, 0x2a, 0x57, 0x2d, - 0xd9, 0x87, 0x56, 0xd2, 0x87, 0x96, 0x6c, 0x03, 0xd5, 0x87, 0xd6, 0x1e, 0x69, 0x52, 0x45, 0xeb, - 0x64, 0x90, 0xe6, 0x91, 0x86, 0xf4, 0x49, 0xf7, 0x2a, 0xbc, 0xdb, 0x68, 0xb5, 0x25, 0x8f, 0xa0, - 0x00, 0xd6, 0x2a, 0xd7, 0x66, 0xc7, 0x56, 0x63, 0x75, 0xda, 0xa7, 0x75, 0x49, 0xe1, 0xa4, 0x38, - 0xfc, 0xe9, 0x14, 
0x9d, 0xd7, 0xe6, 0xea, 0x94, 0xfe, 0x73, 0x42, 0x1f, 0xa2, 0x12, 0xe8, 0xbc, - 0x17, 0x30, 0xd2, 0x0e, 0x9e, 0xd2, 0xfa, 0x29, 0x4a, 0x0f, 0x6f, 0xa0, 0xe5, 0x30, 0xe2, 0x3d, - 0x0a, 0x02, 0x0a, 0x8e, 0xfc, 0x30, 0x8f, 0x96, 0xd1, 0x95, 0x99, 0x9c, 0x2a, 0x05, 0x07, 0x68, - 0xa3, 0x91, 0xde, 0xba, 0xaf, 0xf6, 0xd6, 0xb8, 0x31, 0x41, 0x8f, 0x77, 0x11, 0x92, 0xf5, 0x08, - 0x64, 0x32, 0x2d, 0xc6, 0x90, 0x6c, 0x38, 0x78, 0x7a, 0x65, 0x0b, 0x2a, 0xcf, 0x91, 0xd5, 0x0b, - 0xd0, 0x07, 0x68, 0x3d, 0x22, 0x4f, 0xdc, 0xd1, 0x08, 0xd3, 0xcf, 0xaa, 0xac, 0xa6, 0xf0, 0xdc, - 0xac, 0x4b, 0x38, 0x1c, 0xf2, 0xe4, 0xce, 0xf0, 0xcc, 0x79, 0x2d, 0xca, 0x7e, 0xe2, 0x03, 0x84, - 0xbd, 0xd8, 0x77, 0x45, 0xd7, 0xeb, 0x04, 0x42, 0x04, 0x9c, 0xb9, 0x87, 0x74, 0xa0, 0x9f, 0x1b, - 0xe3, 0xcc, 0xcf, 0xdf, 0x5e, 0xd9, 0x7a, 0x34, 0xb4, 0xbf, 0x4f, 0x07, 0xce, 0x79, 0x2f, 0xf6, - 0x73, 0x27, 0xf8, 0x2e, 0x7a, 0x1d, 0x56, 0x84, 0x1b, 0xf7, 0xdd, 0x80, 0xb9, 0x5e, 0x9b, 0xfb, - 0x87, 0xfa, 0x32, 0xb0, 0x5e, 0xb6, 0x46, 0xeb, 0xc4, 0x92, 0x6b, 0x66, 0xbf, 0xbf, 0x97, 0x18, - 0x3b, 0xeb, 0x80, 0xd9, 0xef, 0xd7, 0x58, 0x35, 0x01, 0xe0, 0xfb, 0xe8, 0xa2, 0x64, 0x91, 0xf5, - 0x94, 0x30, 0x41, 0x26, 0xf4, 0x15, 0x60, 0xd2, 0xb3, 0x4c, 0x72, 0x31, 0x59, 0x92, 0x08, 0x03, - 0x4c, 0x56, 0x63, 0x8d, 0x41, 0x12, 0xf1, 0x57, 0x48, 0x9e, 0x4a, 0x0a, 0x57, 0x50, 0xd2, 0xa6, - 0x75, 0x7d, 0x15, 0x98, 0x6e, 0xfc, 0xc7, 0x40, 0x4d, 0x30, 0xc0, 0xf0, 0x08, 0x10, 0xce, 0xf9, - 0x70, 0xec, 0x04, 0x7f, 0x9b, 0xca, 0x54, 0xcc, 0x49, 0x26, 0xe2, 0x98, 0xd6, 0xf5, 0x02, 0xb4, - 0xcd, 0xf5, 0xd9, 0x69, 0xdc, 0x8f, 0x08, 0x13, 0xc4, 0x4f, 0x4a, 0x1c, 0x8a, 0xe5, 0x42, 0x86, - 0x3b, 0x65, 0xa9, 0x1c, 0x15, 0xd0, 0x32, 0x14, 0x2a, 0xfe, 0x41, 0x43, 0x2b, 0x72, 0xc0, 0xe3, - 0x9b, 0xb3, 0x15, 0x4f, 0xee, 0x15, 0xe3, 0xdd, 0x05, 0xad, 0x65, 0xd9, 0x9b, 0xdb, 0xdf, 0xff, - 0xf1, 0xcf, 0x4f, 0x67, 0x4c, 0xbc, 0x65, 0x4f, 0x5f, 0x68, 0xbd, 0xb2, 0xda, 0x7b, 0xf8, 0x67, - 0x0d, 0x15, 0x87, 0xcb, 0x03, 0xdb, 0x73, 0xdc, 0x8c, 0xef, 0x1f, 0xe3, 0xbd, 0xc5, 0x01, 0x8b, - 0x4b, 0x83, 0x3e, 0x15, 0xf8, 0x97, 0x54, 0x1a, 0xf4, 0xcd, 0x42, 0xd2, 0x32, 0xf3, 0x64, 0x31, - 0x69, 0xd9, 0x61, 0x61, 0x7e, 0x00, 0xd2, 0xca, 0xd8, 0x9e, 0x23, 0x0d, 0xba, 0xde, 0x7e, 0x96, - 0x4e, 0xab, 0xe7, 0xf8, 0x37, 0x0d, 0xad, 0xe7, 0x57, 0x0c, 0xbe, 0x35, 0xc7, 0xfb, 0xd4, 0x3d, - 0x67, 0xec, 0x9c, 0x12, 0xa5, 0x84, 0x7f, 0x0e, 0xc2, 0xef, 0xe2, 0xea, 0x29, 0x85, 0xcb, 0x7f, - 0x91, 0x84, 0xfd, 0x6c, 0xb8, 0x5c, 0x9f, 0xe3, 0x5f, 0x35, 0xb4, 0x96, 0x59, 0x26, 0xb8, 0x3c, - 0x47, 0xd2, 0xe4, 0xde, 0x33, 0x2a, 0xa7, 0x81, 0xa8, 0x10, 0x6e, 0x41, 0x08, 0x16, 0xbe, 0x39, - 0x3b, 0x04, 0xb5, 0x93, 0xb2, 0x89, 0xff, 0x5d, 0x43, 0x78, 0x72, 0xfa, 0xe3, 0x0f, 0xe7, 0x08, - 0x98, 0xb9, 0x84, 0x8c, 0xdd, 0x57, 0x40, 0xaa, 0x08, 0x6e, 0x43, 0x04, 0x1f, 0xe1, 0xdd, 0xd9, - 0x11, 0x4c, 0x5b, 0x45, 0x99, 0x70, 0xaa, 0x5f, 0xbe, 0x38, 0x2e, 0x69, 0x2f, 0x8f, 0x4b, 0xda, - 0xdf, 0xc7, 0x25, 0xed, 0xc7, 0x93, 0xd2, 0xd2, 0xcb, 0x93, 0xd2, 0xd2, 0x9f, 0x27, 0xa5, 0xa5, - 0xaf, 0x77, 0x9a, 0x41, 0xdc, 0xea, 0x7a, 0x96, 0xcf, 0x3b, 0x29, 0x3d, 0xc0, 0x86, 0xbe, 0xfa, - 0x63, 0xde, 0x60, 0x2e, 0x7b, 0x2b, 0xf0, 0xbf, 0xea, 0xfb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x39, 0xca, 0xb6, 0x51, 0x90, 0x0c, 0x00, 0x00, + // 1116 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x8e, 0xdb, 0x44, + 0x14, 0x5e, 0xb7, 0xfb, 0x97, 0x59, 0xb1, 0x5a, 0xa6, 0x5b, 0xea, 0x7a, 0xdb, 0x74, 0x31, 0x52, + 0xbb, 0xad, 0x8a, 0x4d, 0x42, 0x97, 0xb2, 
0x42, 0x02, 0x6d, 0x5a, 0x5a, 0x42, 0x51, 0xd9, 0xba, + 0xbb, 0x08, 0x21, 0x90, 0x65, 0x3b, 0x93, 0xc4, 0xda, 0x64, 0xc6, 0xb5, 0x9d, 0x34, 0x69, 0xe9, + 0x0d, 0x2f, 0x00, 0x12, 0x37, 0x3c, 0x41, 0x91, 0x78, 0x92, 0x22, 0xf5, 0xa2, 0x12, 0x37, 0x5c, + 0x21, 0xb4, 0xcb, 0x2b, 0x70, 0x8f, 0x7c, 0x66, 0xec, 0xd8, 0x49, 0x4c, 0x92, 0x55, 0x6f, 0xaa, + 0x78, 0xe6, 0x7c, 0xdf, 0xf9, 0xce, 0x99, 0x39, 0xf3, 0x75, 0xd1, 0xdb, 0xb6, 0x65, 0xf7, 0x5b, + 0x8c, 0xea, 0x4f, 0x18, 0x25, 0x0e, 0xa3, 0x8e, 0x4b, 0xfc, 0x06, 0xd1, 0x1f, 0x75, 0x88, 0xdf, + 0xd7, 0x3c, 0x9f, 0x85, 0x0c, 0xcb, 0x22, 0x44, 0xcb, 0x84, 0x68, 0xdd, 0x92, 0xb2, 0xde, 0x60, + 0x0d, 0x06, 0x41, 0x7a, 0xf4, 0x8b, 0xc7, 0x2b, 0x17, 0x1a, 0x8c, 0x35, 0x5a, 0x44, 0xb7, 0x3c, + 0x57, 0xb7, 0x28, 0x65, 0xa1, 0x15, 0xba, 0x8c, 0x06, 0xf1, 0x6e, 0x48, 0x68, 0x8d, 0xf8, 0x6d, + 0x97, 0x86, 0x7a, 0xd8, 0xf7, 0x48, 0xc0, 0xff, 0x15, 0xbb, 0x17, 0x53, 0xbb, 0x8e, 0xdf, 0xf7, + 0x42, 0xa6, 0x7b, 0x3e, 0x63, 0x75, 0xb1, 0x7d, 0xcd, 0x61, 0x41, 0x9b, 0x05, 0xba, 0x6d, 0x05, + 0x42, 0xa3, 0xde, 0x2d, 0xd9, 0x24, 0xb4, 0x4a, 0xba, 0x67, 0x35, 0x5c, 0x0a, 0x99, 0x44, 0x6c, + 0x31, 0xae, 0xcc, 0x0e, 0x1d, 0xa7, 0x49, 0x9c, 0x43, 0x8f, 0x41, 0xce, 0x9e, 0xd8, 0xbf, 0x3a, + 0x7e, 0x3f, 0xf3, 0x25, 0x42, 0x93, 0x26, 0x0d, 0x76, 0x5c, 0xda, 0x48, 0x37, 0x49, 0xb9, 0x3c, + 0x3e, 0x64, 0x84, 0x4a, 0x8d, 0xe3, 0x88, 0xc7, 0x9c, 0x66, 0x14, 0xd2, 0x2d, 0x25, 0xbf, 0x87, + 0x63, 0xb2, 0x67, 0xe2, 0x59, 0xbe, 0xd5, 0x0e, 0x86, 0xd5, 0x67, 0x63, 0xb2, 0x47, 0x04, 0xa1, + 0xea, 0x3a, 0xc2, 0x0f, 0x22, 0xa5, 0x7b, 0x80, 0x37, 0xc8, 0xa3, 0x0e, 0x09, 0x42, 0xf5, 0x00, + 0x9d, 0xc9, 0xac, 0x06, 0x1e, 0xa3, 0x01, 0xc1, 0x1f, 0xa3, 0x45, 0x9e, 0x47, 0x96, 0x36, 0xa5, + 0xad, 0x95, 0xf2, 0xa6, 0x96, 0x77, 0xfa, 0x1a, 0x47, 0x56, 0xe6, 0x5f, 0xfc, 0x75, 0x69, 0xce, + 0x10, 0x28, 0xf5, 0x1c, 0x3a, 0x0b, 0xb4, 0xb7, 0x9a, 0x96, 0x4b, 0xbf, 0x70, 0x83, 0x30, 0xce, + 0xb7, 0x8d, 0xde, 0x1a, 0xde, 0x10, 0x29, 0x37, 0x50, 0xc1, 0x89, 0x16, 0x4d, 0xb7, 0x16, 0x65, + 0x3d, 0xbd, 0x55, 0x30, 0x96, 0x61, 0xa1, 0x5a, 0x0b, 0xd4, 0x72, 0x9a, 0xaf, 0x4a, 0xeb, 0x4c, + 0xf0, 0xe1, 0xf3, 0x68, 0x39, 0x46, 0x81, 0xd4, 0x82, 0xb1, 0x24, 0x40, 0xea, 0xb7, 0xe9, 0x54, + 0x1c, 0x23, 0x52, 0x55, 0x10, 0x12, 0x20, 0x5a, 0x67, 0xa2, 0xc2, 0x77, 0xf2, 0x2b, 0x1c, 0x10, + 0x70, 0x85, 0xd1, 0x4f, 0x75, 0x1f, 0x29, 0xc0, 0xfe, 0x69, 0x74, 0x68, 0x23, 0xb2, 0x36, 0x50, + 0x01, 0x4e, 0xd3, 0xa4, 0x9d, 0x36, 0x24, 0x98, 0x37, 0x96, 0x61, 0xe1, 0x7e, 0xa7, 0x9d, 0xd1, + 0x7c, 0x2a, 0xab, 0xd9, 0x42, 0x1b, 0x63, 0x59, 0x5f, 0xa3, 0xf0, 0xef, 0xd1, 0x39, 0x48, 0x11, + 0x35, 0xff, 0x33, 0x62, 0xd5, 0x88, 0x1f, 0x4c, 0x6e, 0x26, 0xbe, 0x83, 0xd0, 0x60, 0xb4, 0x40, + 0xf5, 0x4a, 0xf9, 0xb2, 0xc6, 0xe7, 0x50, 0x8b, 0xe6, 0x50, 0xe3, 0x63, 0x20, 0xe6, 0x50, 0xdb, + 0xb3, 0x1a, 0x44, 0xd0, 0x1a, 0x29, 0xa4, 0xfa, 0x5c, 0x42, 0xf2, 0x68, 0x7a, 0x51, 0xde, 0x2e, + 0x5a, 0x6a, 0xf2, 0x25, 0xb8, 0x00, 0x2b, 0xe5, 0x2b, 0xf9, 0xb5, 0x55, 0x69, 0x8d, 0xf4, 0x48, + 0x8d, 0x53, 0x18, 0x31, 0x0e, 0xdf, 0x1d, 0xa3, 0xf3, 0xca, 0x44, 0x9d, 0x3c, 0x7f, 0x46, 0xe8, + 0x57, 0xe8, 0x42, 0xa2, 0x13, 0x4e, 0x63, 0xa8, 0x57, 0x27, 0x3d, 0x61, 0x1b, 0x5d, 0xcc, 0xe1, + 0x7d, 0x6d, 0x4d, 0x50, 0x1f, 0xa0, 0x22, 0xe4, 0xb8, 0xe3, 0x52, 0xab, 0xe5, 0x3e, 0x21, 0xb5, + 0x19, 0xc6, 0x06, 0xaf, 0xa3, 0x05, 0xcf, 0x67, 0x5d, 0x02, 0xc2, 0x97, 0x0d, 0xfe, 0xa1, 0x3e, + 0x5f, 0x40, 0x97, 0x72, 0x39, 0x85, 0xf2, 0x03, 0xb4, 0x5e, 0x8f, 0x77, 0xcd, 0x93, 0xdd, 0x53, + 0x5c, 0x1f, 0xa1, 0xc7, 0x3b, 0x08, 0xf1, 0x4e, 0x03, 0x19, 0x3f, 
0x52, 0x25, 0x21, 0x4b, 0x1e, + 0xcd, 0x6e, 0x49, 0x83, 0x7e, 0x1a, 0xfc, 0x5c, 0x00, 0x7a, 0x1f, 0xad, 0xfa, 0xd6, 0x63, 0x73, + 0xf0, 0xfc, 0xca, 0xa7, 0xc5, 0x8d, 0x88, 0xe1, 0x99, 0x77, 0x3a, 0xe2, 0x30, 0xac, 0xc7, 0xb7, + 0x92, 0x35, 0xe3, 0x0d, 0x3f, 0xfd, 0x89, 0x0f, 0x10, 0xb6, 0x43, 0xc7, 0x0c, 0x3a, 0x76, 0xdb, + 0x0d, 0x02, 0x97, 0x51, 0xf3, 0x90, 0xf4, 0xe5, 0xf9, 0x21, 0xce, 0xac, 0x77, 0x74, 0x4b, 0xda, + 0xc3, 0x24, 0xfe, 0x1e, 0xe9, 0x1b, 0x6b, 0x76, 0xe8, 0x64, 0x56, 0xf0, 0x6d, 0xf4, 0x26, 0xd8, + 0x9b, 0x19, 0xf6, 0x4c, 0x97, 0x9a, 0x76, 0x8b, 0x39, 0x87, 0xf2, 0x02, 0xb0, 0x9e, 0xd7, 0x06, + 0x56, 0xa8, 0x71, 0x8b, 0xdc, 0xef, 0xed, 0x45, 0xc1, 0xc6, 0x2a, 0x60, 0xf6, 0x7b, 0x55, 0x5a, + 0x89, 0x00, 0xf8, 0x1e, 0x3a, 0xcb, 0x59, 0xf8, 0x35, 0x88, 0x98, 0xa0, 0x13, 0xf2, 0x22, 0x30, + 0xc9, 0x69, 0x26, 0x6e, 0xaa, 0x1a, 0x27, 0xc2, 0x00, 0xe3, 0x97, 0xa8, 0x4a, 0xa1, 0x89, 0xf8, + 0x6b, 0xc4, 0x57, 0x39, 0x85, 0x19, 0x10, 0xab, 0x45, 0x6a, 0xf2, 0x12, 0x30, 0x5d, 0xfb, 0x1f, + 0x33, 0x88, 0x30, 0xc0, 0xf0, 0x10, 0x10, 0xc6, 0x9a, 0x37, 0xb4, 0x82, 0xbf, 0x8b, 0x65, 0x0a, + 0xe6, 0xa8, 0x13, 0x61, 0x48, 0x6a, 0xf2, 0x32, 0xdc, 0xf6, 0xab, 0xf9, 0x6d, 0xdc, 0xf7, 0x2d, + 0x1a, 0x58, 0x4e, 0x34, 0x9e, 0x70, 0x59, 0xce, 0xa4, 0xb8, 0x63, 0x96, 0xf2, 0xbf, 0x05, 0xb4, + 0x00, 0x17, 0x15, 0xff, 0x28, 0xa1, 0x45, 0x6e, 0x4e, 0xf8, 0x7a, 0xbe, 0xe2, 0x51, 0x4f, 0x54, + 0xde, 0x9d, 0x32, 0x9a, 0x5f, 0x7b, 0x75, 0xeb, 0x87, 0x3f, 0xfe, 0xf9, 0xf9, 0x94, 0x8a, 0x37, + 0xf5, 0xf1, 0x66, 0xdc, 0x2d, 0x09, 0xcf, 0xc6, 0xbf, 0x48, 0xa8, 0x90, 0x18, 0x1f, 0xd6, 0x27, + 0xa4, 0x19, 0xf6, 0x4e, 0xe5, 0xbd, 0xe9, 0x01, 0xd3, 0x4b, 0x83, 0x39, 0x0d, 0xf0, 0xaf, 0xb1, + 0x34, 0x98, 0x9b, 0xa9, 0xa4, 0xa5, 0xde, 0x93, 0xe9, 0xa4, 0xa5, 0x1f, 0x0b, 0xf5, 0x26, 0x48, + 0x2b, 0x61, 0x7d, 0x82, 0x34, 0x98, 0x7a, 0xfd, 0x69, 0xfc, 0x5a, 0x3d, 0xc3, 0xbf, 0x4b, 0x68, + 0x35, 0x6b, 0x8f, 0xf8, 0xc6, 0x84, 0xec, 0x63, 0x3d, 0x5a, 0xd9, 0x9e, 0x11, 0x25, 0x84, 0x7f, + 0x0e, 0xc2, 0x6f, 0xe3, 0xca, 0x8c, 0xc2, 0xf9, 0x7f, 0xef, 0x02, 0xfd, 0x69, 0x62, 0x1b, 0xcf, + 0xf0, 0x6f, 0x12, 0x5a, 0x49, 0x19, 0x21, 0x2e, 0x4d, 0x90, 0x34, 0xea, 0xd9, 0x4a, 0x79, 0x16, + 0x88, 0x28, 0xe1, 0x06, 0x94, 0xa0, 0xe1, 0xeb, 0xf9, 0x25, 0x08, 0x2b, 0x49, 0x37, 0xfe, 0xa5, + 0x84, 0xd6, 0x86, 0x5d, 0x0b, 0x7f, 0x30, 0x45, 0xfa, 0x31, 0xf6, 0xa9, 0xdc, 0x9c, 0x19, 0x27, + 0xb4, 0xdf, 0x05, 0xed, 0xbb, 0xf8, 0x93, 0x59, 0xb4, 0x8f, 0xeb, 0xfd, 0x4b, 0x09, 0xe1, 0x51, + 0x33, 0xc3, 0x1f, 0x4e, 0x10, 0x96, 0xeb, 0xa9, 0xca, 0xce, 0x09, 0x90, 0xa2, 0xa8, 0x5d, 0x28, + 0xea, 0x23, 0xbc, 0x93, 0x5f, 0xd4, 0x38, 0x67, 0x4d, 0x55, 0x58, 0xf9, 0xf2, 0xc5, 0x51, 0x51, + 0x7a, 0x75, 0x54, 0x94, 0xfe, 0x3e, 0x2a, 0x4a, 0x3f, 0x1d, 0x17, 0xe7, 0x5e, 0x1d, 0x17, 0xe7, + 0xfe, 0x3c, 0x2e, 0xce, 0x7d, 0xb3, 0xdd, 0x70, 0xc3, 0x66, 0xc7, 0xd6, 0x1c, 0xd6, 0x8e, 0xe9, + 0x01, 0x96, 0xe4, 0xea, 0x0d, 0x65, 0x03, 0x9b, 0xb1, 0x17, 0xe1, 0xcf, 0x86, 0xf7, 0xff, 0x0b, + 0x00, 0x00, 0xff, 0xff, 0x20, 0x72, 0xc5, 0x73, 0x1b, 0x0e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
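The generated QueryClient below gains a ListEpochHeaders method. As a rough illustration of how a consumer might call it, here is a minimal Go sketch that is not taken from the patch itself: it assumes a node serving gRPC on localhost:9090, and the chain ID "my-cz-chain" and epoch number 10 are placeholder values.

package main

import (
	"context"
	"fmt"
	"log"

	zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial the node's gRPC endpoint (the address is an assumption for illustration).
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewQueryClient is part of the generated code in this file.
	client := zctypes.NewQueryClient(conn)

	// Ask for the CZ headers that were timestamped in a given Babylon epoch.
	resp, err := client.ListEpochHeaders(context.Background(), &zctypes.QueryListEpochHeadersRequest{
		ChainId:  "my-cz-chain", // placeholder chain ID
		EpochNum: 10,            // placeholder epoch number
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, h := range resp.Headers {
		fmt.Printf("height=%d hash=%X\n", h.Height, h.Hash)
	}
}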
@@ -774,6 +878,8 @@ type QueryClient interface { EpochChainInfo(ctx context.Context, in *QueryEpochChainInfoRequest, opts ...grpc.CallOption) (*QueryEpochChainInfoResponse, error) // ListHeaders queries the headers of a chain in Babylon's view, with pagination support ListHeaders(ctx context.Context, in *QueryListHeadersRequest, opts ...grpc.CallOption) (*QueryListHeadersResponse, error) + // ListEpochHeaders queries the headers of a chain timestamped in a given epoch of Babylon, with pagination support + ListEpochHeaders(ctx context.Context, in *QueryListEpochHeadersRequest, opts ...grpc.CallOption) (*QueryListEpochHeadersResponse, error) // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs FinalizedChainInfo(ctx context.Context, in *QueryFinalizedChainInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoResponse, error) } @@ -831,6 +937,15 @@ func (c *queryClient) ListHeaders(ctx context.Context, in *QueryListHeadersReque return out, nil } +func (c *queryClient) ListEpochHeaders(ctx context.Context, in *QueryListEpochHeadersRequest, opts ...grpc.CallOption) (*QueryListEpochHeadersResponse, error) { + out := new(QueryListEpochHeadersResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ListEpochHeaders", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *queryClient) FinalizedChainInfo(ctx context.Context, in *QueryFinalizedChainInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoResponse, error) { out := new(QueryFinalizedChainInfoResponse) err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/FinalizedChainInfo", in, out, opts...) @@ -852,6 +967,8 @@ type QueryServer interface { EpochChainInfo(context.Context, *QueryEpochChainInfoRequest) (*QueryEpochChainInfoResponse, error) // ListHeaders queries the headers of a chain in Babylon's view, with pagination support ListHeaders(context.Context, *QueryListHeadersRequest) (*QueryListHeadersResponse, error) + // ListEpochHeaders queries the headers of a chain timestamped in a given epoch of Babylon, with pagination support + ListEpochHeaders(context.Context, *QueryListEpochHeadersRequest) (*QueryListEpochHeadersResponse, error) // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs FinalizedChainInfo(context.Context, *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) } @@ -875,6 +992,9 @@ func (*UnimplementedQueryServer) EpochChainInfo(ctx context.Context, req *QueryE func (*UnimplementedQueryServer) ListHeaders(ctx context.Context, req *QueryListHeadersRequest) (*QueryListHeadersResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListHeaders not implemented") } +func (*UnimplementedQueryServer) ListEpochHeaders(ctx context.Context, req *QueryListEpochHeadersRequest) (*QueryListEpochHeadersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListEpochHeaders not implemented") +} func (*UnimplementedQueryServer) FinalizedChainInfo(ctx context.Context, req *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FinalizedChainInfo not implemented") } @@ -973,6 +1093,24 @@ func _Query_ListHeaders_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } +func _Query_ListEpochHeaders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(QueryListEpochHeadersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ListEpochHeaders(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/ListEpochHeaders", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ListEpochHeaders(ctx, req.(*QueryListEpochHeadersRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Query_FinalizedChainInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryFinalizedChainInfoRequest) if err := dec(in); err != nil { @@ -1015,6 +1153,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "ListHeaders", Handler: _Query_ListHeaders_Handler, }, + { + MethodName: "ListEpochHeaders", + Handler: _Query_ListEpochHeaders_Handler, + }, { MethodName: "FinalizedChainInfo", Handler: _Query_FinalizedChainInfo_Handler, @@ -1361,6 +1503,78 @@ func (m *QueryListHeadersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } +func (m *QueryListEpochHeadersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryListEpochHeadersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryListEpochHeadersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + if m.EpochNum != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EpochNum)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryListEpochHeadersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryListEpochHeadersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryListEpochHeadersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *QueryFinalizedChainInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1668,6 +1882,37 @@ func (m *QueryListHeadersResponse) Size() (n int) { return n } +func (m *QueryListEpochHeadersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryListEpochHeadersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + 
func (m *QueryFinalizedChainInfoRequest) Size() (n int) { if m == nil { return 0 @@ -2591,6 +2836,191 @@ func (m *QueryListHeadersResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryListEpochHeadersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryListEpochHeadersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryListEpochHeadersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + } + m.EpochNum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryListEpochHeadersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryListEpochHeadersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryListEpochHeadersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.Headers = append(m.Headers, &IndexedHeader{}) + if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryFinalizedChainInfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/zoneconcierge/types/query.pb.gw.go b/x/zoneconcierge/types/query.pb.gw.go index 96e69913c..4badc5d0f 100644 --- a/x/zoneconcierge/types/query.pb.gw.go +++ b/x/zoneconcierge/types/query.pb.gw.go @@ -271,6 +271,82 @@ func local_request_Query_ListHeaders_0(ctx context.Context, marshaler runtime.Ma } +func request_Query_ListEpochHeaders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryListEpochHeadersRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := client.ListEpochHeaders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ListEpochHeaders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryListEpochHeadersRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := server.ListEpochHeaders(ctx, &protoReq) + return msg, metadata, err + +} + var ( filter_Query_FinalizedChainInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) @@ -464,6 +540,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + 
mux.Handle("GET", pattern_Query_ListEpochHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ListEpochHeaders_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ListEpochHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_FinalizedChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -628,6 +727,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_ListEpochHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ListEpochHeaders_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ListEpochHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_Query_FinalizedChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -662,6 +781,8 @@ var ( pattern_Query_ListHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "headers", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_ListEpochHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "headers", "chain_id", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_FinalizedChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) ) @@ -676,5 +797,7 @@ var ( forward_Query_ListHeaders_0 = runtime.ForwardResponseMessage + forward_Query_ListEpochHeaders_0 = runtime.ForwardResponseMessage + forward_Query_FinalizedChainInfo_0 = runtime.ForwardResponseMessage ) From 973a47232df0fd3cc1bd6a8c8555dba8c9c42a62 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Tue, 10 Jan 2023 10:13:46 +1100 Subject: [PATCH 09/37] zoneconcierge API: adding total number of timestamped headers to chainInfo (#263) --- .../babylon/zoneconcierge/zoneconcierge.proto | 4 +- x/zoneconcierge/keeper/chain_info_indexer.go | 16 ++- .../keeper/epoch_chain_info_indexer_test.go | 1 + x/zoneconcierge/types/zoneconcierge.pb.go | 110 ++++++++++++------ 4 files changed, 92 insertions(+), 39 deletions(-) diff --git a/proto/babylon/zoneconcierge/zoneconcierge.proto b/proto/babylon/zoneconcierge/zoneconcierge.proto index 5d89a43c8..680240573 100644 --- a/proto/babylon/zoneconcierge/zoneconcierge.proto +++ b/proto/babylon/zoneconcierge/zoneconcierge.proto @@ -48,10 +48,12 @@ message Forks { message ChainInfo { // chain_id is the ID of the chain string chain_id = 1; - // latest_header is the latest header in the canonical chain of CZ + // latest_header is the latest header in CZ's canonical chain IndexedHeader latest_header = 2; // latest_forks is the latest forks, formed as a series of IndexedHeader (from low to high) Forks latest_forks = 3; + // timestamped_headers_count is the number of timestamped headers in CZ's canonical chain + uint64 timestamped_headers_count = 4; } // ProofEpochSealed is the proof that an epoch is sealed by the sealer header, i.e., the 2nd header of the next epoch diff --git a/x/zoneconcierge/keeper/chain_info_indexer.go b/x/zoneconcierge/keeper/chain_info_indexer.go index 72e6f27d1..1a5024ebb 100644 --- a/x/zoneconcierge/keeper/chain_info_indexer.go +++ b/x/zoneconcierge/keeper/chain_info_indexer.go @@ -33,16 +33,28 @@ func (k Keeper) GetChainInfo(ctx sdk.Context, chainID string) *types.ChainInfo { return &chainInfo } +// updateLatestHeader updates the chainInfo w.r.t. the given header, including +// - replace the old latest header with the given one +// - increment the number of timestamped headers +// Note that this function is triggered only upon receiving headers from the relayer, +// and only a subset of headers in CZ are relayed. Thus TimestampedHeadersCount is not +// equal to the total number of headers in CZ. 
func (k Keeper) updateLatestHeader(ctx sdk.Context, chainID string, header *types.IndexedHeader) error { if header == nil { return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil") } chainInfo := k.GetChainInfo(ctx, chainID) - chainInfo.LatestHeader = header + chainInfo.LatestHeader = header // replace the old latest header with the given one + chainInfo.TimestampedHeadersCount++ // increment the number of timestamped headers k.setChainInfo(ctx, chainInfo) return nil } +// tryToUpdateLatestForkHeader tries to update the chainInfo w.r.t. the given fork header +// - If no fork exists, add this fork header as the latest one +// - If there is a fork header at the same height, add this fork to the set of latest fork headers +// - If this fork header is newer than the previous one, replace the old fork headers with this fork header +// - If this fork header is older than the current latest fork, ignore func (k Keeper) tryToUpdateLatestForkHeader(ctx sdk.Context, chainID string, header *types.IndexedHeader) error { if header == nil { return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil") @@ -57,7 +69,7 @@ func (k Keeper) tryToUpdateLatestForkHeader(ctx sdk.Context, chainID string, hea // there exists fork headers at the same height, add this fork header to the set of latest fork headers chainInfo.LatestForks.Headers = append(chainInfo.LatestForks.Headers, header) } else if chainInfo.LatestForks.Headers[0].Height < header.Height { - // this fork header is newer than the previous one, add this fork header as the latest one + // this fork header is newer than the previous one, replace the old fork headers with this fork header chainInfo.LatestForks = &types.Forks{ Headers: []*types.IndexedHeader{header}, } diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go index 82ced134b..57f52f781 100644 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go +++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go @@ -34,6 +34,7 @@ func FuzzEpochChainInfoIndexer(f *testing.F) { chainInfo, err := zcKeeper.GetEpochChainInfo(ctx, czChain.ChainID, epochNum) require.NoError(t, err) require.Equal(t, numHeaders-1, chainInfo.LatestHeader.Height) + require.Equal(t, numHeaders, chainInfo.TimestampedHeadersCount) require.Equal(t, numForkHeaders, uint64(len(chainInfo.LatestForks.Headers))) }) } diff --git a/x/zoneconcierge/types/zoneconcierge.pb.go b/x/zoneconcierge/types/zoneconcierge.pb.go index 3931f274c..af6967140 100644 --- a/x/zoneconcierge/types/zoneconcierge.pb.go +++ b/x/zoneconcierge/types/zoneconcierge.pb.go @@ -180,10 +180,12 @@ func (m *Forks) GetHeaders() []*IndexedHeader { type ChainInfo struct { // chain_id is the ID of the chain ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - // latest_header is the latest header in the canonical chain of CZ + // latest_header is the latest header in CZ's canonical chain LatestHeader *IndexedHeader `protobuf:"bytes,2,opt,name=latest_header,json=latestHeader,proto3" json:"latest_header,omitempty"` // latest_forks is the latest forks, formed as a series of IndexedHeader (from low to high) LatestForks *Forks `protobuf:"bytes,3,opt,name=latest_forks,json=latestForks,proto3" json:"latest_forks,omitempty"` + // timestamped_headers_count is the number of timestamped headers in CZ's canonical chain + TimestampedHeadersCount uint64 `protobuf:"varint,4,opt,name=timestamped_headers_count,json=timestampedHeadersCount,proto3" 
json:"timestamped_headers_count,omitempty"` } func (m *ChainInfo) Reset() { *m = ChainInfo{} } @@ -240,6 +242,13 @@ func (m *ChainInfo) GetLatestForks() *Forks { return nil } +func (m *ChainInfo) GetTimestampedHeadersCount() uint64 { + if m != nil { + return m.TimestampedHeadersCount + } + return 0 +} + // ProofEpochSealed is the proof that an epoch is sealed by the sealer header, i.e., the 2nd header of the next epoch // With the access of metadata // - Metadata of this epoch, which includes the sealer header @@ -325,41 +334,43 @@ func init() { } var fileDescriptor_c76d28ce8dde4532 = []byte{ - // 537 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xee, 0x36, 0x69, 0x4a, 0x37, 0x49, 0x89, 0x8c, 0x84, 0x4c, 0x01, 0x13, 0xa5, 0x52, 0x09, - 0x07, 0x6c, 0x11, 0xc4, 0x19, 0x11, 0x54, 0xd4, 0x16, 0xa4, 0x22, 0x07, 0x15, 0x89, 0x8b, 0xb5, - 0xb1, 0x27, 0xd9, 0x55, 0xdc, 0x5d, 0xcb, 0x5e, 0xa2, 0x84, 0xa7, 0xe0, 0x71, 0x78, 0x04, 0x8e, - 0x3d, 0x72, 0x44, 0x09, 0x2f, 0xc1, 0x0d, 0x65, 0xbc, 0xce, 0x0f, 0xa2, 0xc0, 0xc5, 0xf2, 0xb7, - 0x33, 0xf3, 0xed, 0x37, 0xdf, 0xcc, 0xd2, 0x47, 0x7d, 0xd6, 0x9f, 0xc6, 0x4a, 0x7a, 0x9f, 0x94, - 0x84, 0x50, 0xc9, 0x50, 0x40, 0x3a, 0x84, 0x4d, 0xe4, 0x26, 0xa9, 0xd2, 0xca, 0xb2, 0x4d, 0xaa, - 0xbb, 0x19, 0x1c, 0x3f, 0x39, 0x38, 0x2c, 0x48, 0x42, 0x0e, 0xe1, 0x28, 0x51, 0x42, 0x6a, 0x21, - 0x87, 0x5e, 0x3f, 0xce, 0x82, 0x11, 0x4c, 0xf3, 0xf2, 0x83, 0xa3, 0x3f, 0x27, 0xad, 0x90, 0xc9, - 0xbb, 0xa7, 0x41, 0x46, 0x90, 0x5e, 0x0a, 0xa9, 0x3d, 0x3d, 0x4d, 0x20, 0xcb, 0xbf, 0x26, 0x7a, - 0x7f, 0x2d, 0x1a, 0xa6, 0xd3, 0x44, 0x2b, 0x2f, 0x49, 0x95, 0x1a, 0xe4, 0xe1, 0xd6, 0x0f, 0x42, - 0xeb, 0xa7, 0x32, 0x82, 0x09, 0x44, 0x27, 0xc0, 0x22, 0x48, 0xad, 0x3b, 0xf4, 0x46, 0xc8, 0x99, - 0x90, 0x81, 0x88, 0x6c, 0xd2, 0x24, 0xed, 0x3d, 0x7f, 0x17, 0xf1, 0x69, 0x64, 0x59, 0xb4, 0xcc, - 0x59, 0xc6, 0xed, 0xed, 0x26, 0x69, 0xd7, 0x7c, 0xfc, 0xb7, 0x6e, 0xd3, 0x0a, 0x07, 0x31, 0xe4, - 0xda, 0x2e, 0x35, 0x49, 0xbb, 0xec, 0x1b, 0x64, 0x3d, 0xa7, 0xfb, 0x46, 0x7f, 0xc0, 0x91, 0xd8, - 0x2e, 0x37, 0x49, 0xbb, 0xda, 0xb1, 0xdd, 0x95, 0x20, 0x37, 0x17, 0x9a, 0x5f, 0xec, 0xd7, 0x4d, - 0xbe, 0xd1, 0x71, 0x48, 0x8b, 0x83, 0x00, 0x12, 0x15, 0x72, 0x7b, 0x07, 0xf9, 0x6b, 0xe6, 0xf0, - 0x78, 0x71, 0x66, 0x1d, 0xd1, 0x9b, 0x45, 0x92, 0x9e, 0x04, 0x28, 0xae, 0x82, 0xe2, 0x8a, 0xda, - 0x77, 0x93, 0x13, 0x96, 0xf1, 0xd6, 0x19, 0xdd, 0x79, 0xa5, 0xd2, 0x51, 0x66, 0xbd, 0xa0, 0xbb, - 0xb9, 0x9c, 0xcc, 0x2e, 0x35, 0x4b, 0xed, 0x6a, 0xe7, 0xa1, 0x7b, 0xdd, 0x94, 0xdc, 0x0d, 0x5f, - 0xfc, 0xa2, 0xae, 0xf5, 0x85, 0xd0, 0xbd, 0x97, 0xe8, 0x88, 0x1c, 0xa8, 0xbf, 0xd9, 0xf5, 0x86, - 0xd6, 0x63, 0xa6, 0x21, 0xd3, 0x85, 0x03, 0xdb, 0xe8, 0xc0, 0x7f, 0xdf, 0x58, 0xcb, 0xab, 0x8d, - 0x1f, 0x5d, 0x6a, 0x70, 0x30, 0x58, 0x74, 0x82, 0x76, 0x57, 0x3b, 0x0f, 0xae, 0x27, 0xc3, 0x86, - 0xfd, 0x6a, 0x5e, 0x84, 0xa0, 0xf5, 0x93, 0xd0, 0xc6, 0xdb, 0xc5, 0xf4, 0xd1, 0xbd, 0x1e, 0xb0, - 0x18, 0x22, 0xcb, 0xa7, 0xf5, 0x31, 0x8b, 0x45, 0xc4, 0xb4, 0x4a, 0x83, 0x0c, 0xb4, 0x4d, 0xd0, - 0x98, 0xc7, 0x4b, 0xe6, 0x8d, 0xfd, 0x5b, 0x30, 0x5f, 0x14, 0xe9, 0xef, 0x85, 0xe6, 0xdd, 0x38, - 0x7b, 0x0d, 0x53, 0xbf, 0xb6, 0xe4, 0xe8, 0x81, 0xb6, 0x8e, 0x69, 0x03, 0xb7, 0x2c, 0x1f, 0x5d, - 0x20, 0xe4, 0x40, 0x99, 0xee, 0xef, 0xae, 0xcf, 0x3f, 0x5f, 0x48, 0x17, 0x25, 0x9d, 0x27, 0x99, - 0xbf, 0x9f, 0x2c, 0xc5, 0xa1, 0xb9, 0x67, 0xf4, 0xd6, 0x3a, 0xcd, 0x98, 0xc5, 0x28, 0xb0, 0xf4, - 0x6f, 0xa6, 0xc6, 0x8a, 0xe9, 0x82, 0xc5, 0x3d, 0xd0, 0xdd, 
0xf3, 0xaf, 0x33, 0x87, 0x5c, 0xcd, - 0x1c, 0xf2, 0x7d, 0xe6, 0x90, 0xcf, 0x73, 0x67, 0xeb, 0x6a, 0xee, 0x6c, 0x7d, 0x9b, 0x3b, 0x5b, - 0x1f, 0x9e, 0x0d, 0x85, 0xe6, 0x1f, 0xfb, 0x6e, 0xa8, 0x2e, 0x3d, 0xd3, 0x33, 0x8e, 0xb0, 0x00, - 0xde, 0xe4, 0xb7, 0xc7, 0x8e, 0x6b, 0xdb, 0xaf, 0xe0, 0x0b, 0x7a, 0xfa, 0x2b, 0x00, 0x00, 0xff, - 0xff, 0x62, 0xcd, 0x4c, 0xba, 0x12, 0x04, 0x00, 0x00, + // 571 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, + 0x10, 0xcd, 0x36, 0x69, 0xfa, 0xeb, 0x26, 0xe9, 0x2f, 0x32, 0x12, 0xb8, 0x05, 0x4c, 0x94, 0x4a, + 0x25, 0x1c, 0xb0, 0x45, 0x10, 0x17, 0x2e, 0x88, 0x54, 0x45, 0x6d, 0x41, 0x2a, 0x72, 0x50, 0x91, + 0xb8, 0x58, 0x1b, 0x7b, 0x12, 0xaf, 0xe2, 0xec, 0x5a, 0xde, 0x6d, 0x94, 0xf0, 0x29, 0xf8, 0x58, + 0x1c, 0x7b, 0xe4, 0x88, 0x12, 0x3e, 0x02, 0x17, 0x6e, 0x28, 0xbb, 0xeb, 0xfc, 0x41, 0x14, 0xb8, + 0x58, 0x9e, 0x9d, 0x37, 0x6f, 0xdf, 0xbc, 0x19, 0x1b, 0x3f, 0xea, 0x91, 0xde, 0x34, 0xe1, 0xcc, + 0xfb, 0xc8, 0x19, 0x84, 0x9c, 0x85, 0x14, 0xb2, 0x01, 0x6c, 0x46, 0x6e, 0x9a, 0x71, 0xc9, 0x2d, + 0xdb, 0x40, 0xdd, 0xcd, 0xe4, 0xf8, 0xc9, 0xc1, 0x61, 0x4e, 0x12, 0xc6, 0x10, 0x0e, 0x53, 0x4e, + 0x99, 0xa4, 0x6c, 0xe0, 0xf5, 0x12, 0x11, 0x0c, 0x61, 0xaa, 0xcb, 0x0f, 0x8e, 0x7e, 0x0f, 0x5a, + 0x45, 0x06, 0x77, 0x4f, 0x02, 0x8b, 0x20, 0x1b, 0x51, 0x26, 0x3d, 0x39, 0x4d, 0x41, 0xe8, 0xa7, + 0xc9, 0xde, 0x5f, 0xcb, 0x86, 0xd9, 0x34, 0x95, 0xdc, 0x4b, 0x33, 0xce, 0xfb, 0x3a, 0xdd, 0xfc, + 0x86, 0x70, 0xed, 0x8c, 0x45, 0x30, 0x81, 0xe8, 0x14, 0x48, 0x04, 0x99, 0xb5, 0x8f, 0xff, 0x0b, + 0x63, 0x42, 0x59, 0x40, 0x23, 0x1b, 0x35, 0x50, 0x6b, 0xd7, 0xdf, 0x51, 0xf1, 0x59, 0x64, 0x59, + 0xb8, 0x14, 0x13, 0x11, 0xdb, 0x5b, 0x0d, 0xd4, 0xaa, 0xfa, 0xea, 0xdd, 0xba, 0x8d, 0xcb, 0x31, + 0xd0, 0x41, 0x2c, 0xed, 0x62, 0x03, 0xb5, 0x4a, 0xbe, 0x89, 0xac, 0x17, 0x78, 0xcf, 0xe8, 0x0f, + 0x62, 0x45, 0x6c, 0x97, 0x1a, 0xa8, 0x55, 0x69, 0xdb, 0xee, 0x4a, 0x90, 0xab, 0x85, 0xea, 0x8b, + 0xfd, 0x9a, 0xc1, 0x1b, 0x1d, 0x87, 0x38, 0x3f, 0x08, 0x20, 0xe5, 0x61, 0x6c, 0x6f, 0x2b, 0xfe, + 0xaa, 0x39, 0x3c, 0x59, 0x9c, 0x59, 0x47, 0xf8, 0xff, 0x1c, 0x24, 0x27, 0x81, 0x12, 0x57, 0x56, + 0xe2, 0xf2, 0xda, 0x77, 0x93, 0x53, 0x22, 0xe2, 0xe6, 0x39, 0xde, 0x7e, 0xc5, 0xb3, 0xa1, 0xb0, + 0x5e, 0xe2, 0x1d, 0x2d, 0x47, 0xd8, 0xc5, 0x46, 0xb1, 0x55, 0x69, 0x3f, 0x74, 0x6f, 0x9a, 0x92, + 0xbb, 0xe1, 0x8b, 0x9f, 0xd7, 0x35, 0xbf, 0x23, 0xbc, 0x7b, 0xac, 0x1c, 0x61, 0x7d, 0xfe, 0x27, + 0xbb, 0xde, 0xe0, 0x5a, 0x42, 0x24, 0x08, 0x99, 0x3b, 0xb0, 0xa5, 0x1c, 0xf8, 0xe7, 0x1b, 0xab, + 0xba, 0xda, 0xf8, 0xd1, 0xc1, 0x26, 0x0e, 0xfa, 0x8b, 0x4e, 0x94, 0xdd, 0x95, 0xf6, 0x83, 0x9b, + 0xc9, 0x54, 0xc3, 0x7e, 0x45, 0x17, 0xe9, 0xee, 0x9f, 0xe3, 0x7d, 0x49, 0x47, 0x20, 0x24, 0x19, + 0xa5, 0x10, 0x19, 0x59, 0x22, 0x08, 0xf9, 0x15, 0x93, 0x6a, 0x3e, 0x25, 0xff, 0xce, 0x1a, 0x40, + 0xdf, 0x2c, 0x8e, 0x17, 0xe9, 0xe6, 0x0f, 0x84, 0xeb, 0x6f, 0x17, 0x9b, 0xa3, 0x9c, 0xef, 0x02, + 0x49, 0x20, 0xb2, 0x7c, 0x5c, 0x1b, 0x93, 0x84, 0x46, 0x44, 0xf2, 0x2c, 0x10, 0x20, 0x6d, 0xa4, + 0x4c, 0x7d, 0xbc, 0x54, 0xb5, 0xb1, 0xbb, 0x0b, 0x55, 0x97, 0x39, 0xfc, 0x3d, 0x95, 0x71, 0x27, + 0x11, 0xaf, 0x61, 0xea, 0x57, 0x97, 0x1c, 0x5d, 0x90, 0xd6, 0x09, 0xae, 0xab, 0x0d, 0xd5, 0x63, + 0x0f, 0x28, 0xeb, 0x73, 0xe3, 0xdc, 0xdd, 0xf5, 0xdd, 0xd1, 0xcb, 0xec, 0x2a, 0x49, 0x17, 0xa9, + 0xf0, 0xf7, 0xd2, 0xa5, 0x38, 0x35, 0x98, 0x73, 0x7c, 0x6b, 0x9d, 0x66, 0x4c, 0x12, 0x25, 0xb0, + 0xf8, 0x77, 0xa6, 0xfa, 0x8a, 0xe9, 0x92, 0x24, 0x5d, 0x90, 0x9d, 0x8b, 0xcf, 
0x33, 0x07, 0x5d, + 0xcf, 0x1c, 0xf4, 0x75, 0xe6, 0xa0, 0x4f, 0x73, 0xa7, 0x70, 0x3d, 0x77, 0x0a, 0x5f, 0xe6, 0x4e, + 0xe1, 0xc3, 0xb3, 0x01, 0x95, 0xf1, 0x55, 0xcf, 0x0d, 0xf9, 0xc8, 0x33, 0x3d, 0xab, 0xf1, 0xe7, + 0x81, 0x37, 0xf9, 0xe5, 0x47, 0xa1, 0x56, 0xbe, 0x57, 0x56, 0x5f, 0xdf, 0xd3, 0x9f, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xd6, 0xfb, 0xc0, 0x15, 0x4e, 0x04, 0x00, 0x00, } func (m *IndexedHeader) Marshal() (dAtA []byte, err error) { @@ -485,6 +496,11 @@ func (m *ChainInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TimestampedHeadersCount != 0 { + i = encodeVarintZoneconcierge(dAtA, i, uint64(m.TimestampedHeadersCount)) + i-- + dAtA[i] = 0x20 + } if m.LatestForks != nil { { size, err := m.LatestForks.MarshalToSizedBuffer(dAtA[:i]) @@ -655,6 +671,9 @@ func (m *ChainInfo) Size() (n int) { l = m.LatestForks.Size() n += 1 + l + sovZoneconcierge(uint64(l)) } + if m.TimestampedHeadersCount != 0 { + n += 1 + sovZoneconcierge(uint64(m.TimestampedHeadersCount)) + } return n } @@ -1128,6 +1147,25 @@ func (m *ChainInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampedHeadersCount", wireType) + } + m.TimestampedHeadersCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampedHeadersCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipZoneconcierge(dAtA[iNdEx:]) From c2fd7603a14ccf52c171c5f9f58e4a6b177c5e1f Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Tue, 10 Jan 2023 20:06:55 +1100 Subject: [PATCH 10/37] zoneconcierge API: find the BTC-finalised chain info before specific CZ height (#264) --- proto/babylon/zoneconcierge/query.proto | 44 + x/btccheckpoint/keeper/keeper.go | 27 +- x/btccheckpoint/keeper/msg_server_test.go | 26 +- .../keeper/canonical_chain_indexer.go | 30 + .../keeper/canonical_chain_indexer_test.go | 45 +- .../keeper/epoch_chain_info_indexer.go | 31 + x/zoneconcierge/keeper/grpc_query.go | 142 +- x/zoneconcierge/keeper/grpc_query_test.go | 16 +- x/zoneconcierge/keeper/keeper_test.go | 4 +- .../keeper/proof_epoch_submitted.go | 4 +- x/zoneconcierge/types/expected_keepers.go | 2 +- x/zoneconcierge/types/mocked_keepers.go | 19 +- x/zoneconcierge/types/query.pb.go | 1198 +++++++++++++++-- x/zoneconcierge/types/query.pb.gw.go | 141 ++ 14 files changed, 1510 insertions(+), 219 deletions(-) diff --git a/proto/babylon/zoneconcierge/query.proto b/proto/babylon/zoneconcierge/query.proto index 374943457..5405eed6f 100644 --- a/proto/babylon/zoneconcierge/query.proto +++ b/proto/babylon/zoneconcierge/query.proto @@ -46,6 +46,10 @@ service Query { rpc FinalizedChainInfo(QueryFinalizedChainInfoRequest) returns (QueryFinalizedChainInfoResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}"; } + // FinalizedChainInfoUntilHeight queries the BTC-finalised info no later than the provided CZ height, with proofs + rpc FinalizedChainInfoUntilHeight(QueryFinalizedChainInfoUntilHeightRequest) returns (QueryFinalizedChainInfoUntilHeightResponse) { + option (google.api.http).get = "/babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}/height/{height}"; + } } // QueryParamsRequest is request type for the Query/Params RPC method. 
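The HTTP annotation above maps the new query to a GET route. A minimal sketch of hitting it through the node's REST gateway, assuming the gateway listens on the conventional localhost:1317; the chain ID, the height, and the prove query parameter (relying on grpc-gateway's usual mapping of non-path fields) are illustrative assumptions rather than anything guaranteed by this patch.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// The path shape mirrors the google.api.http option declared above for
	// FinalizedChainInfoUntilHeight; host, port, chain ID and height are placeholders.
	url := "http://localhost:1317/babylon/zoneconcierge/v1/finalized_chain_info/my-cz-chain/height/100?prove=true"

	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// The body is the JSON-encoded QueryFinalizedChainInfoUntilHeightResponse.
	fmt.Println(string(body))
}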
@@ -151,3 +155,43 @@ message QueryFinalizedChainInfoResponse { // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission repeated babylon.btccheckpoint.v1.TransactionInfo proof_epoch_submitted = 8; } + +// QueryFinalizedChainInfoUntilHeightRequest is request type for the Query/FinalizedChainInfoUntilHeight RPC method. +message QueryFinalizedChainInfoUntilHeightRequest { + // chain_id is the ID of the CZ + string chain_id = 1; + // height is the height of the CZ chain + // such that the returned finalised chain info will be no later than this height + uint64 height = 2; + // prove indicates whether the querier wants to get proofs of this timestamp + bool prove = 3; +} + +// QueryFinalizedChainInfoUntilHeightResponse is response type for the Query/FinalizedChainInfoUntilHeight RPC method. +message QueryFinalizedChainInfoUntilHeightResponse { + // finalized_chain_info is the info of the CZ + babylon.zoneconcierge.v1.ChainInfo finalized_chain_info = 1; + + /* + The following fields include metadata related to this chain info + */ + // epoch_info is the metadata of the last BTC-finalised epoch + babylon.epoching.v1.Epoch epoch_info = 2; + // raw_checkpoint is the raw checkpoint of this epoch + babylon.checkpointing.v1.RawCheckpoint raw_checkpoint = 3; + // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch + babylon.btccheckpoint.v1.SubmissionKey btc_submission_key = 4; + + /* + The following fields include proofs that attest the chain info is BTC-finalised + */ + // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block + tendermint.types.TxProof proof_tx_in_block = 5; + // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch + tendermint.crypto.Proof proof_header_in_epoch = 6; + // proof_epoch_sealed is the proof that the epoch is sealed + babylon.zoneconcierge.v1.ProofEpochSealed proof_epoch_sealed = 7; + // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger + // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission + repeated babylon.btccheckpoint.v1.TransactionInfo proof_epoch_submitted = 8; +} diff --git a/x/btccheckpoint/keeper/keeper.go b/x/btccheckpoint/keeper/keeper.go index 3c5630121..b0bdd6e0f 100644 --- a/x/btccheckpoint/keeper/keeper.go +++ b/x/btccheckpoint/keeper/keeper.go @@ -8,6 +8,7 @@ import ( txformat "github.com/babylonchain/babylon/btctxformatter" bbn "github.com/babylonchain/babylon/types" "github.com/babylonchain/babylon/x/btccheckpoint/types" + checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/store/prefix" storetypes "github.com/cosmos/cosmos-sdk/store/types" @@ -173,7 +174,7 @@ func (k Keeper) SubmissionExists(ctx sdk.Context, sk types.SubmissionKey) bool { return k.GetSubmissionData(ctx, sk) != nil } -// Return epoch data for given epoch, if there is not epoch data yet returns nil +// GetEpochData returns epoch data for given epoch, if there is not epoch data yet returns nil func (k Keeper) GetEpochData(ctx sdk.Context, e uint64) *types.EpochData { store := ctx.KVStore(k.storeKey) bytes := store.Get(types.GetEpochIndexKey(e)) @@ -187,6 +188,30 @@ func (k Keeper) GetEpochData(ctx sdk.Context, e uint64) *types.EpochData { return ed } +// GetFinalizedEpochDataWithBestSubmission gets the status, raw checkpoint bytes, +// and the best submission of a given epoch +func (k 
Keeper) GetFinalizedEpochDataWithBestSubmission(ctx sdk.Context, epochNumber uint64) (types.BtcStatus, *checkpointingtypes.RawCheckpoint, *types.SubmissionKey, error) { + // find the btc checkpoint tx index of this epoch + ed := k.GetEpochData(ctx, epochNumber) + if ed == nil { + return 0, nil, nil, types.ErrNoCheckpointsForPreviousEpoch + } + if ed.Status != types.Finalized { + return 0, nil, nil, fmt.Errorf("epoch %d has not been finalized yet", epochNumber) + } + if len(ed.Key) == 0 { + return 0, nil, nil, types.ErrNoCheckpointsForPreviousEpoch + } + bestSubmissionKey := ed.Key[0] // index of checkpoint tx on BTC + + // get raw checkpoint of this epoch + rawCheckpoint, err := checkpointingtypes.FromBTCCkptBytesToRawCkpt(ed.RawCheckpoint) + if err != nil { + return 0, nil, nil, err + } + return ed.Status, rawCheckpoint, bestSubmissionKey, nil +} + // checkAncestors checks if there is at least one ancestor in previous epoch submissions // previous epoch submission is considered ancestor when: // - it is on main chain diff --git a/x/btccheckpoint/keeper/msg_server_test.go b/x/btccheckpoint/keeper/msg_server_test.go index 406bd6044..7de9ffe84 100644 --- a/x/btccheckpoint/keeper/msg_server_test.go +++ b/x/btccheckpoint/keeper/msg_server_test.go @@ -73,7 +73,7 @@ func (k *TestKeepers) setEpoch(epoch uint64) { k.Checkpointing.SetEpoch(epoch) } -func (k *TestKeepers) getEpochData(e uint64) *btcctypes.EpochData { +func (k *TestKeepers) GetEpochData(e uint64) *btcctypes.EpochData { return k.BTCCheckpoint.GetEpochData(k.SdkCtx, e) } @@ -201,7 +201,7 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { require.NoErrorf(t, err, "Unexpected message processing error: %v", err) - ed := tk.getEpochData(epoch) + ed := tk.GetEpochData(epoch) if len(ed.Key) == 0 { t.Errorf("There should be at least one key in epoch %d", epoch) @@ -238,7 +238,7 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { require.Equal(t, msg.Proofs[i].MerkleNodes, txInfo.Proof) } - ed1 := tk.getEpochData(epoch) + ed1 := tk.GetEpochData(epoch) // TODO Add custom equal fo submission key and transaction key to check // it is expected key @@ -395,7 +395,7 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { for i := 1; i <= 3; i++ { // all 3 epoch must have two submissions - ed := tk.getEpochData(uint64(i)) + ed := tk.GetEpochData(uint64(i)) require.NotNil(t, ed) require.Len(t, ed.Key, 2) require.EqualValues(t, ed.Status, btcctypes.Submitted) @@ -409,7 +409,7 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { tk.onTipChange() for i := 1; i <= 3; i++ { - ed := tk.getEpochData(uint64(i)) + ed := tk.GetEpochData(uint64(i)) require.NotNil(t, ed) if i == 1 { @@ -430,7 +430,7 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { for i := 1; i <= 3; i++ { // all 3 epoch must have two submissions - ed := tk.getEpochData(uint64(i)) + ed := tk.GetEpochData(uint64(i)) require.NotNil(t, ed) require.Len(t, ed.Key, 0) require.EqualValues(t, ed.Status, btcctypes.Submitted) @@ -461,7 +461,7 @@ func TestLeaveOnlyBestSubmissionWhenEpochFinalized(t *testing.T) { _, err = tk.insertProofMsg(msg3) require.NoError(t, err, "failed to insert submission") - ed := tk.getEpochData(uint64(1)) + ed := tk.GetEpochData(uint64(1)) require.NotNil(t, ed) require.Len(t, ed.Key, 3) @@ -475,7 +475,7 @@ func TestLeaveOnlyBestSubmissionWhenEpochFinalized(t *testing.T) { tk.onTipChange() - ed = tk.getEpochData(uint64(1)) + ed = tk.GetEpochData(uint64(1)) require.NotNil(t, ed) require.Len(t, ed.Key, 1) require.Equal(t, 
ed.Status, btcctypes.Finalized) @@ -504,7 +504,7 @@ func TestTxIdxShouldBreakTies(t *testing.T) { _, err = tk.insertProofMsg(msg2) require.NoError(t, err, "failed to insert submission") - ed := tk.getEpochData(uint64(1)) + ed := tk.GetEpochData(uint64(1)) require.NotNil(t, ed) require.Len(t, ed.Key, 2) @@ -518,7 +518,7 @@ func TestTxIdxShouldBreakTies(t *testing.T) { tk.onTipChange() - ed = tk.getEpochData(uint64(1)) + ed = tk.GetEpochData(uint64(1)) require.NotNil(t, ed) require.Len(t, ed.Key, 1) require.Equal(t, ed.Status, btcctypes.Finalized) @@ -561,7 +561,7 @@ func TestStateTransitionOfValidSubmission(t *testing.T) { } // TODO customs Equality for submission keys - ed := tk.getEpochData(epoch) + ed := tk.GetEpochData(epoch) if len(ed.Key) != 1 { t.Errorf("Unexpected missing submissions") @@ -579,7 +579,7 @@ func TestStateTransitionOfValidSubmission(t *testing.T) { tk.onTipChange() // TODO customs Equality for submission keys to check this are really keys // we are looking for - ed = tk.getEpochData(epoch) + ed = tk.GetEpochData(epoch) if len(ed.Key) != 1 { t.Errorf("Unexpected missing submission") @@ -594,7 +594,7 @@ func TestStateTransitionOfValidSubmission(t *testing.T) { tk.onTipChange() - ed = tk.getEpochData(epoch) + ed = tk.GetEpochData(epoch) if ed == nil || ed.Status != btcctypes.Finalized { t.Errorf("Epoch Data missing of in unexpected state") diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer.go b/x/zoneconcierge/keeper/canonical_chain_indexer.go index 73bd3cc9a..f71a72a60 100644 --- a/x/zoneconcierge/keeper/canonical_chain_indexer.go +++ b/x/zoneconcierge/keeper/canonical_chain_indexer.go @@ -1,12 +1,42 @@ package keeper import ( + "fmt" + sdkerrors "cosmossdk.io/errors" "github.com/babylonchain/babylon/x/zoneconcierge/types" "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" ) +// FindClosestHeader finds the IndexedHeader that is closest to (but not after) the given height +func (k Keeper) FindClosestHeader(ctx sdk.Context, chainID string, height uint64) (*types.IndexedHeader, error) { + chainInfo := k.GetChainInfo(ctx, chainID) + if chainInfo.LatestHeader == nil { + return nil, fmt.Errorf("chain with ID %s does not have a timestamped header", chainID) + } + + // if the given height is no lower than the latest header, return the latest header directly + if chainInfo.LatestHeader.Height <= height { + return chainInfo.LatestHeader, nil + } + + // the requested height is lower than the latest header, trace back until finding a timestamped header + store := k.canonicalChainStore(ctx, chainID) + heightBytes := sdk.Uint64ToBigEndian(height) + iter := store.ReverseIterator(nil, heightBytes) + defer iter.Close() + // if there is no key within range [0, height], return error + if !iter.Valid() { + return nil, fmt.Errorf("chain with ID %s does not have a timestamped header before height %d", chainID, height) + } + // find the header in bytes, decode and return + headerBytes := iter.Value() + var header types.IndexedHeader + k.cdc.MustUnmarshal(headerBytes, &header) + return &header, nil +} + func (k Keeper) GetHeader(ctx sdk.Context, chainID string, height uint64) (*types.IndexedHeader, error) { store := k.canonicalChainStore(ctx, chainID) heightBytes := sdk.Uint64ToBigEndian(height) diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go index 7f7d8f53a..bdae49362 100644 --- a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go +++ 
b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go @@ -20,9 +20,9 @@ func FuzzCanonicalChainIndexer(f *testing.F) { ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() - // invoke the hook a random number of times to simulate a random number of blocks + // simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 - headers := SimulateHeadersViaHook(ctx, hooks, czChain.ChainID, numHeaders) + headers := SimulateHeadersViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders) // check if the canonical chain index is correct or not for i := uint64(0); i < numHeaders; i++ { @@ -42,3 +42,44 @@ func FuzzCanonicalChainIndexer(f *testing.F) { require.Equal(t, headers[numHeaders-1].Header.LastCommitHash, chainInfo.LatestHeader.Hash) }) } + +func FuzzFindClosestHeader(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + // no header at the moment, so invoking FindClosestHeader should return an error + _, err := zcKeeper.FindClosestHeader(ctx, czChain.ChainID, 100) + require.Error(t, err) + + // simulate a random number of blocks + numHeaders := datagen.RandomInt(100) + 1 + headers := SimulateHeadersViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders) + + header, err := zcKeeper.FindClosestHeader(ctx, czChain.ChainID, numHeaders) + require.NoError(t, err) + require.Equal(t, headers[len(headers)-1].Header.LastCommitHash, header.Hash) + + // skip a non-zero number of headers in between, in order to create a gap of non-timestamped headers + gap := datagen.RandomInt(10) + 1 + + // simulate a random number of blocks + // where the new batch of headers has a gap with the previous batch + SimulateHeadersViaHook(ctx, hooks, czChain.ChainID, numHeaders+gap+1, numHeaders) + + // get a random height that is in this gap + randomHeightInGap := datagen.RandomInt(int(gap+1)) + numHeaders + // find the closest header with the given randomHeightInGap + header, err = zcKeeper.FindClosestHeader(ctx, czChain.ChainID, randomHeightInGap) + require.NoError(t, err) + // the closest header should be the last header of the first batch, i.e., the last timestamped header before the gap + require.Equal(t, headers[len(headers)-1].Header.LastCommitHash, header.Hash) + }) +} diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go index 121c8bcef..dc3cdced1 100644 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go +++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go @@ -20,6 +20,37 @@ func (k Keeper) GetEpochChainInfo(ctx sdk.Context, chainID string, epochNumber u return &chainInfo, nil } +// GetLastFinalizedChainInfo gets the last finalised chain info recorded for a given chain ID +// and the earliest epoch that snapshots this chain info +func (k Keeper) GetLastFinalizedChainInfo(ctx sdk.Context, chainID string) (uint64, *types.ChainInfo, error) { + // find the last finalised epoch + finalizedEpoch, err := k.GetFinalizedEpoch(ctx) + if err != nil { + return 0, nil, err + } + + // find the chain info of this epoch + chainInfo, err := k.GetEpochChainInfo(ctx, chainID, finalizedEpoch) + if err != nil { + return finalizedEpoch, nil, err + } + + // It's possible that the chain info's epoch is way before the last finalised epoch + // e.g., when there is no relayer for many epochs + // NOTE: if an epoch is finalised then all of its previous epochs are also finalised 
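+ // e.g. (illustrative numbers only): if the last finalised epoch is 100 but this chain's latest timestamped header was recorded in Babylon epoch 90, then the epoch-90 snapshot is the most recent finalised view of this chain, so we fall back to epoch 90 below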
+ if chainInfo.LatestHeader.BabylonEpoch < finalizedEpoch { + // remember the last finalised epoch + finalizedEpoch = chainInfo.LatestHeader.BabylonEpoch + // replace the chain info w.r.t. this last finalised epoch + chainInfo, err = k.GetEpochChainInfo(ctx, chainID, finalizedEpoch) + if err != nil { + return finalizedEpoch, nil, err + } + } + + return finalizedEpoch, chainInfo, nil +} + // GetEpochHeaders gets the headers timestamped in a given epoch, in the ascending order func (k Keeper) GetEpochHeaders(ctx sdk.Context, chainID string, epochNumber uint64) ([]*types.IndexedHeader, error) { headers := []*types.IndexedHeader{} diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index ea8190fe4..8cb75bfe0 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -2,13 +2,14 @@ package keeper import ( "context" - "fmt" btcctypes "github.com/babylonchain/babylon/x/btccheckpoint/types" - checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" + epochingtypes "github.com/babylonchain/babylon/x/epoching/types" "github.com/babylonchain/babylon/x/zoneconcierge/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/query" + tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -132,56 +133,97 @@ func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedC ctx := sdk.UnwrapSDKContext(c) - // find the last finalised epoch - finalizedEpoch, err := k.GetFinalizedEpoch(ctx) + // find the last finalised chain info and the earliest epoch that snapshots this chain info + finalizedEpoch, chainInfo, err := k.GetLastFinalizedChainInfo(ctx, req.ChainId) if err != nil { return nil, err } - // find the chain info of this epoch - chainInfo, err := k.GetEpochChainInfo(ctx, req.ChainId, finalizedEpoch) + // find the epoch metadata of the finalised epoch + epochInfo, err := k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) if err != nil { return nil, err } - // It's possible that the chain info's epoch is way before the last finalised epoch - // e.g., when there is no relayer for many epochs - // NOTE: if an epoch is finalised then all of its previous epochs are also finalised - if chainInfo.LatestHeader.BabylonEpoch < finalizedEpoch { - finalizedEpoch = chainInfo.LatestHeader.BabylonEpoch + // find the raw checkpoint and the best submission key for the finalised epoch + _, rawCheckpoint, bestSubmissionKey, err := k.btccKeeper.GetFinalizedEpochDataWithBestSubmission(ctx, finalizedEpoch) + if err != nil { + return nil, err } - // find the epoch metadata - epochInfo, err := k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) + resp := &types.QueryFinalizedChainInfoResponse{ + FinalizedChainInfo: chainInfo, + // metadata related to this chain info, including the epoch, the raw checkpoint of this epoch, and the BTC tx index of the raw checkpoint + EpochInfo: epochInfo, + RawCheckpoint: rawCheckpoint, + BtcSubmissionKey: bestSubmissionKey, + } + + // if the query does not want the proofs, return here + if !req.Prove { + return resp, nil + } + + // generate all proofs + resp.ProofTxInBlock, resp.ProofHeaderInEpoch, resp.ProofEpochSealed, resp.ProofEpochSubmitted, err = k.proveFinalizedChainInfo(ctx, chainInfo, epochInfo, bestSubmissionKey) if err != nil { return nil, err } - // find the btc checkpoint tx index of this 
epoch - ed := k.btccKeeper.GetEpochData(ctx, finalizedEpoch) - if ed.Status != btcctypes.Finalized { - err := fmt.Errorf("epoch %d should have been finalized, but is in status %s", finalizedEpoch, ed.Status.String()) - panic(err) // this can only be a programming error + return resp, nil +} + +func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.QueryFinalizedChainInfoUntilHeightRequest) (*types.QueryFinalizedChainInfoUntilHeightResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") } - if len(ed.Key) == 0 { - err := fmt.Errorf("finalized epoch %d should have at least 1 checkpoint submission", finalizedEpoch) - panic(err) // this can only be a programming error + + if len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") } - bestSubmissionKey := ed.Key[0] // index of checkpoint tx on BTC - // get raw checkpoint of this epoch - rawCheckpointBytes := ed.RawCheckpoint - rawCheckpoint, err := checkpointingtypes.FromBTCCkptBytesToRawCkpt(rawCheckpointBytes) + ctx := sdk.UnwrapSDKContext(c) + resp := &types.QueryFinalizedChainInfoUntilHeightResponse{} + + // find and assign the last finalised chain info and the earliest epoch that snapshots this chain info + finalizedEpoch, chainInfo, err := k.GetLastFinalizedChainInfo(ctx, req.ChainId) if err != nil { return nil, err } - - resp := &types.QueryFinalizedChainInfoResponse{ - FinalizedChainInfo: chainInfo, - // metadata related to this chain info, including the epoch, the raw checkpoint of this epoch, and the BTC tx index of the raw checkpoint - EpochInfo: epochInfo, - RawCheckpoint: rawCheckpoint, - BtcSubmissionKey: bestSubmissionKey, + resp.FinalizedChainInfo = chainInfo + + if chainInfo.LatestHeader.Height <= req.Height { // the requested height is after the last finalised chain info + // find and assign the epoch metadata of the finalised epoch + resp.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) + if err != nil { + return nil, err + } + + // find and assign the raw checkpoint and the best submission key for the finalised epoch + _, resp.RawCheckpoint, resp.BtcSubmissionKey, err = k.btccKeeper.GetFinalizedEpochDataWithBestSubmission(ctx, finalizedEpoch) + if err != nil { + return nil, err + } + } else { // the requested height is before the last finalised chain info + // starting from the requested height, iterate backward until a timestamped header + closestHeader, err := k.FindClosestHeader(ctx, req.ChainId, req.Height) + if err != nil { + return nil, err + } + // assign the finalizedEpoch, and retrieve epoch info, raw ckpt and submission key + finalizedEpoch = closestHeader.BabylonEpoch + resp.FinalizedChainInfo, err = k.GetEpochChainInfo(ctx, req.ChainId, finalizedEpoch) + if err != nil { + return nil, err + } + resp.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) + if err != nil { + return nil, err + } + _, resp.RawCheckpoint, resp.BtcSubmissionKey, err = k.btccKeeper.GetFinalizedEpochDataWithBestSubmission(ctx, finalizedEpoch) + if err != nil { + return nil, err + } } // if the query does not want the proofs, return here @@ -189,33 +231,51 @@ func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedC return resp, nil } - // Proof that the Babylon tx is in block - resp.ProofTxInBlock, err = k.ProveTxInBlock(ctx, chainInfo.LatestHeader.BabylonTxHash) + // generate all proofs + resp.ProofTxInBlock, resp.ProofHeaderInEpoch, resp.ProofEpochSealed, 
resp.ProofEpochSubmitted, err = k.proveFinalizedChainInfo(ctx, resp.FinalizedChainInfo, resp.EpochInfo, resp.BtcSubmissionKey) if err != nil { return nil, err } + return resp, nil +} + +// proveFinalizedChainInfo generates proofs that a chainInfo has been finalised by the given epoch with epochInfo +// It includes proofTxInBlock, proofHeaderInEpoch, proofEpochSealed and proofEpochSubmitted +// The proofs can be verified by a verifier with access to a BTC and Babylon light client +// CONTRACT: this is only a private helper function for simplifying the implementation of RPC calls +func (k Keeper) proveFinalizedChainInfo( + ctx sdk.Context, + chainInfo *types.ChainInfo, + epochInfo *epochingtypes.Epoch, + bestSubmissionKey *btcctypes.SubmissionKey, +) (*tmproto.TxProof, *tmcrypto.Proof, *types.ProofEpochSealed, []*btcctypes.TransactionInfo, error) { + // Proof that the Babylon tx is in block + proofTxInBlock, err := k.ProveTxInBlock(ctx, chainInfo.LatestHeader.BabylonTxHash) + if err != nil { + return nil, nil, nil, nil, err + } + // proof that the block is in this epoch - resp.ProofHeaderInEpoch, err = k.ProveHeaderInEpoch(ctx, chainInfo.LatestHeader.BabylonHeader, epochInfo) + proofHeaderInEpoch, err := k.ProveHeaderInEpoch(ctx, chainInfo.LatestHeader.BabylonHeader, epochInfo) if err != nil { - return nil, err + return nil, nil, nil, nil, err } // proof that the epoch is sealed - resp.ProofEpochSealed, err = k.ProveEpochSealed(ctx, finalizedEpoch) + proofEpochSealed, err := k.ProveEpochSealed(ctx, epochInfo.EpochNumber) if err != nil { - return nil, err + return nil, nil, nil, nil, err } // proof that the epoch's checkpoint is submitted to BTC // i.e., the two `TransactionInfo`s for the checkpoint - resp.ProofEpochSubmitted, err = k.ProveEpochSubmitted(ctx, *bestSubmissionKey) + proofEpochSubmitted, err := k.ProveEpochSubmitted(ctx, bestSubmissionKey) if err != nil { // The only error in ProveEpochSubmitted is the nil bestSubmission. // Since the epoch w.r.t. the bestSubmissionKey is finalised, this // can only be a programming error, so we should panic here. 
panic(err) } - - return resp, nil + return proofTxInBlock, proofHeaderInEpoch, proofEpochSealed, proofEpochSubmitted, nil } diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go index 8b13a9f5f..24315d4bc 100644 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ b/x/zoneconcierge/keeper/grpc_query_test.go @@ -262,15 +262,17 @@ func FuzzFinalizedChainInfo(f *testing.F) { checkpointingKeeper.EXPECT().GetBLSPubKeySet(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return([]*checkpointingtypes.ValidatorWithBlsKey{}, nil).AnyTimes() // mock btccheckpoint keeper // TODO: test with BTCSpvProofs + randomRawCkpt := datagen.GenRandomRawCheckpoint() + randomRawCkpt.EpochNum = epoch.EpochNumber btccKeeper := zctypes.NewMockBtcCheckpointKeeper(ctrl) - mockEpochData := &btcctypes.EpochData{ - Key: []*btcctypes.SubmissionKey{ - {Key: []*btcctypes.TransactionKey{}}, + btccKeeper.EXPECT().GetFinalizedEpochDataWithBestSubmission(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return( + btcctypes.Finalized, + randomRawCkpt, + &btcctypes.SubmissionKey{ + Key: []*btcctypes.TransactionKey{}, }, - Status: btcctypes.Finalized, - RawCheckpoint: datagen.RandomRawCheckpointDataForEpoch(epoch.EpochNumber).ExpectedOpReturn, - } - btccKeeper.EXPECT().GetEpochData(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return(mockEpochData).AnyTimes() + nil, + ).AnyTimes() mockSubmissionData := &btcctypes.SubmissionData{TxsInfo: []*btcctypes.TransactionInfo{}} btccKeeper.EXPECT().GetSubmissionData(gomock.Any(), gomock.Any()).Return(mockSubmissionData).AnyTimes() // mock epoching keeper diff --git a/x/zoneconcierge/keeper/keeper_test.go b/x/zoneconcierge/keeper/keeper_test.go index 1152d6cc8..12069d90f 100644 --- a/x/zoneconcierge/keeper/keeper_test.go +++ b/x/zoneconcierge/keeper/keeper_test.go @@ -34,11 +34,11 @@ func SetupTest(t *testing.T) (*ibctesting.Coordinator, *ibctesting.TestChain, *i } // SimulateHeadersViaHook generates a non-zero number of canonical headers via the hook -func SimulateHeadersViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID string, numHeaders uint64) []*ibctmtypes.Header { +func SimulateHeadersViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID string, startHeight uint64, numHeaders uint64) []*ibctmtypes.Header { headers := []*ibctmtypes.Header{} // invoke the hook a number of times to simulate a number of blocks for i := uint64(0); i < numHeaders; i++ { - header := datagen.GenRandomIBCTMHeader(chainID, i) + header := datagen.GenRandomIBCTMHeader(chainID, startHeight+i) hooks.AfterHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(32), header, false) headers = append(headers, header) } diff --git a/x/zoneconcierge/keeper/proof_epoch_submitted.go b/x/zoneconcierge/keeper/proof_epoch_submitted.go index 54efdd81f..758f2fc3e 100644 --- a/x/zoneconcierge/keeper/proof_epoch_submitted.go +++ b/x/zoneconcierge/keeper/proof_epoch_submitted.go @@ -14,8 +14,8 @@ import ( // ProveEpochSubmitted generates proof that the epoch's checkpoint is submitted to BTC // i.e., the two `TransactionInfo`s for the checkpoint -func (k Keeper) ProveEpochSubmitted(ctx sdk.Context, sk btcctypes.SubmissionKey) ([]*btcctypes.TransactionInfo, error) { - bestSubmissionData := k.btccKeeper.GetSubmissionData(ctx, sk) +func (k Keeper) ProveEpochSubmitted(ctx sdk.Context, sk *btcctypes.SubmissionKey) ([]*btcctypes.TransactionInfo, error) { + bestSubmissionData := k.btccKeeper.GetSubmissionData(ctx, *sk) if bestSubmissionData == nil { return nil, fmt.Errorf("the best submission key for epoch %d 
has no submission data", bestSubmissionData.Epoch) } diff --git a/x/zoneconcierge/types/expected_keepers.go b/x/zoneconcierge/types/expected_keepers.go index 8317fefe7..236d54430 100644 --- a/x/zoneconcierge/types/expected_keepers.go +++ b/x/zoneconcierge/types/expected_keepers.go @@ -68,7 +68,7 @@ type ScopedKeeper interface { } type BtcCheckpointKeeper interface { - GetEpochData(ctx sdk.Context, e uint64) *btcctypes.EpochData + GetFinalizedEpochDataWithBestSubmission(ctx sdk.Context, e uint64) (btcctypes.BtcStatus, *checkpointingtypes.RawCheckpoint, *btcctypes.SubmissionKey, error) GetSubmissionData(ctx sdk.Context, sk btcctypes.SubmissionKey) *btcctypes.SubmissionData } diff --git a/x/zoneconcierge/types/mocked_keepers.go b/x/zoneconcierge/types/mocked_keepers.go index 8126d2ba7..1e48d0971 100644 --- a/x/zoneconcierge/types/mocked_keepers.go +++ b/x/zoneconcierge/types/mocked_keepers.go @@ -502,18 +502,21 @@ func (m *MockBtcCheckpointKeeper) EXPECT() *MockBtcCheckpointKeeperMockRecorder return m.recorder } -// GetEpochData mocks base method. -func (m *MockBtcCheckpointKeeper) GetEpochData(ctx types2.Context, e uint64) *types.EpochData { +// GetFinalizedEpochDataWithBestSubmission mocks base method. +func (m *MockBtcCheckpointKeeper) GetFinalizedEpochDataWithBestSubmission(ctx types2.Context, e uint64) (types.BtcStatus, *types0.RawCheckpoint, *types.SubmissionKey, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetEpochData", ctx, e) - ret0, _ := ret[0].(*types.EpochData) - return ret0 + ret := m.ctrl.Call(m, "GetFinalizedEpochDataWithBestSubmission", ctx, e) + ret0, _ := ret[0].(types.BtcStatus) + ret1, _ := ret[1].(*types0.RawCheckpoint) + ret2, _ := ret[2].(*types.SubmissionKey) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 } -// GetEpochData indicates an expected call of GetEpochData. -func (mr *MockBtcCheckpointKeeperMockRecorder) GetEpochData(ctx, e interface{}) *gomock.Call { +// GetFinalizedEpochDataWithBestSubmission indicates an expected call of GetFinalizedEpochDataWithBestSubmission. +func (mr *MockBtcCheckpointKeeperMockRecorder) GetFinalizedEpochDataWithBestSubmission(ctx, e interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEpochData", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetEpochData), ctx, e) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFinalizedEpochDataWithBestSubmission", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetFinalizedEpochDataWithBestSubmission), ctx, e) } // GetSubmissionData mocks base method. diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go index 1e6a5e6ce..6ed73d11e 100644 --- a/x/zoneconcierge/types/query.pb.go +++ b/x/zoneconcierge/types/query.pb.go @@ -763,6 +763,189 @@ func (m *QueryFinalizedChainInfoResponse) GetProofEpochSubmitted() []*types2.Tra return nil } +// QueryFinalizedChainInfoUntilHeightRequest is request type for the Query/FinalizedChainInfoUntilHeight RPC method. 
+type QueryFinalizedChainInfoUntilHeightRequest struct { + // chain_id is the ID of the CZ + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // height is the height of the CZ chain + // such that the returned finalised chain info will be no later than this height + Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + // prove indicates whether the querier wants to get proofs of this timestamp + Prove bool `protobuf:"varint,3,opt,name=prove,proto3" json:"prove,omitempty"` +} + +func (m *QueryFinalizedChainInfoUntilHeightRequest) Reset() { + *m = QueryFinalizedChainInfoUntilHeightRequest{} +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) String() string { + return proto.CompactTextString(m) +} +func (*QueryFinalizedChainInfoUntilHeightRequest) ProtoMessage() {} +func (*QueryFinalizedChainInfoUntilHeightRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{14} +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest.Merge(m, src) +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest proto.InternalMessageInfo + +func (m *QueryFinalizedChainInfoUntilHeightRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *QueryFinalizedChainInfoUntilHeightRequest) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *QueryFinalizedChainInfoUntilHeightRequest) GetProve() bool { + if m != nil { + return m.Prove + } + return false +} + +// QueryFinalizedChainInfoUntilHeightResponse is response type for the Query/FinalizedChainInfoUntilHeight RPC method. 
+type QueryFinalizedChainInfoUntilHeightResponse struct { + // finalized_chain_info is the info of the CZ + FinalizedChainInfo *ChainInfo `protobuf:"bytes,1,opt,name=finalized_chain_info,json=finalizedChainInfo,proto3" json:"finalized_chain_info,omitempty"` + // epoch_info is the metadata of the last BTC-finalised epoch + EpochInfo *types.Epoch `protobuf:"bytes,2,opt,name=epoch_info,json=epochInfo,proto3" json:"epoch_info,omitempty"` + // raw_checkpoint is the raw checkpoint of this epoch + RawCheckpoint *types1.RawCheckpoint `protobuf:"bytes,3,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` + // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch + BtcSubmissionKey *types2.SubmissionKey `protobuf:"bytes,4,opt,name=btc_submission_key,json=btcSubmissionKey,proto3" json:"btc_submission_key,omitempty"` + // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block + ProofTxInBlock *types3.TxProof `protobuf:"bytes,5,opt,name=proof_tx_in_block,json=proofTxInBlock,proto3" json:"proof_tx_in_block,omitempty"` + // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch + ProofHeaderInEpoch *crypto.Proof `protobuf:"bytes,6,opt,name=proof_header_in_epoch,json=proofHeaderInEpoch,proto3" json:"proof_header_in_epoch,omitempty"` + // proof_epoch_sealed is the proof that the epoch is sealed + ProofEpochSealed *ProofEpochSealed `protobuf:"bytes,7,opt,name=proof_epoch_sealed,json=proofEpochSealed,proto3" json:"proof_epoch_sealed,omitempty"` + // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger + // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission + ProofEpochSubmitted []*types2.TransactionInfo `protobuf:"bytes,8,rep,name=proof_epoch_submitted,json=proofEpochSubmitted,proto3" json:"proof_epoch_submitted,omitempty"` +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) Reset() { + *m = QueryFinalizedChainInfoUntilHeightResponse{} +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) String() string { + return proto.CompactTextString(m) +} +func (*QueryFinalizedChainInfoUntilHeightResponse) ProtoMessage() {} +func (*QueryFinalizedChainInfoUntilHeightResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{15} +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse.Merge(m, src) +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse proto.InternalMessageInfo + +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetFinalizedChainInfo() *ChainInfo { + if m != nil { + return m.FinalizedChainInfo + } + return nil +} + +func (m 
*QueryFinalizedChainInfoUntilHeightResponse) GetEpochInfo() *types.Epoch { + if m != nil { + return m.EpochInfo + } + return nil +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetRawCheckpoint() *types1.RawCheckpoint { + if m != nil { + return m.RawCheckpoint + } + return nil +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetBtcSubmissionKey() *types2.SubmissionKey { + if m != nil { + return m.BtcSubmissionKey + } + return nil +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProofTxInBlock() *types3.TxProof { + if m != nil { + return m.ProofTxInBlock + } + return nil +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProofHeaderInEpoch() *crypto.Proof { + if m != nil { + return m.ProofHeaderInEpoch + } + return nil +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProofEpochSealed() *ProofEpochSealed { + if m != nil { + return m.ProofEpochSealed + } + return nil +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProofEpochSubmitted() []*types2.TransactionInfo { + if m != nil { + return m.ProofEpochSubmitted + } + return nil +} + func init() { proto.RegisterType((*QueryParamsRequest)(nil), "babylon.zoneconcierge.v1.QueryParamsRequest") proto.RegisterType((*QueryParamsResponse)(nil), "babylon.zoneconcierge.v1.QueryParamsResponse") @@ -778,82 +961,90 @@ func init() { proto.RegisterType((*QueryListEpochHeadersResponse)(nil), "babylon.zoneconcierge.v1.QueryListEpochHeadersResponse") proto.RegisterType((*QueryFinalizedChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoRequest") proto.RegisterType((*QueryFinalizedChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse") + proto.RegisterType((*QueryFinalizedChainInfoUntilHeightRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoUntilHeightRequest") + proto.RegisterType((*QueryFinalizedChainInfoUntilHeightResponse)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoUntilHeightResponse") } func init() { proto.RegisterFile("babylon/zoneconcierge/query.proto", fileDescriptor_2caab7ee15063236) } var fileDescriptor_2caab7ee15063236 = []byte{ - // 1116 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x8e, 0xdb, 0x44, - 0x14, 0x5e, 0xb7, 0xfb, 0x97, 0x59, 0xb1, 0x5a, 0xa6, 0x5b, 0xea, 0x7a, 0xdb, 0x74, 0x31, 0x52, - 0xbb, 0xad, 0x8a, 0x4d, 0x42, 0x97, 0xb2, 0x42, 0x02, 0x6d, 0x5a, 0x5a, 0x42, 0x51, 0xd9, 0xba, - 0xbb, 0x08, 0x21, 0x90, 0x65, 0x3b, 0x93, 0xc4, 0xda, 0x64, 0xc6, 0xb5, 0x9d, 0x34, 0x69, 0xe9, - 0x0d, 0x2f, 0x00, 0x12, 0x37, 0x3c, 0x41, 0x91, 0x78, 0x92, 0x22, 0xf5, 0xa2, 0x12, 0x37, 0x5c, - 0x21, 0xb4, 0xcb, 0x2b, 0x70, 0x8f, 0x7c, 0x66, 0xec, 0xd8, 0x49, 0x4c, 0x92, 0x55, 0x6f, 0xaa, - 0x78, 0xe6, 0x7c, 0xdf, 0xf9, 0xce, 0x99, 0x39, 0xf3, 0x75, 0xd1, 0xdb, 0xb6, 0x65, 0xf7, 0x5b, - 0x8c, 0xea, 0x4f, 0x18, 0x25, 0x0e, 0xa3, 0x8e, 0x4b, 0xfc, 0x06, 0xd1, 0x1f, 0x75, 0x88, 0xdf, - 0xd7, 0x3c, 0x9f, 0x85, 0x0c, 0xcb, 0x22, 0x44, 0xcb, 0x84, 0x68, 0xdd, 0x92, 0xb2, 0xde, 0x60, - 0x0d, 0x06, 0x41, 0x7a, 0xf4, 0x8b, 0xc7, 0x2b, 0x17, 0x1a, 0x8c, 0x35, 0x5a, 0x44, 0xb7, 0x3c, - 0x57, 0xb7, 0x28, 0x65, 0xa1, 0x15, 0xba, 0x8c, 0x06, 0xf1, 0x6e, 0x48, 0x68, 0x8d, 0xf8, 0x6d, - 0x97, 0x86, 0x7a, 0xd8, 0xf7, 0x48, 0xc0, 0xff, 0x15, 0xbb, 0x17, 0x53, 0xbb, 0x8e, 0xdf, 0xf7, - 0x42, 0xa6, 0x7b, 0x3e, 0x63, 0x75, 0xb1, 0x7d, 0xcd, 0x61, 0x41, 0x9b, 0x05, 0xba, 0x6d, 0x05, - 0x42, 0xa3, 0xde, 0x2d, 0xd9, 0x24, 0xb4, 0x4a, 0xba, 0x67, 0x35, 0x5c, 0x0a, 
0x99, 0x44, 0x6c, - 0x31, 0xae, 0xcc, 0x0e, 0x1d, 0xa7, 0x49, 0x9c, 0x43, 0x8f, 0x41, 0xce, 0x9e, 0xd8, 0xbf, 0x3a, - 0x7e, 0x3f, 0xf3, 0x25, 0x42, 0x93, 0x26, 0x0d, 0x76, 0x5c, 0xda, 0x48, 0x37, 0x49, 0xb9, 0x3c, - 0x3e, 0x64, 0x84, 0x4a, 0x8d, 0xe3, 0x88, 0xc7, 0x9c, 0x66, 0x14, 0xd2, 0x2d, 0x25, 0xbf, 0x87, - 0x63, 0xb2, 0x67, 0xe2, 0x59, 0xbe, 0xd5, 0x0e, 0x86, 0xd5, 0x67, 0x63, 0xb2, 0x47, 0x04, 0xa1, - 0xea, 0x3a, 0xc2, 0x0f, 0x22, 0xa5, 0x7b, 0x80, 0x37, 0xc8, 0xa3, 0x0e, 0x09, 0x42, 0xf5, 0x00, - 0x9d, 0xc9, 0xac, 0x06, 0x1e, 0xa3, 0x01, 0xc1, 0x1f, 0xa3, 0x45, 0x9e, 0x47, 0x96, 0x36, 0xa5, - 0xad, 0x95, 0xf2, 0xa6, 0x96, 0x77, 0xfa, 0x1a, 0x47, 0x56, 0xe6, 0x5f, 0xfc, 0x75, 0x69, 0xce, - 0x10, 0x28, 0xf5, 0x1c, 0x3a, 0x0b, 0xb4, 0xb7, 0x9a, 0x96, 0x4b, 0xbf, 0x70, 0x83, 0x30, 0xce, - 0xb7, 0x8d, 0xde, 0x1a, 0xde, 0x10, 0x29, 0x37, 0x50, 0xc1, 0x89, 0x16, 0x4d, 0xb7, 0x16, 0x65, - 0x3d, 0xbd, 0x55, 0x30, 0x96, 0x61, 0xa1, 0x5a, 0x0b, 0xd4, 0x72, 0x9a, 0xaf, 0x4a, 0xeb, 0x4c, - 0xf0, 0xe1, 0xf3, 0x68, 0x39, 0x46, 0x81, 0xd4, 0x82, 0xb1, 0x24, 0x40, 0xea, 0xb7, 0xe9, 0x54, - 0x1c, 0x23, 0x52, 0x55, 0x10, 0x12, 0x20, 0x5a, 0x67, 0xa2, 0xc2, 0x77, 0xf2, 0x2b, 0x1c, 0x10, - 0x70, 0x85, 0xd1, 0x4f, 0x75, 0x1f, 0x29, 0xc0, 0xfe, 0x69, 0x74, 0x68, 0x23, 0xb2, 0x36, 0x50, - 0x01, 0x4e, 0xd3, 0xa4, 0x9d, 0x36, 0x24, 0x98, 0x37, 0x96, 0x61, 0xe1, 0x7e, 0xa7, 0x9d, 0xd1, - 0x7c, 0x2a, 0xab, 0xd9, 0x42, 0x1b, 0x63, 0x59, 0x5f, 0xa3, 0xf0, 0xef, 0xd1, 0x39, 0x48, 0x11, - 0x35, 0xff, 0x33, 0x62, 0xd5, 0x88, 0x1f, 0x4c, 0x6e, 0x26, 0xbe, 0x83, 0xd0, 0x60, 0xb4, 0x40, - 0xf5, 0x4a, 0xf9, 0xb2, 0xc6, 0xe7, 0x50, 0x8b, 0xe6, 0x50, 0xe3, 0x63, 0x20, 0xe6, 0x50, 0xdb, - 0xb3, 0x1a, 0x44, 0xd0, 0x1a, 0x29, 0xa4, 0xfa, 0x5c, 0x42, 0xf2, 0x68, 0x7a, 0x51, 0xde, 0x2e, - 0x5a, 0x6a, 0xf2, 0x25, 0xb8, 0x00, 0x2b, 0xe5, 0x2b, 0xf9, 0xb5, 0x55, 0x69, 0x8d, 0xf4, 0x48, - 0x8d, 0x53, 0x18, 0x31, 0x0e, 0xdf, 0x1d, 0xa3, 0xf3, 0xca, 0x44, 0x9d, 0x3c, 0x7f, 0x46, 0xe8, - 0x57, 0xe8, 0x42, 0xa2, 0x13, 0x4e, 0x63, 0xa8, 0x57, 0x27, 0x3d, 0x61, 0x1b, 0x5d, 0xcc, 0xe1, - 0x7d, 0x6d, 0x4d, 0x50, 0x1f, 0xa0, 0x22, 0xe4, 0xb8, 0xe3, 0x52, 0xab, 0xe5, 0x3e, 0x21, 0xb5, - 0x19, 0xc6, 0x06, 0xaf, 0xa3, 0x05, 0xcf, 0x67, 0x5d, 0x02, 0xc2, 0x97, 0x0d, 0xfe, 0xa1, 0x3e, - 0x5f, 0x40, 0x97, 0x72, 0x39, 0x85, 0xf2, 0x03, 0xb4, 0x5e, 0x8f, 0x77, 0xcd, 0x93, 0xdd, 0x53, - 0x5c, 0x1f, 0xa1, 0xc7, 0x3b, 0x08, 0xf1, 0x4e, 0x03, 0x19, 0x3f, 0x52, 0x25, 0x21, 0x4b, 0x1e, - 0xcd, 0x6e, 0x49, 0x83, 0x7e, 0x1a, 0xfc, 0x5c, 0x00, 0x7a, 0x1f, 0xad, 0xfa, 0xd6, 0x63, 0x73, - 0xf0, 0xfc, 0xca, 0xa7, 0xc5, 0x8d, 0x88, 0xe1, 0x99, 0x77, 0x3a, 0xe2, 0x30, 0xac, 0xc7, 0xb7, - 0x92, 0x35, 0xe3, 0x0d, 0x3f, 0xfd, 0x89, 0x0f, 0x10, 0xb6, 0x43, 0xc7, 0x0c, 0x3a, 0x76, 0xdb, - 0x0d, 0x02, 0x97, 0x51, 0xf3, 0x90, 0xf4, 0xe5, 0xf9, 0x21, 0xce, 0xac, 0x77, 0x74, 0x4b, 0xda, - 0xc3, 0x24, 0xfe, 0x1e, 0xe9, 0x1b, 0x6b, 0x76, 0xe8, 0x64, 0x56, 0xf0, 0x6d, 0xf4, 0x26, 0xd8, - 0x9b, 0x19, 0xf6, 0x4c, 0x97, 0x9a, 0x76, 0x8b, 0x39, 0x87, 0xf2, 0x02, 0xb0, 0x9e, 0xd7, 0x06, - 0x56, 0xa8, 0x71, 0x8b, 0xdc, 0xef, 0xed, 0x45, 0xc1, 0xc6, 0x2a, 0x60, 0xf6, 0x7b, 0x55, 0x5a, - 0x89, 0x00, 0xf8, 0x1e, 0x3a, 0xcb, 0x59, 0xf8, 0x35, 0x88, 0x98, 0xa0, 0x13, 0xf2, 0x22, 0x30, - 0xc9, 0x69, 0x26, 0x6e, 0xaa, 0x1a, 0x27, 0xc2, 0x00, 0xe3, 0x97, 0xa8, 0x4a, 0xa1, 0x89, 0xf8, - 0x6b, 0xc4, 0x57, 0x39, 0x85, 0x19, 0x10, 0xab, 0x45, 0x6a, 0xf2, 0x12, 0x30, 0x5d, 0xfb, 0x1f, - 0x33, 0x88, 0x30, 0xc0, 0xf0, 0x10, 0x10, 0xc6, 0x9a, 0x37, 0xb4, 0x82, 0xbf, 0x8b, 0x65, 0x0a, - 0xe6, 
0xa8, 0x13, 0x61, 0x48, 0x6a, 0xf2, 0x32, 0xdc, 0xf6, 0xab, 0xf9, 0x6d, 0xdc, 0xf7, 0x2d, - 0x1a, 0x58, 0x4e, 0x34, 0x9e, 0x70, 0x59, 0xce, 0xa4, 0xb8, 0x63, 0x96, 0xf2, 0xbf, 0x05, 0xb4, - 0x00, 0x17, 0x15, 0xff, 0x28, 0xa1, 0x45, 0x6e, 0x4e, 0xf8, 0x7a, 0xbe, 0xe2, 0x51, 0x4f, 0x54, - 0xde, 0x9d, 0x32, 0x9a, 0x5f, 0x7b, 0x75, 0xeb, 0x87, 0x3f, 0xfe, 0xf9, 0xf9, 0x94, 0x8a, 0x37, - 0xf5, 0xf1, 0x66, 0xdc, 0x2d, 0x09, 0xcf, 0xc6, 0xbf, 0x48, 0xa8, 0x90, 0x18, 0x1f, 0xd6, 0x27, - 0xa4, 0x19, 0xf6, 0x4e, 0xe5, 0xbd, 0xe9, 0x01, 0xd3, 0x4b, 0x83, 0x39, 0x0d, 0xf0, 0xaf, 0xb1, - 0x34, 0x98, 0x9b, 0xa9, 0xa4, 0xa5, 0xde, 0x93, 0xe9, 0xa4, 0xa5, 0x1f, 0x0b, 0xf5, 0x26, 0x48, - 0x2b, 0x61, 0x7d, 0x82, 0x34, 0x98, 0x7a, 0xfd, 0x69, 0xfc, 0x5a, 0x3d, 0xc3, 0xbf, 0x4b, 0x68, - 0x35, 0x6b, 0x8f, 0xf8, 0xc6, 0x84, 0xec, 0x63, 0x3d, 0x5a, 0xd9, 0x9e, 0x11, 0x25, 0x84, 0x7f, - 0x0e, 0xc2, 0x6f, 0xe3, 0xca, 0x8c, 0xc2, 0xf9, 0x7f, 0xef, 0x02, 0xfd, 0x69, 0x62, 0x1b, 0xcf, - 0xf0, 0x6f, 0x12, 0x5a, 0x49, 0x19, 0x21, 0x2e, 0x4d, 0x90, 0x34, 0xea, 0xd9, 0x4a, 0x79, 0x16, - 0x88, 0x28, 0xe1, 0x06, 0x94, 0xa0, 0xe1, 0xeb, 0xf9, 0x25, 0x08, 0x2b, 0x49, 0x37, 0xfe, 0xa5, - 0x84, 0xd6, 0x86, 0x5d, 0x0b, 0x7f, 0x30, 0x45, 0xfa, 0x31, 0xf6, 0xa9, 0xdc, 0x9c, 0x19, 0x27, - 0xb4, 0xdf, 0x05, 0xed, 0xbb, 0xf8, 0x93, 0x59, 0xb4, 0x8f, 0xeb, 0xfd, 0x4b, 0x09, 0xe1, 0x51, - 0x33, 0xc3, 0x1f, 0x4e, 0x10, 0x96, 0xeb, 0xa9, 0xca, 0xce, 0x09, 0x90, 0xa2, 0xa8, 0x5d, 0x28, - 0xea, 0x23, 0xbc, 0x93, 0x5f, 0xd4, 0x38, 0x67, 0x4d, 0x55, 0x58, 0xf9, 0xf2, 0xc5, 0x51, 0x51, - 0x7a, 0x75, 0x54, 0x94, 0xfe, 0x3e, 0x2a, 0x4a, 0x3f, 0x1d, 0x17, 0xe7, 0x5e, 0x1d, 0x17, 0xe7, - 0xfe, 0x3c, 0x2e, 0xce, 0x7d, 0xb3, 0xdd, 0x70, 0xc3, 0x66, 0xc7, 0xd6, 0x1c, 0xd6, 0x8e, 0xe9, - 0x01, 0x96, 0xe4, 0xea, 0x0d, 0x65, 0x03, 0x9b, 0xb1, 0x17, 0xe1, 0xcf, 0x86, 0xf7, 0xff, 0x0b, - 0x00, 0x00, 0xff, 0xff, 0x20, 0x72, 0xc5, 0x73, 0x1b, 0x0e, 0x00, 0x00, + // 1213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6e, 0xdb, 0xc6, + 0x13, 0x36, 0xe3, 0xff, 0x6b, 0xfc, 0x0c, 0xff, 0x36, 0x4e, 0xa2, 0xd0, 0xb1, 0xe2, 0xb2, 0x40, + 0x62, 0x1b, 0x29, 0x59, 0xa9, 0x71, 0x53, 0xa3, 0x40, 0x0b, 0xff, 0x69, 0x12, 0xd5, 0x45, 0x6a, + 0x33, 0x76, 0x51, 0x14, 0x2d, 0x04, 0x92, 0x5a, 0x4b, 0x84, 0xa5, 0x5d, 0x86, 0xa4, 0x14, 0x29, + 0xae, 0x2f, 0x7d, 0x81, 0x16, 0xe8, 0xa5, 0x4f, 0x90, 0x02, 0xbd, 0xf5, 0x2d, 0x52, 0x20, 0x87, + 0x00, 0xbd, 0xf4, 0x54, 0x14, 0x76, 0x5f, 0xa1, 0xc7, 0x02, 0x05, 0x67, 0x97, 0x14, 0x29, 0x89, + 0x91, 0xe4, 0xe6, 0xd0, 0x43, 0x2e, 0x36, 0xb9, 0x3b, 0xdf, 0x37, 0xdf, 0xcc, 0xee, 0x70, 0xc6, + 0x46, 0x6f, 0x98, 0x86, 0xd9, 0xaa, 0x32, 0xaa, 0x3d, 0x61, 0x94, 0x58, 0x8c, 0x5a, 0x36, 0x71, + 0xcb, 0x44, 0x7b, 0x54, 0x27, 0x6e, 0x4b, 0x75, 0x5c, 0xe6, 0x33, 0x9c, 0x11, 0x26, 0x6a, 0xc2, + 0x44, 0x6d, 0xe4, 0xe4, 0xf9, 0x32, 0x2b, 0x33, 0x30, 0xd2, 0x82, 0x27, 0x6e, 0x2f, 0x5f, 0x2b, + 0x33, 0x56, 0xae, 0x12, 0xcd, 0x70, 0x6c, 0xcd, 0xa0, 0x94, 0xf9, 0x86, 0x6f, 0x33, 0xea, 0x85, + 0xbb, 0x3e, 0xa1, 0x25, 0xe2, 0xd6, 0x6c, 0xea, 0x6b, 0x7e, 0xcb, 0x21, 0x1e, 0xff, 0x29, 0x76, + 0x17, 0x63, 0xbb, 0x96, 0xdb, 0x72, 0x7c, 0xa6, 0x39, 0x2e, 0x63, 0x87, 0x62, 0x7b, 0xd5, 0x62, + 0x5e, 0x8d, 0x79, 0x9a, 0x69, 0x78, 0x42, 0xa3, 0xd6, 0xc8, 0x99, 0xc4, 0x37, 0x72, 0x9a, 0x63, + 0x94, 0x6d, 0x0a, 0x9e, 0x84, 0x6d, 0x36, 0x8c, 0xcc, 0xf4, 0x2d, 0xab, 0x42, 0xac, 0x23, 0x87, + 0x81, 0xcf, 0xa6, 0xd8, 0x5f, 0xe9, 0xbd, 0x9f, 0x78, 0x13, 0xa6, 0x51, 0x92, 0xda, 0x3b, 0x36, + 0x2d, 
0xc7, 0x93, 0x24, 0xdf, 0xe8, 0x6d, 0xd2, 0x45, 0xa5, 0x84, 0x76, 0xc4, 0x61, 0x56, 0x25, + 0x30, 0x69, 0xe4, 0xa2, 0xe7, 0x4e, 0x9b, 0xe4, 0x99, 0x38, 0x86, 0x6b, 0xd4, 0xbc, 0x4e, 0xf5, + 0x49, 0x9b, 0xe4, 0x11, 0x81, 0xa9, 0x32, 0x8f, 0xf0, 0x5e, 0xa0, 0x74, 0x17, 0xf0, 0x3a, 0x79, + 0x54, 0x27, 0x9e, 0xaf, 0x1c, 0xa0, 0x8b, 0x89, 0x55, 0xcf, 0x61, 0xd4, 0x23, 0xf8, 0x03, 0x34, + 0xc1, 0xfd, 0x64, 0xa4, 0x25, 0x69, 0x79, 0x26, 0xbf, 0xa4, 0xa6, 0x9d, 0xbe, 0xca, 0x91, 0x9b, + 0x63, 0xcf, 0x7e, 0xbf, 0x3e, 0xa2, 0x0b, 0x94, 0x72, 0x05, 0x5d, 0x02, 0xda, 0xad, 0x8a, 0x61, + 0xd3, 0x4f, 0x6c, 0xcf, 0x0f, 0xfd, 0xad, 0xa1, 0xcb, 0x9d, 0x1b, 0xc2, 0xe5, 0x02, 0x9a, 0xb6, + 0x82, 0xc5, 0xa2, 0x5d, 0x0a, 0xbc, 0x8e, 0x2e, 0x4f, 0xeb, 0x53, 0xb0, 0x50, 0x28, 0x79, 0x4a, + 0x3e, 0xce, 0x57, 0xa0, 0x87, 0x4c, 0xf0, 0xe1, 0xab, 0x68, 0x2a, 0x44, 0x81, 0xd4, 0x69, 0x7d, + 0x52, 0x80, 0x94, 0x2f, 0xe3, 0xae, 0x38, 0x46, 0xb8, 0xda, 0x44, 0x48, 0x80, 0xe8, 0x21, 0x13, + 0x11, 0xbe, 0x99, 0x1e, 0x61, 0x9b, 0x80, 0x2b, 0x0c, 0x1e, 0x95, 0x7d, 0x24, 0x03, 0xfb, 0x47, + 0xc1, 0xa1, 0x75, 0xc9, 0x5a, 0x40, 0xd3, 0x70, 0x9a, 0x45, 0x5a, 0xaf, 0x81, 0x83, 0x31, 0x7d, + 0x0a, 0x16, 0x1e, 0xd4, 0x6b, 0x09, 0xcd, 0x17, 0x92, 0x9a, 0x0d, 0xb4, 0xd0, 0x93, 0xf5, 0x15, + 0x0a, 0xff, 0x1a, 0x5d, 0x01, 0x17, 0x41, 0xf2, 0xef, 0x13, 0xa3, 0x44, 0x5c, 0xaf, 0x7f, 0x32, + 0xf1, 0x5d, 0x84, 0xda, 0xa5, 0x05, 0xaa, 0x67, 0xf2, 0x37, 0x54, 0x5e, 0x87, 0x6a, 0x50, 0x87, + 0x2a, 0x2f, 0x03, 0x51, 0x87, 0xea, 0xae, 0x51, 0x26, 0x82, 0x56, 0x8f, 0x21, 0x95, 0xa7, 0x12, + 0xca, 0x74, 0xbb, 0x17, 0xe1, 0x6d, 0xa0, 0xc9, 0x0a, 0x5f, 0x82, 0x0b, 0x30, 0x93, 0xbf, 0x99, + 0x1e, 0x5b, 0x81, 0x96, 0x48, 0x93, 0x94, 0x38, 0x85, 0x1e, 0xe2, 0xf0, 0xbd, 0x1e, 0x3a, 0x6f, + 0xf6, 0xd5, 0xc9, 0xfd, 0x27, 0x84, 0x7e, 0x86, 0xae, 0x45, 0x3a, 0xe1, 0x34, 0x3a, 0x72, 0x75, + 0xde, 0x13, 0x36, 0xd1, 0x62, 0x0a, 0xef, 0x2b, 0x4b, 0x82, 0xb2, 0x87, 0xb2, 0xe0, 0xe3, 0xae, + 0x4d, 0x8d, 0xaa, 0xfd, 0x84, 0x94, 0x86, 0x28, 0x1b, 0x3c, 0x8f, 0xc6, 0x1d, 0x97, 0x35, 0x08, + 0x08, 0x9f, 0xd2, 0xf9, 0x8b, 0xf2, 0x74, 0x1c, 0x5d, 0x4f, 0xe5, 0x14, 0xca, 0x0f, 0xd0, 0xfc, + 0x61, 0xb8, 0x5b, 0x3c, 0xdf, 0x3d, 0xc5, 0x87, 0x5d, 0xf4, 0x78, 0x1d, 0x21, 0x9e, 0x69, 0x20, + 0xe3, 0x47, 0x2a, 0x47, 0x64, 0xd1, 0x47, 0xb3, 0x91, 0x53, 0x21, 0x9f, 0x3a, 0x3f, 0x17, 0x80, + 0x3e, 0x40, 0xb3, 0xae, 0xf1, 0xb8, 0xd8, 0xfe, 0xfc, 0x66, 0x46, 0xc5, 0x8d, 0x08, 0xe1, 0x89, + 0xef, 0x74, 0xc0, 0xa1, 0x1b, 0x8f, 0xb7, 0xa2, 0x35, 0xfd, 0x7f, 0x6e, 0xfc, 0x15, 0x1f, 0x20, + 0x6c, 0xfa, 0x56, 0xd1, 0xab, 0x9b, 0x35, 0xdb, 0xf3, 0x6c, 0x46, 0x8b, 0x47, 0xa4, 0x95, 0x19, + 0xeb, 0xe0, 0x4c, 0xf6, 0x8e, 0x46, 0x4e, 0x7d, 0x18, 0xd9, 0xef, 0x90, 0x96, 0x3e, 0x67, 0xfa, + 0x56, 0x62, 0x05, 0x6f, 0xa3, 0xff, 0x43, 0x7b, 0x2b, 0xfa, 0xcd, 0xa2, 0x4d, 0x8b, 0x66, 0x95, + 0x59, 0x47, 0x99, 0x71, 0x60, 0xbd, 0xaa, 0xb6, 0x5b, 0xa1, 0xca, 0x5b, 0xe4, 0x7e, 0x73, 0x37, + 0x30, 0xd6, 0x67, 0x01, 0xb3, 0xdf, 0x2c, 0xd0, 0xcd, 0x00, 0x80, 0x77, 0xd0, 0x25, 0xce, 0xc2, + 0xaf, 0x41, 0xc0, 0x04, 0x99, 0xc8, 0x4c, 0x00, 0x53, 0x26, 0xce, 0xc4, 0x9b, 0xaa, 0xca, 0x89, + 0x30, 0xc0, 0xf8, 0x25, 0x2a, 0x50, 0x48, 0x22, 0xfe, 0x1c, 0xf1, 0x55, 0x4e, 0x51, 0xf4, 0x88, + 0x51, 0x25, 0xa5, 0xcc, 0x24, 0x30, 0xad, 0xbe, 0xa4, 0x19, 0x04, 0x18, 0x60, 0x78, 0x08, 0x08, + 0x7d, 0xce, 0xe9, 0x58, 0xc1, 0x5f, 0x85, 0x32, 0x05, 0x73, 0x90, 0x09, 0xdf, 0x27, 0xa5, 0xcc, + 0x14, 0xdc, 0xf6, 0x95, 0xf4, 0x34, 0xee, 0xbb, 0x06, 0xf5, 0x0c, 0x2b, 0x28, 0x4f, 0xb8, 0x2c, + 0x17, 0x63, 0xdc, 0x21, 0x8b, 
0xe2, 0xa3, 0x95, 0x94, 0x7b, 0x7a, 0x40, 0x7d, 0xbb, 0x7a, 0x9f, + 0xd8, 0xe5, 0x8a, 0x3f, 0x40, 0x19, 0x5c, 0x46, 0x13, 0x15, 0xb0, 0x85, 0x1b, 0x37, 0xa6, 0x8b, + 0xb7, 0x76, 0x79, 0x8c, 0xc6, 0xcb, 0xe3, 0xe7, 0x71, 0xb4, 0x3a, 0x88, 0xdb, 0xd7, 0x95, 0xf2, + 0xba, 0x52, 0xfe, 0x23, 0x95, 0x92, 0xff, 0x6b, 0x06, 0x8d, 0xc3, 0x9d, 0xc5, 0xdf, 0x4a, 0x68, + 0x82, 0x8f, 0x71, 0xf8, 0x56, 0xba, 0xe2, 0xee, 0xe9, 0x51, 0x7e, 0x6b, 0x40, 0x6b, 0x7e, 0xed, + 0x95, 0xe5, 0x6f, 0x7e, 0xfd, 0xf3, 0xfb, 0x0b, 0x0a, 0x5e, 0xd2, 0x7a, 0x8f, 0xad, 0x8d, 0x9c, + 0x98, 0x6e, 0xf1, 0x0f, 0x12, 0x9a, 0x8e, 0x46, 0x44, 0xac, 0xf5, 0x71, 0xd3, 0x39, 0x65, 0xca, + 0x6f, 0x0f, 0x0e, 0x18, 0x5c, 0x1a, 0xd4, 0xa9, 0x87, 0x7f, 0x0c, 0xa5, 0x41, 0xdd, 0x0c, 0x24, + 0x2d, 0xd6, 0x79, 0x07, 0x93, 0x16, 0x6f, 0xab, 0xca, 0x1d, 0x90, 0x96, 0xc3, 0x5a, 0x1f, 0x69, + 0x50, 0xf5, 0xda, 0x71, 0xf8, 0x41, 0x3b, 0xc1, 0xbf, 0x48, 0x68, 0x36, 0x39, 0x48, 0xe2, 0xdb, + 0x7d, 0xbc, 0xf7, 0x9c, 0x66, 0xe5, 0xb5, 0x21, 0x51, 0x42, 0xf8, 0xc7, 0x20, 0x7c, 0x1b, 0x6f, + 0x0e, 0x29, 0x9c, 0xff, 0x21, 0xe4, 0x69, 0xc7, 0xd1, 0x80, 0x75, 0x82, 0x7f, 0x92, 0xd0, 0x4c, + 0x6c, 0x64, 0xc4, 0xb9, 0x3e, 0x92, 0xba, 0xa7, 0x5b, 0x39, 0x3f, 0x0c, 0x44, 0x84, 0x70, 0x1b, + 0x42, 0x50, 0xf1, 0xad, 0xf4, 0x10, 0xc4, 0xd0, 0x15, 0x4f, 0xfc, 0x73, 0x09, 0xcd, 0x75, 0xce, + 0x77, 0xf8, 0xdd, 0x01, 0xdc, 0xf7, 0x18, 0x34, 0xe5, 0x3b, 0x43, 0xe3, 0x84, 0xf6, 0x7b, 0xa0, + 0x7d, 0x03, 0x7f, 0x38, 0x8c, 0xf6, 0x5e, 0xb9, 0x7f, 0x2e, 0x21, 0xdc, 0xdd, 0xd7, 0xf0, 0x7b, + 0x7d, 0x84, 0xa5, 0x4e, 0x9f, 0xf2, 0xfa, 0x39, 0x90, 0x22, 0xa8, 0x0d, 0x08, 0xea, 0x7d, 0xbc, + 0x9e, 0x1e, 0x54, 0xaf, 0xce, 0x1a, 0x3f, 0x9d, 0xbf, 0x25, 0xb4, 0xf8, 0xd2, 0x36, 0x8d, 0xb7, + 0x86, 0xd6, 0xd7, 0x3d, 0x5b, 0xc8, 0xdb, 0xff, 0x8e, 0x44, 0xc4, 0xbb, 0x07, 0xf1, 0xee, 0xe0, + 0xc2, 0xb9, 0xe3, 0xd5, 0xf8, 0xe0, 0xa2, 0x1d, 0xf3, 0xdf, 0x27, 0x9b, 0x9f, 0x3e, 0x3b, 0xcd, + 0x4a, 0x2f, 0x4e, 0xb3, 0xd2, 0x1f, 0xa7, 0x59, 0xe9, 0xbb, 0xb3, 0xec, 0xc8, 0x8b, 0xb3, 0xec, + 0xc8, 0x6f, 0x67, 0xd9, 0x91, 0x2f, 0xd6, 0xca, 0xb6, 0x5f, 0xa9, 0x9b, 0xaa, 0xc5, 0x6a, 0xa1, + 0x3b, 0xa0, 0x89, 0x7c, 0x37, 0x3b, 0xbc, 0x43, 0x9b, 0x35, 0x27, 0xe0, 0x1f, 0x0c, 0xef, 0xfc, + 0x13, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x9c, 0x97, 0x6d, 0x45, 0x12, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -882,6 +1073,8 @@ type QueryClient interface { ListEpochHeaders(ctx context.Context, in *QueryListEpochHeadersRequest, opts ...grpc.CallOption) (*QueryListEpochHeadersResponse, error) // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs FinalizedChainInfo(ctx context.Context, in *QueryFinalizedChainInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoResponse, error) + // FinalizedChainInfoUntilHeight queries the BTC-finalised info no later than the provided CZ height, with proofs + FinalizedChainInfoUntilHeight(ctx context.Context, in *QueryFinalizedChainInfoUntilHeightRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoUntilHeightResponse, error) } type queryClient struct { @@ -955,6 +1148,15 @@ func (c *queryClient) FinalizedChainInfo(ctx context.Context, in *QueryFinalized return out, nil } +func (c *queryClient) FinalizedChainInfoUntilHeight(ctx context.Context, in *QueryFinalizedChainInfoUntilHeightRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoUntilHeightResponse, error) { + out := new(QueryFinalizedChainInfoUntilHeightResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/FinalizedChainInfoUntilHeight", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // QueryServer is the server API for Query service. type QueryServer interface { // Parameters queries the parameters of the module. @@ -971,6 +1173,8 @@ type QueryServer interface { ListEpochHeaders(context.Context, *QueryListEpochHeadersRequest) (*QueryListEpochHeadersResponse, error) // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs FinalizedChainInfo(context.Context, *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) + // FinalizedChainInfoUntilHeight queries the BTC-finalised info no later than the provided CZ height, with proofs + FinalizedChainInfoUntilHeight(context.Context, *QueryFinalizedChainInfoUntilHeightRequest) (*QueryFinalizedChainInfoUntilHeightResponse, error) } // UnimplementedQueryServer can be embedded to have forward compatible implementations. 
@@ -998,6 +1202,9 @@ func (*UnimplementedQueryServer) ListEpochHeaders(ctx context.Context, req *Quer func (*UnimplementedQueryServer) FinalizedChainInfo(ctx context.Context, req *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FinalizedChainInfo not implemented") } +func (*UnimplementedQueryServer) FinalizedChainInfoUntilHeight(ctx context.Context, req *QueryFinalizedChainInfoUntilHeightRequest) (*QueryFinalizedChainInfoUntilHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizedChainInfoUntilHeight not implemented") +} func RegisterQueryServer(s grpc1.Server, srv QueryServer) { s.RegisterService(&_Query_serviceDesc, srv) @@ -1129,6 +1336,24 @@ func _Query_FinalizedChainInfo_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _Query_FinalizedChainInfoUntilHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryFinalizedChainInfoUntilHeightRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).FinalizedChainInfoUntilHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/FinalizedChainInfoUntilHeight", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).FinalizedChainInfoUntilHeight(ctx, req.(*QueryFinalizedChainInfoUntilHeightRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "babylon.zoneconcierge.v1.Query", HandlerType: (*QueryServer)(nil), @@ -1161,6 +1386,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "FinalizedChainInfo", Handler: _Query_FinalizedChainInfo_Handler, }, + { + MethodName: "FinalizedChainInfoUntilHeight", + Handler: _Query_FinalizedChainInfoUntilHeight_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "babylon/zoneconcierge/query.proto", @@ -1736,93 +1965,259 @@ func (m *QueryFinalizedChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int return len(dAtA) - i, nil } -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryParamsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *QueryParamsResponse) Size() (n int) { - if m == nil { - return 0 +func (m *QueryFinalizedChainInfoUntilHeightRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.Params.Size() - n += 1 + l + sovQuery(uint64(l)) - return n + return dAtA[:n], nil } -func (m *QueryChainListRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +func (m *QueryFinalizedChainInfoUntilHeightRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryChainListResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *QueryFinalizedChainInfoUntilHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.ChainIds) > 0 { - for _, s := 
range m.ChainIds { - l = len(s) - n += 1 + l + sovQuery(uint64(l)) + if m.Prove { + i-- + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - return n -} - -func (m *QueryChainInfoRequest) Size() (n int) { - if m == nil { - return 0 + if m.Height != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 } - var l int - _ = l - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *QueryChainInfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ChainInfo != nil { - l = m.ChainInfo.Size() - n += 1 + l + sovQuery(uint64(l)) +func (m *QueryFinalizedChainInfoUntilHeightResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *QueryEpochChainInfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *QueryFinalizedChainInfoUntilHeightResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProofEpochSubmitted) > 0 { + for iNdEx := len(m.ProofEpochSubmitted) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ProofEpochSubmitted[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.ProofEpochSealed != nil { + { + size, err := m.ProofEpochSealed.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.ProofHeaderInEpoch != nil { + { + size, err := m.ProofHeaderInEpoch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.ProofTxInBlock != nil { + { + size, err := m.ProofTxInBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.BtcSubmissionKey != nil { + { + size, err := m.BtcSubmissionKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RawCheckpoint != nil { + { + size, err := m.RawCheckpoint.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EpochInfo != nil { + { + size, err := m.EpochInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.FinalizedChainInfo != nil { + { + size, err := m.FinalizedChainInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := 
offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryChainListRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryChainListResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChainIds) > 0 { + for _, s := range m.ChainIds { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryChainInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryChainInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChainInfo != nil { + l = m.ChainInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryEpochChainInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if m.EpochNum != 0 { n += 1 + sovQuery(uint64(m.EpochNum)) } @@ -1972,6 +2367,68 @@ func (m *QueryFinalizedChainInfoResponse) Size() (n int) { return n } +func (m *QueryFinalizedChainInfoUntilHeightRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovQuery(uint64(m.Height)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FinalizedChainInfo != nil { + l = m.FinalizedChainInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.EpochInfo != nil { + l = m.EpochInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.RawCheckpoint != nil { + l = m.RawCheckpoint.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.BtcSubmissionKey != nil { + l = m.BtcSubmissionKey.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ProofTxInBlock != nil { + l = m.ProofTxInBlock.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ProofHeaderInEpoch != nil { + l = m.ProofHeaderInEpoch.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ProofEpochSealed != nil { + l = m.ProofEpochSealed.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.ProofEpochSubmitted) > 0 { + for _, e := range m.ProofEpochSubmitted { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + func sovQuery(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -3459,6 +3916,463 @@ func (m *QueryFinalizedChainInfoResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryFinalizedChainInfoUntilHeightRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
QueryFinalizedChainInfoUntilHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Prove = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizedChainInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FinalizedChainInfo == nil { + m.FinalizedChainInfo = &ChainInfo{} + } + if err := m.FinalizedChainInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochInfo", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EpochInfo == nil { + m.EpochInfo = &types.Epoch{} + } + if err := m.EpochInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RawCheckpoint == nil { + m.RawCheckpoint = &types1.RawCheckpoint{} + } + if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BtcSubmissionKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BtcSubmissionKey == nil { + m.BtcSubmissionKey = &types2.SubmissionKey{} + } + if err := m.BtcSubmissionKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofTxInBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofTxInBlock == nil { + m.ProofTxInBlock = &types3.TxProof{} + } + if err := m.ProofTxInBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeaderInEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofHeaderInEpoch == nil { + m.ProofHeaderInEpoch = &crypto.Proof{} + } + if err := 
m.ProofHeaderInEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSealed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofEpochSealed == nil { + m.ProofEpochSealed = &ProofEpochSealed{} + } + if err := m.ProofEpochSealed.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSubmitted", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofEpochSubmitted = append(m.ProofEpochSubmitted, &types2.TransactionInfo{}) + if err := m.ProofEpochSubmitted[len(m.ProofEpochSubmitted)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipQuery(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/zoneconcierge/types/query.pb.gw.go b/x/zoneconcierge/types/query.pb.gw.go index 4badc5d0f..19f747ec0 100644 --- a/x/zoneconcierge/types/query.pb.gw.go +++ b/x/zoneconcierge/types/query.pb.gw.go @@ -419,6 +419,100 @@ func local_request_Query_FinalizedChainInfo_0(ctx context.Context, marshaler run } +var ( + filter_Query_FinalizedChainInfoUntilHeight_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0, "height": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_Query_FinalizedChainInfoUntilHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedChainInfoUntilHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfoUntilHeight_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.FinalizedChainInfoUntilHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_FinalizedChainInfoUntilHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedChainInfoUntilHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfoUntilHeight_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.FinalizedChainInfoUntilHeight(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. @@ -586,6 +680,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_FinalizedChainInfoUntilHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_FinalizedChainInfoUntilHeight_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_FinalizedChainInfoUntilHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -767,6 +884,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_FinalizedChainInfoUntilHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_FinalizedChainInfoUntilHeight_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_FinalizedChainInfoUntilHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -784,6 +921,8 @@ var ( pattern_Query_ListEpochHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "headers", "chain_id", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_FinalizedChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_FinalizedChainInfoUntilHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 5}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "chain_id", "height"}, "", runtime.AssumeColonVerbOpt(false))) ) var ( @@ -800,4 +939,6 @@ var ( forward_Query_ListEpochHeaders_0 = runtime.ForwardResponseMessage forward_Query_FinalizedChainInfo_0 = runtime.ForwardResponseMessage + + forward_Query_FinalizedChainInfoUntilHeight_0 = runtime.ForwardResponseMessage ) From 8976eba313989302e9826659d2cf962af2b0d62d Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Tue, 10 Jan 2023 20:21:13 +1100 Subject: [PATCH 11/37] zoneconcierge API: find header and fork headers at a given height (#266) --- proto/babylon/zoneconcierge/query.proto | 16 + x/zoneconcierge/keeper/grpc_query.go | 25 + x/zoneconcierge/keeper/grpc_query_test.go | 35 ++ x/zoneconcierge/types/query.pb.go | 671 +++++++++++++++++++--- x/zoneconcierge/types/query.pb.gw.go | 123 ++++ 5 files changed, 779 insertions(+), 91 deletions(-) diff --git a/proto/babylon/zoneconcierge/query.proto b/proto/babylon/zoneconcierge/query.proto index 5405eed6f..800c9d8d1 100644 --- a/proto/babylon/zoneconcierge/query.proto +++ b/proto/babylon/zoneconcierge/query.proto @@ -22,6 +22,10 @@ service Query { rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/params"; } + // Header queries the CZ header and fork headers at a given height. 
+ rpc Header(QueryHeaderRequest) returns (QueryHeaderResponse) { + option (google.api.http).get = "/babylon/zoneconcierge/v1/chain_info/{chain_id}/header/{height}"; + } // ChainList queries the list of chains that checkpoint to Babylon rpc ChainList(QueryChainListRequest) returns (QueryChainListResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/chains"; @@ -61,6 +65,18 @@ message QueryParamsResponse { Params params = 1 [(gogoproto.nullable) = false]; } +// QueryHeaderRequest is request type for the Query/Header RPC method. +message QueryHeaderRequest { + string chain_id = 1; + uint64 height = 2; +} + +// QueryParamsResponse is response type for the Query/Header RPC method. +message QueryHeaderResponse { + babylon.zoneconcierge.v1.IndexedHeader header = 1; + babylon.zoneconcierge.v1.Forks fork_headers = 2; +} + // QueryChainListRequest is request type for the Query/ChainList RPC method message QueryChainListRequest {} diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index 8cb75bfe0..78754aef3 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -46,6 +46,31 @@ func (k Keeper) ChainInfo(c context.Context, req *types.QueryChainInfoRequest) ( return resp, nil } +// Header returns the header and fork headers at a given height +func (k Keeper) Header(c context.Context, req *types.QueryHeaderRequest) (*types.QueryHeaderResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + if len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") + } + + ctx := sdk.UnwrapSDKContext(c) + + header, err := k.GetHeader(ctx, req.ChainId, req.Height) + if err != nil { + return nil, err + } + forks := k.GetForks(ctx, req.ChainId, req.Height) + resp := &types.QueryHeaderResponse{ + Header: header, + ForkHeaders: forks, + } + + return resp, nil +} + // EpochChainInfo returns the info of a chain with given ID in a given epoch func (k Keeper) EpochChainInfo(c context.Context, req *types.QueryEpochChainInfoRequest) (*types.QueryEpochChainInfoResponse, error) { if req == nil { diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go index 24315d4bc..d738c1b1e 100644 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ b/x/zoneconcierge/keeper/grpc_query_test.go @@ -88,6 +88,41 @@ func FuzzChainInfo(f *testing.F) { }) } +func FuzzHeader(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + // invoke the hook a random number of times to simulate a random number of blocks + numHeaders := datagen.RandomInt(100) + 1 + numForkHeaders := datagen.RandomInt(10) + 1 + headers, forkHeaders := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) + + // find header at a random height and assert correctness against the expected header + randomHeight := datagen.RandomInt(int(numHeaders - 1)) + resp, err := zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ChainId: czChain.ChainID, Height: randomHeight}) + require.NoError(t, err) + require.Equal(t, headers[randomHeight].Header.LastCommitHash, resp.Header.Hash) + require.Len(t, resp.ForkHeaders.Headers, 0) + + // find the last header and fork headers then assert correctness + resp, 
err = zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ChainId: czChain.ChainID, Height: numHeaders - 1}) + require.NoError(t, err) + require.Equal(t, headers[numHeaders-1].Header.LastCommitHash, resp.Header.Hash) + require.Len(t, resp.ForkHeaders.Headers, int(numForkHeaders)) + for i := 0; i < int(numForkHeaders); i++ { + require.Equal(t, forkHeaders[i].Header.LastCommitHash, resp.ForkHeaders.Headers[i].Hash) + } + }) +} + func FuzzEpochChainInfo(f *testing.F) { datagen.AddRandomSeedsToFuzzer(f, 10) diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go index 6ed73d11e..72ea849f2 100644 --- a/x/zoneconcierge/types/query.pb.go +++ b/x/zoneconcierge/types/query.pb.go @@ -118,6 +118,112 @@ func (m *QueryParamsResponse) GetParams() Params { return Params{} } +// QueryHeaderRequest is request type for the Query/Header RPC method. +type QueryHeaderRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *QueryHeaderRequest) Reset() { *m = QueryHeaderRequest{} } +func (m *QueryHeaderRequest) String() string { return proto.CompactTextString(m) } +func (*QueryHeaderRequest) ProtoMessage() {} +func (*QueryHeaderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{2} +} +func (m *QueryHeaderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryHeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryHeaderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryHeaderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryHeaderRequest.Merge(m, src) +} +func (m *QueryHeaderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryHeaderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryHeaderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryHeaderRequest proto.InternalMessageInfo + +func (m *QueryHeaderRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *QueryHeaderRequest) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +// QueryParamsResponse is response type for the Query/Header RPC method. 
+type QueryHeaderResponse struct { + Header *IndexedHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + ForkHeaders *Forks `protobuf:"bytes,2,opt,name=fork_headers,json=forkHeaders,proto3" json:"fork_headers,omitempty"` +} + +func (m *QueryHeaderResponse) Reset() { *m = QueryHeaderResponse{} } +func (m *QueryHeaderResponse) String() string { return proto.CompactTextString(m) } +func (*QueryHeaderResponse) ProtoMessage() {} +func (*QueryHeaderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{3} +} +func (m *QueryHeaderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryHeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryHeaderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryHeaderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryHeaderResponse.Merge(m, src) +} +func (m *QueryHeaderResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryHeaderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryHeaderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryHeaderResponse proto.InternalMessageInfo + +func (m *QueryHeaderResponse) GetHeader() *IndexedHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *QueryHeaderResponse) GetForkHeaders() *Forks { + if m != nil { + return m.ForkHeaders + } + return nil +} + // QueryChainListRequest is request type for the Query/ChainList RPC method type QueryChainListRequest struct { } @@ -126,7 +232,7 @@ func (m *QueryChainListRequest) Reset() { *m = QueryChainListRequest{} } func (m *QueryChainListRequest) String() string { return proto.CompactTextString(m) } func (*QueryChainListRequest) ProtoMessage() {} func (*QueryChainListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{2} + return fileDescriptor_2caab7ee15063236, []int{4} } func (m *QueryChainListRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +270,7 @@ func (m *QueryChainListResponse) Reset() { *m = QueryChainListResponse{} func (m *QueryChainListResponse) String() string { return proto.CompactTextString(m) } func (*QueryChainListResponse) ProtoMessage() {} func (*QueryChainListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{3} + return fileDescriptor_2caab7ee15063236, []int{5} } func (m *QueryChainListResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -209,7 +315,7 @@ func (m *QueryChainInfoRequest) Reset() { *m = QueryChainInfoRequest{} } func (m *QueryChainInfoRequest) String() string { return proto.CompactTextString(m) } func (*QueryChainInfoRequest) ProtoMessage() {} func (*QueryChainInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{4} + return fileDescriptor_2caab7ee15063236, []int{6} } func (m *QueryChainInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -255,7 +361,7 @@ func (m *QueryChainInfoResponse) Reset() { *m = QueryChainInfoResponse{} func (m *QueryChainInfoResponse) String() string { return proto.CompactTextString(m) } func (*QueryChainInfoResponse) ProtoMessage() {} func (*QueryChainInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{5} + return fileDescriptor_2caab7ee15063236, []int{7} } func (m 
*QueryChainInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +407,7 @@ func (m *QueryEpochChainInfoRequest) Reset() { *m = QueryEpochChainInfoR func (m *QueryEpochChainInfoRequest) String() string { return proto.CompactTextString(m) } func (*QueryEpochChainInfoRequest) ProtoMessage() {} func (*QueryEpochChainInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{6} + return fileDescriptor_2caab7ee15063236, []int{8} } func (m *QueryEpochChainInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +460,7 @@ func (m *QueryEpochChainInfoResponse) Reset() { *m = QueryEpochChainInfo func (m *QueryEpochChainInfoResponse) String() string { return proto.CompactTextString(m) } func (*QueryEpochChainInfoResponse) ProtoMessage() {} func (*QueryEpochChainInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{7} + return fileDescriptor_2caab7ee15063236, []int{9} } func (m *QueryEpochChainInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -401,7 +507,7 @@ func (m *QueryListHeadersRequest) Reset() { *m = QueryListHeadersRequest func (m *QueryListHeadersRequest) String() string { return proto.CompactTextString(m) } func (*QueryListHeadersRequest) ProtoMessage() {} func (*QueryListHeadersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{8} + return fileDescriptor_2caab7ee15063236, []int{10} } func (m *QueryListHeadersRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -456,7 +562,7 @@ func (m *QueryListHeadersResponse) Reset() { *m = QueryListHeadersRespon func (m *QueryListHeadersResponse) String() string { return proto.CompactTextString(m) } func (*QueryListHeadersResponse) ProtoMessage() {} func (*QueryListHeadersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{9} + return fileDescriptor_2caab7ee15063236, []int{11} } func (m *QueryListHeadersResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -509,7 +615,7 @@ func (m *QueryListEpochHeadersRequest) Reset() { *m = QueryListEpochHead func (m *QueryListEpochHeadersRequest) String() string { return proto.CompactTextString(m) } func (*QueryListEpochHeadersRequest) ProtoMessage() {} func (*QueryListEpochHeadersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{10} + return fileDescriptor_2caab7ee15063236, []int{12} } func (m *QueryListEpochHeadersRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -562,7 +668,7 @@ func (m *QueryListEpochHeadersResponse) Reset() { *m = QueryListEpochHea func (m *QueryListEpochHeadersResponse) String() string { return proto.CompactTextString(m) } func (*QueryListEpochHeadersResponse) ProtoMessage() {} func (*QueryListEpochHeadersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{11} + return fileDescriptor_2caab7ee15063236, []int{13} } func (m *QueryListEpochHeadersResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +716,7 @@ func (m *QueryFinalizedChainInfoRequest) Reset() { *m = QueryFinalizedCh func (m *QueryFinalizedChainInfoRequest) String() string { return proto.CompactTextString(m) } func (*QueryFinalizedChainInfoRequest) ProtoMessage() {} func (*QueryFinalizedChainInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{12} + return fileDescriptor_2caab7ee15063236, []int{14} } func (m *QueryFinalizedChainInfoRequest) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -678,7 +784,7 @@ func (m *QueryFinalizedChainInfoResponse) Reset() { *m = QueryFinalizedC func (m *QueryFinalizedChainInfoResponse) String() string { return proto.CompactTextString(m) } func (*QueryFinalizedChainInfoResponse) ProtoMessage() {} func (*QueryFinalizedChainInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{13} + return fileDescriptor_2caab7ee15063236, []int{15} } func (m *QueryFinalizedChainInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -782,7 +888,7 @@ func (m *QueryFinalizedChainInfoUntilHeightRequest) String() string { } func (*QueryFinalizedChainInfoUntilHeightRequest) ProtoMessage() {} func (*QueryFinalizedChainInfoUntilHeightRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{14} + return fileDescriptor_2caab7ee15063236, []int{16} } func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -861,7 +967,7 @@ func (m *QueryFinalizedChainInfoUntilHeightResponse) String() string { } func (*QueryFinalizedChainInfoUntilHeightResponse) ProtoMessage() {} func (*QueryFinalizedChainInfoUntilHeightResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{15} + return fileDescriptor_2caab7ee15063236, []int{17} } func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -949,6 +1055,8 @@ func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProofEpochSubmitted() [] func init() { proto.RegisterType((*QueryParamsRequest)(nil), "babylon.zoneconcierge.v1.QueryParamsRequest") proto.RegisterType((*QueryParamsResponse)(nil), "babylon.zoneconcierge.v1.QueryParamsResponse") + proto.RegisterType((*QueryHeaderRequest)(nil), "babylon.zoneconcierge.v1.QueryHeaderRequest") + proto.RegisterType((*QueryHeaderResponse)(nil), "babylon.zoneconcierge.v1.QueryHeaderResponse") proto.RegisterType((*QueryChainListRequest)(nil), "babylon.zoneconcierge.v1.QueryChainListRequest") proto.RegisterType((*QueryChainListResponse)(nil), "babylon.zoneconcierge.v1.QueryChainListResponse") proto.RegisterType((*QueryChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryChainInfoRequest") @@ -968,83 +1076,88 @@ func init() { func init() { proto.RegisterFile("babylon/zoneconcierge/query.proto", fileDescriptor_2caab7ee15063236) } var fileDescriptor_2caab7ee15063236 = []byte{ - // 1213 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6e, 0xdb, 0xc6, - 0x13, 0x36, 0xe3, 0xff, 0x6b, 0xfc, 0x0c, 0xff, 0x36, 0x4e, 0xa2, 0xd0, 0xb1, 0xe2, 0xb2, 0x40, - 0x62, 0x1b, 0x29, 0x59, 0xa9, 0x71, 0x53, 0xa3, 0x40, 0x0b, 0xff, 0x69, 0x12, 0xd5, 0x45, 0x6a, - 0x33, 0x76, 0x51, 0x14, 0x2d, 0x04, 0x92, 0x5a, 0x4b, 0x84, 0xa5, 0x5d, 0x86, 0xa4, 0x14, 0x29, - 0xae, 0x2f, 0x7d, 0x81, 0x16, 0xe8, 0xa5, 0x4f, 0x90, 0x02, 0xbd, 0xf5, 0x2d, 0x52, 0x20, 0x87, - 0x00, 0xbd, 0xf4, 0x54, 0x14, 0x76, 0x5f, 0xa1, 0xc7, 0x02, 0x05, 0x67, 0x97, 0x14, 0x29, 0x89, - 0x91, 0xe4, 0xe6, 0xd0, 0x43, 0x2e, 0x36, 0xb9, 0x3b, 0xdf, 0x37, 0xdf, 0xcc, 0xee, 0x70, 0xc6, - 0x46, 0x6f, 0x98, 0x86, 0xd9, 0xaa, 0x32, 0xaa, 0x3d, 0x61, 0x94, 0x58, 0x8c, 0x5a, 0x36, 0x71, - 0xcb, 0x44, 0x7b, 0x54, 0x27, 0x6e, 0x4b, 0x75, 0x5c, 0xe6, 0x33, 0x9c, 0x11, 0x26, 0x6a, 0xc2, - 0x44, 0x6d, 0xe4, 0xe4, 0xf9, 0x32, 0x2b, 0x33, 0x30, 0xd2, 0x82, 0x27, 0x6e, 0x2f, 0x5f, 0x2b, - 0x33, 0x56, 0xae, 0x12, 0xcd, 0x70, 0x6c, 0xcd, 0xa0, 0x94, 0xf9, 
0x86, 0x6f, 0x33, 0xea, 0x85, - 0xbb, 0x3e, 0xa1, 0x25, 0xe2, 0xd6, 0x6c, 0xea, 0x6b, 0x7e, 0xcb, 0x21, 0x1e, 0xff, 0x29, 0x76, - 0x17, 0x63, 0xbb, 0x96, 0xdb, 0x72, 0x7c, 0xa6, 0x39, 0x2e, 0x63, 0x87, 0x62, 0x7b, 0xd5, 0x62, - 0x5e, 0x8d, 0x79, 0x9a, 0x69, 0x78, 0x42, 0xa3, 0xd6, 0xc8, 0x99, 0xc4, 0x37, 0x72, 0x9a, 0x63, - 0x94, 0x6d, 0x0a, 0x9e, 0x84, 0x6d, 0x36, 0x8c, 0xcc, 0xf4, 0x2d, 0xab, 0x42, 0xac, 0x23, 0x87, - 0x81, 0xcf, 0xa6, 0xd8, 0x5f, 0xe9, 0xbd, 0x9f, 0x78, 0x13, 0xa6, 0x51, 0x92, 0xda, 0x3b, 0x36, - 0x2d, 0xc7, 0x93, 0x24, 0xdf, 0xe8, 0x6d, 0xd2, 0x45, 0xa5, 0x84, 0x76, 0xc4, 0x61, 0x56, 0x25, - 0x30, 0x69, 0xe4, 0xa2, 0xe7, 0x4e, 0x9b, 0xe4, 0x99, 0x38, 0x86, 0x6b, 0xd4, 0xbc, 0x4e, 0xf5, - 0x49, 0x9b, 0xe4, 0x11, 0x81, 0xa9, 0x32, 0x8f, 0xf0, 0x5e, 0xa0, 0x74, 0x17, 0xf0, 0x3a, 0x79, - 0x54, 0x27, 0x9e, 0xaf, 0x1c, 0xa0, 0x8b, 0x89, 0x55, 0xcf, 0x61, 0xd4, 0x23, 0xf8, 0x03, 0x34, - 0xc1, 0xfd, 0x64, 0xa4, 0x25, 0x69, 0x79, 0x26, 0xbf, 0xa4, 0xa6, 0x9d, 0xbe, 0xca, 0x91, 0x9b, - 0x63, 0xcf, 0x7e, 0xbf, 0x3e, 0xa2, 0x0b, 0x94, 0x72, 0x05, 0x5d, 0x02, 0xda, 0xad, 0x8a, 0x61, - 0xd3, 0x4f, 0x6c, 0xcf, 0x0f, 0xfd, 0xad, 0xa1, 0xcb, 0x9d, 0x1b, 0xc2, 0xe5, 0x02, 0x9a, 0xb6, - 0x82, 0xc5, 0xa2, 0x5d, 0x0a, 0xbc, 0x8e, 0x2e, 0x4f, 0xeb, 0x53, 0xb0, 0x50, 0x28, 0x79, 0x4a, - 0x3e, 0xce, 0x57, 0xa0, 0x87, 0x4c, 0xf0, 0xe1, 0xab, 0x68, 0x2a, 0x44, 0x81, 0xd4, 0x69, 0x7d, - 0x52, 0x80, 0x94, 0x2f, 0xe3, 0xae, 0x38, 0x46, 0xb8, 0xda, 0x44, 0x48, 0x80, 0xe8, 0x21, 0x13, - 0x11, 0xbe, 0x99, 0x1e, 0x61, 0x9b, 0x80, 0x2b, 0x0c, 0x1e, 0x95, 0x7d, 0x24, 0x03, 0xfb, 0x47, - 0xc1, 0xa1, 0x75, 0xc9, 0x5a, 0x40, 0xd3, 0x70, 0x9a, 0x45, 0x5a, 0xaf, 0x81, 0x83, 0x31, 0x7d, - 0x0a, 0x16, 0x1e, 0xd4, 0x6b, 0x09, 0xcd, 0x17, 0x92, 0x9a, 0x0d, 0xb4, 0xd0, 0x93, 0xf5, 0x15, - 0x0a, 0xff, 0x1a, 0x5d, 0x01, 0x17, 0x41, 0xf2, 0xef, 0x13, 0xa3, 0x44, 0x5c, 0xaf, 0x7f, 0x32, - 0xf1, 0x5d, 0x84, 0xda, 0xa5, 0x05, 0xaa, 0x67, 0xf2, 0x37, 0x54, 0x5e, 0x87, 0x6a, 0x50, 0x87, - 0x2a, 0x2f, 0x03, 0x51, 0x87, 0xea, 0xae, 0x51, 0x26, 0x82, 0x56, 0x8f, 0x21, 0x95, 0xa7, 0x12, - 0xca, 0x74, 0xbb, 0x17, 0xe1, 0x6d, 0xa0, 0xc9, 0x0a, 0x5f, 0x82, 0x0b, 0x30, 0x93, 0xbf, 0x99, - 0x1e, 0x5b, 0x81, 0x96, 0x48, 0x93, 0x94, 0x38, 0x85, 0x1e, 0xe2, 0xf0, 0xbd, 0x1e, 0x3a, 0x6f, - 0xf6, 0xd5, 0xc9, 0xfd, 0x27, 0x84, 0x7e, 0x86, 0xae, 0x45, 0x3a, 0xe1, 0x34, 0x3a, 0x72, 0x75, - 0xde, 0x13, 0x36, 0xd1, 0x62, 0x0a, 0xef, 0x2b, 0x4b, 0x82, 0xb2, 0x87, 0xb2, 0xe0, 0xe3, 0xae, - 0x4d, 0x8d, 0xaa, 0xfd, 0x84, 0x94, 0x86, 0x28, 0x1b, 0x3c, 0x8f, 0xc6, 0x1d, 0x97, 0x35, 0x08, - 0x08, 0x9f, 0xd2, 0xf9, 0x8b, 0xf2, 0x74, 0x1c, 0x5d, 0x4f, 0xe5, 0x14, 0xca, 0x0f, 0xd0, 0xfc, - 0x61, 0xb8, 0x5b, 0x3c, 0xdf, 0x3d, 0xc5, 0x87, 0x5d, 0xf4, 0x78, 0x1d, 0x21, 0x9e, 0x69, 0x20, - 0xe3, 0x47, 0x2a, 0x47, 0x64, 0xd1, 0x47, 0xb3, 0x91, 0x53, 0x21, 0x9f, 0x3a, 0x3f, 0x17, 0x80, - 0x3e, 0x40, 0xb3, 0xae, 0xf1, 0xb8, 0xd8, 0xfe, 0xfc, 0x66, 0x46, 0xc5, 0x8d, 0x08, 0xe1, 0x89, - 0xef, 0x74, 0xc0, 0xa1, 0x1b, 0x8f, 0xb7, 0xa2, 0x35, 0xfd, 0x7f, 0x6e, 0xfc, 0x15, 0x1f, 0x20, - 0x6c, 0xfa, 0x56, 0xd1, 0xab, 0x9b, 0x35, 0xdb, 0xf3, 0x6c, 0x46, 0x8b, 0x47, 0xa4, 0x95, 0x19, - 0xeb, 0xe0, 0x4c, 0xf6, 0x8e, 0x46, 0x4e, 0x7d, 0x18, 0xd9, 0xef, 0x90, 0x96, 0x3e, 0x67, 0xfa, - 0x56, 0x62, 0x05, 0x6f, 0xa3, 0xff, 0x43, 0x7b, 0x2b, 0xfa, 0xcd, 0xa2, 0x4d, 0x8b, 0x66, 0x95, - 0x59, 0x47, 0x99, 0x71, 0x60, 0xbd, 0xaa, 0xb6, 0x5b, 0xa1, 0xca, 0x5b, 0xe4, 0x7e, 0x73, 0x37, - 0x30, 0xd6, 0x67, 0x01, 0xb3, 0xdf, 0x2c, 0xd0, 0xcd, 0x00, 0x80, 0x77, 0xd0, 0x25, 0xce, 
0xc2, - 0xaf, 0x41, 0xc0, 0x04, 0x99, 0xc8, 0x4c, 0x00, 0x53, 0x26, 0xce, 0xc4, 0x9b, 0xaa, 0xca, 0x89, - 0x30, 0xc0, 0xf8, 0x25, 0x2a, 0x50, 0x48, 0x22, 0xfe, 0x1c, 0xf1, 0x55, 0x4e, 0x51, 0xf4, 0x88, - 0x51, 0x25, 0xa5, 0xcc, 0x24, 0x30, 0xad, 0xbe, 0xa4, 0x19, 0x04, 0x18, 0x60, 0x78, 0x08, 0x08, - 0x7d, 0xce, 0xe9, 0x58, 0xc1, 0x5f, 0x85, 0x32, 0x05, 0x73, 0x90, 0x09, 0xdf, 0x27, 0xa5, 0xcc, - 0x14, 0xdc, 0xf6, 0x95, 0xf4, 0x34, 0xee, 0xbb, 0x06, 0xf5, 0x0c, 0x2b, 0x28, 0x4f, 0xb8, 0x2c, - 0x17, 0x63, 0xdc, 0x21, 0x8b, 0xe2, 0xa3, 0x95, 0x94, 0x7b, 0x7a, 0x40, 0x7d, 0xbb, 0x7a, 0x9f, - 0xd8, 0xe5, 0x8a, 0x3f, 0x40, 0x19, 0x5c, 0x46, 0x13, 0x15, 0xb0, 0x85, 0x1b, 0x37, 0xa6, 0x8b, - 0xb7, 0x76, 0x79, 0x8c, 0xc6, 0xcb, 0xe3, 0xe7, 0x71, 0xb4, 0x3a, 0x88, 0xdb, 0xd7, 0x95, 0xf2, - 0xba, 0x52, 0xfe, 0x23, 0x95, 0x92, 0xff, 0x6b, 0x06, 0x8d, 0xc3, 0x9d, 0xc5, 0xdf, 0x4a, 0x68, - 0x82, 0x8f, 0x71, 0xf8, 0x56, 0xba, 0xe2, 0xee, 0xe9, 0x51, 0x7e, 0x6b, 0x40, 0x6b, 0x7e, 0xed, - 0x95, 0xe5, 0x6f, 0x7e, 0xfd, 0xf3, 0xfb, 0x0b, 0x0a, 0x5e, 0xd2, 0x7a, 0x8f, 0xad, 0x8d, 0x9c, - 0x98, 0x6e, 0xf1, 0x0f, 0x12, 0x9a, 0x8e, 0x46, 0x44, 0xac, 0xf5, 0x71, 0xd3, 0x39, 0x65, 0xca, - 0x6f, 0x0f, 0x0e, 0x18, 0x5c, 0x1a, 0xd4, 0xa9, 0x87, 0x7f, 0x0c, 0xa5, 0x41, 0xdd, 0x0c, 0x24, - 0x2d, 0xd6, 0x79, 0x07, 0x93, 0x16, 0x6f, 0xab, 0xca, 0x1d, 0x90, 0x96, 0xc3, 0x5a, 0x1f, 0x69, - 0x50, 0xf5, 0xda, 0x71, 0xf8, 0x41, 0x3b, 0xc1, 0xbf, 0x48, 0x68, 0x36, 0x39, 0x48, 0xe2, 0xdb, - 0x7d, 0xbc, 0xf7, 0x9c, 0x66, 0xe5, 0xb5, 0x21, 0x51, 0x42, 0xf8, 0xc7, 0x20, 0x7c, 0x1b, 0x6f, - 0x0e, 0x29, 0x9c, 0xff, 0x21, 0xe4, 0x69, 0xc7, 0xd1, 0x80, 0x75, 0x82, 0x7f, 0x92, 0xd0, 0x4c, - 0x6c, 0x64, 0xc4, 0xb9, 0x3e, 0x92, 0xba, 0xa7, 0x5b, 0x39, 0x3f, 0x0c, 0x44, 0x84, 0x70, 0x1b, - 0x42, 0x50, 0xf1, 0xad, 0xf4, 0x10, 0xc4, 0xd0, 0x15, 0x4f, 0xfc, 0x73, 0x09, 0xcd, 0x75, 0xce, - 0x77, 0xf8, 0xdd, 0x01, 0xdc, 0xf7, 0x18, 0x34, 0xe5, 0x3b, 0x43, 0xe3, 0x84, 0xf6, 0x7b, 0xa0, - 0x7d, 0x03, 0x7f, 0x38, 0x8c, 0xf6, 0x5e, 0xb9, 0x7f, 0x2e, 0x21, 0xdc, 0xdd, 0xd7, 0xf0, 0x7b, - 0x7d, 0x84, 0xa5, 0x4e, 0x9f, 0xf2, 0xfa, 0x39, 0x90, 0x22, 0xa8, 0x0d, 0x08, 0xea, 0x7d, 0xbc, - 0x9e, 0x1e, 0x54, 0xaf, 0xce, 0x1a, 0x3f, 0x9d, 0xbf, 0x25, 0xb4, 0xf8, 0xd2, 0x36, 0x8d, 0xb7, - 0x86, 0xd6, 0xd7, 0x3d, 0x5b, 0xc8, 0xdb, 0xff, 0x8e, 0x44, 0xc4, 0xbb, 0x07, 0xf1, 0xee, 0xe0, - 0xc2, 0xb9, 0xe3, 0xd5, 0xf8, 0xe0, 0xa2, 0x1d, 0xf3, 0xdf, 0x27, 0x9b, 0x9f, 0x3e, 0x3b, 0xcd, - 0x4a, 0x2f, 0x4e, 0xb3, 0xd2, 0x1f, 0xa7, 0x59, 0xe9, 0xbb, 0xb3, 0xec, 0xc8, 0x8b, 0xb3, 0xec, - 0xc8, 0x6f, 0x67, 0xd9, 0x91, 0x2f, 0xd6, 0xca, 0xb6, 0x5f, 0xa9, 0x9b, 0xaa, 0xc5, 0x6a, 0xa1, - 0x3b, 0xa0, 0x89, 0x7c, 0x37, 0x3b, 0xbc, 0x43, 0x9b, 0x35, 0x27, 0xe0, 0x1f, 0x0c, 0xef, 0xfc, - 0x13, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x9c, 0x97, 0x6d, 0x45, 0x12, 0x00, 0x00, + // 1294 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x5f, 0x6f, 0xdb, 0x54, + 0x14, 0xaf, 0xfb, 0x27, 0x6d, 0x6f, 0x59, 0x55, 0xee, 0xba, 0x2d, 0x73, 0xd7, 0xb4, 0x18, 0x69, + 0x6b, 0xab, 0x61, 0x93, 0xb2, 0x32, 0x2a, 0x24, 0xa6, 0xa6, 0xa5, 0x5d, 0x28, 0x1a, 0xad, 0xd7, + 0x22, 0x84, 0x40, 0x91, 0xed, 0xdc, 0x24, 0x56, 0x13, 0x5f, 0xcf, 0x76, 0xb2, 0x64, 0xa5, 0x2f, + 0x7c, 0x01, 0x90, 0x78, 0x41, 0x7c, 0x80, 0x22, 0xf1, 0x80, 0xc4, 0xb7, 0x18, 0xd2, 0x1e, 0x26, + 0xf1, 0xc2, 0x13, 0x42, 0x2d, 0x5f, 0x03, 0x09, 0xf9, 0xdc, 0xeb, 0xc4, 0x4e, 0xe2, 0x25, 0x29, + 0x7b, 0xd8, 0xc3, 0x5e, 0xaa, 0xf8, 0xde, 0x73, 0x7e, 0xe7, 0x77, 0xce, 0x3d, 0xe7, 
0xde, 0x9f, + 0x8a, 0xde, 0xd2, 0x35, 0xbd, 0x51, 0xa6, 0x96, 0xf2, 0x84, 0x5a, 0xc4, 0xa0, 0x96, 0x61, 0x12, + 0xa7, 0x48, 0x94, 0x47, 0x55, 0xe2, 0x34, 0x64, 0xdb, 0xa1, 0x1e, 0xc5, 0x49, 0x6e, 0x22, 0x47, + 0x4c, 0xe4, 0x5a, 0x5a, 0x9c, 0x2d, 0xd2, 0x22, 0x05, 0x23, 0xc5, 0xff, 0xc5, 0xec, 0xc5, 0x1b, + 0x45, 0x4a, 0x8b, 0x65, 0xa2, 0x68, 0xb6, 0xa9, 0x68, 0x96, 0x45, 0x3d, 0xcd, 0x33, 0xa9, 0xe5, + 0x06, 0xbb, 0x1e, 0xb1, 0xf2, 0xc4, 0xa9, 0x98, 0x96, 0xa7, 0x78, 0x0d, 0x9b, 0xb8, 0xec, 0x2f, + 0xdf, 0x9d, 0x0f, 0xed, 0x1a, 0x4e, 0xc3, 0xf6, 0xa8, 0x62, 0x3b, 0x94, 0x16, 0xf8, 0xf6, 0x8a, + 0x41, 0xdd, 0x0a, 0x75, 0x15, 0x5d, 0x73, 0x39, 0x47, 0xa5, 0x96, 0xd6, 0x89, 0xa7, 0xa5, 0x15, + 0x5b, 0x2b, 0x9a, 0x16, 0x44, 0xe2, 0xb6, 0xa9, 0x20, 0x33, 0xdd, 0x33, 0x8c, 0x12, 0x31, 0x8e, + 0x6c, 0x0a, 0x31, 0xeb, 0x7c, 0x7f, 0xb9, 0xfb, 0x7e, 0xe4, 0x8b, 0x9b, 0x36, 0x8b, 0xd4, 0xda, + 0x31, 0xad, 0x62, 0xb8, 0x48, 0xe2, 0xcd, 0xee, 0x26, 0x1d, 0x50, 0x52, 0x60, 0x47, 0x6c, 0x6a, + 0x94, 0x7c, 0x93, 0x5a, 0xba, 0xf9, 0xbb, 0xdd, 0x26, 0x7a, 0x26, 0xb6, 0xe6, 0x68, 0x15, 0xb7, + 0x9d, 0x7d, 0xd4, 0x26, 0x7a, 0x44, 0x60, 0x2a, 0xcd, 0x22, 0xbc, 0xef, 0x33, 0xdd, 0x03, 0x7f, + 0x95, 0x3c, 0xaa, 0x12, 0xd7, 0x93, 0x0e, 0xd1, 0xe5, 0xc8, 0xaa, 0x6b, 0x53, 0xcb, 0x25, 0xf8, + 0x23, 0x94, 0x60, 0x71, 0x92, 0xc2, 0xa2, 0xb0, 0x34, 0xb5, 0xba, 0x28, 0xc7, 0x9d, 0xbe, 0xcc, + 0x3c, 0x33, 0xa3, 0x4f, 0xff, 0x5a, 0x18, 0x52, 0xb9, 0x97, 0xb4, 0xc3, 0x83, 0xdd, 0x27, 0x5a, + 0x9e, 0x38, 0x3c, 0x18, 0xbe, 0x8e, 0x26, 0x8c, 0x92, 0x66, 0x5a, 0x39, 0x33, 0x0f, 0xb8, 0x93, + 0xea, 0x38, 0x7c, 0x67, 0xf3, 0xf8, 0x2a, 0x4a, 0x94, 0x88, 0x59, 0x2c, 0x79, 0xc9, 0xe1, 0x45, + 0x61, 0x69, 0x54, 0xe5, 0x5f, 0xd2, 0x4f, 0x02, 0x27, 0x18, 0x20, 0x71, 0x82, 0xf7, 0x7c, 0x7b, + 0x7f, 0x85, 0x13, 0xbc, 0x15, 0x4f, 0x30, 0x6b, 0xe5, 0x49, 0x9d, 0xe4, 0x39, 0x00, 0x77, 0xc3, + 0x19, 0xf4, 0x46, 0x81, 0x3a, 0x47, 0x39, 0xf6, 0xe9, 0x42, 0xd8, 0xa9, 0xd5, 0x85, 0x78, 0x98, + 0x6d, 0xea, 0x1c, 0xb9, 0xea, 0x94, 0xef, 0xc4, 0xa0, 0x5c, 0xe9, 0x1a, 0xba, 0x02, 0xdc, 0x36, + 0xfd, 0x24, 0x3e, 0x35, 0x5d, 0x2f, 0xa8, 0xea, 0x1a, 0xba, 0xda, 0xbe, 0xc1, 0x79, 0xcf, 0xa1, + 0xc9, 0xa0, 0x04, 0x7e, 0x6d, 0x47, 0x96, 0x26, 0xd5, 0x09, 0x5e, 0x03, 0x57, 0x5a, 0x0d, 0xe3, + 0x65, 0xad, 0x02, 0xed, 0x5d, 0x38, 0xe9, 0xab, 0x70, 0x28, 0xe6, 0xc3, 0x43, 0x65, 0x10, 0xe2, + 0x4e, 0x56, 0x81, 0xf2, 0x32, 0xbd, 0x1d, 0x9f, 0x5f, 0x0b, 0x80, 0x31, 0xf4, 0x7f, 0x4a, 0x07, + 0x48, 0x04, 0xf4, 0x8f, 0xfd, 0xd6, 0xec, 0xa0, 0x35, 0x87, 0x26, 0xa1, 0x67, 0x73, 0x56, 0xb5, + 0x02, 0x01, 0x46, 0xd5, 0x09, 0x58, 0x78, 0x50, 0xad, 0x44, 0x38, 0x0f, 0x47, 0x39, 0x6b, 0x68, + 0xae, 0x2b, 0xea, 0x4b, 0x24, 0xfe, 0x0d, 0xba, 0x06, 0x21, 0xfc, 0xe2, 0xf3, 0xe3, 0xea, 0xa3, + 0x0b, 0xb7, 0x11, 0x6a, 0x5d, 0x20, 0xbc, 0x25, 0x6e, 0xca, 0xec, 0xb6, 0x91, 0xfd, 0xdb, 0x46, + 0x66, 0xc3, 0xce, 0x6f, 0x1b, 0x79, 0x4f, 0x2b, 0x12, 0x0e, 0xab, 0x86, 0x3c, 0xa5, 0x53, 0x01, + 0x25, 0x3b, 0xc3, 0xf3, 0xf4, 0x36, 0xd0, 0x78, 0xd0, 0x74, 0x7e, 0x03, 0x0c, 0xd0, 0xbb, 0x81, + 0x1f, 0xde, 0xe9, 0xc2, 0xf3, 0x56, 0x4f, 0x9e, 0x2c, 0x7e, 0x84, 0xe8, 0xe7, 0xe8, 0x46, 0x93, + 0x27, 0x9c, 0x46, 0x5b, 0xad, 0x2e, 0x7a, 0xc2, 0x3a, 0x9a, 0x8f, 0xc1, 0x7d, 0x69, 0x45, 0x90, + 0xf6, 0x51, 0x0a, 0x62, 0x6c, 0x9b, 0x96, 0x56, 0x36, 0x9f, 0x90, 0xfc, 0x00, 0x63, 0x83, 0x67, + 0xd1, 0x98, 0xed, 0xd0, 0x1a, 0x01, 0xe2, 0x13, 0x2a, 0xfb, 0x90, 0x4e, 0xc7, 0xd0, 0x42, 0x2c, + 0x26, 0x67, 0x7e, 0x88, 0x66, 0x0b, 0xc1, 0x6e, 0xee, 0x62, 0x7d, 0x8a, 0x0b, 0x1d, 0xf0, 0x78, + 0x1d, 0x21, 
0x56, 0x69, 0x00, 0x63, 0x47, 0x2a, 0x36, 0xc1, 0x9a, 0x4f, 0x43, 0x2d, 0x2d, 0x43, + 0x3d, 0x55, 0x76, 0x2e, 0xe0, 0xfa, 0x00, 0x4d, 0x3b, 0xda, 0xe3, 0x5c, 0xeb, 0x91, 0x49, 0x8e, + 0xb4, 0xdd, 0x89, 0x91, 0xd7, 0xc8, 0xc7, 0x50, 0xb5, 0xc7, 0x9b, 0xcd, 0x35, 0xf5, 0x92, 0x13, + 0xfe, 0xc4, 0x87, 0x08, 0xeb, 0x9e, 0x91, 0x73, 0xab, 0x7a, 0xc5, 0x74, 0x5d, 0x93, 0x5a, 0xb9, + 0x23, 0xd2, 0x48, 0x8e, 0xb6, 0x61, 0x46, 0x5f, 0xc8, 0x5a, 0x5a, 0x7e, 0xd8, 0xb4, 0xdf, 0x25, + 0x0d, 0x75, 0x46, 0xf7, 0x8c, 0xc8, 0x0a, 0xde, 0x42, 0x6f, 0xc2, 0x23, 0x9e, 0xf3, 0xea, 0x39, + 0xd3, 0xca, 0xe9, 0x65, 0x6a, 0x1c, 0x25, 0xc7, 0x00, 0xf5, 0xba, 0xdc, 0x7a, 0xf0, 0x65, 0x26, + 0x04, 0x0e, 0xea, 0x7b, 0xbe, 0xb1, 0x3a, 0x0d, 0x3e, 0x07, 0xf5, 0xac, 0x95, 0xf1, 0x1d, 0xf0, + 0x2e, 0xba, 0xc2, 0x50, 0x58, 0x1b, 0xf8, 0x48, 0x50, 0x89, 0x64, 0x02, 0x90, 0x92, 0x61, 0x24, + 0x26, 0x1d, 0x64, 0x06, 0x84, 0xc1, 0x8d, 0x35, 0x51, 0xd6, 0x82, 0x22, 0xe2, 0x2f, 0x10, 0x5b, + 0x65, 0x10, 0x39, 0x97, 0x68, 0x65, 0x92, 0x4f, 0x8e, 0x03, 0xd2, 0xca, 0x0b, 0x9e, 0x3c, 0xdf, + 0x07, 0x10, 0x1e, 0x82, 0x87, 0x3a, 0x63, 0xb7, 0xad, 0xe0, 0xaf, 0x03, 0x9a, 0x1c, 0xd9, 0xaf, + 0x84, 0xe7, 0x91, 0x7c, 0x72, 0x02, 0xba, 0x7d, 0x39, 0xbe, 0x8c, 0x07, 0x8e, 0x66, 0xb9, 0x9a, + 0xe1, 0x8f, 0x27, 0x34, 0xcb, 0xe5, 0x10, 0x76, 0x80, 0x22, 0x79, 0x68, 0x39, 0xa6, 0x4f, 0x0f, + 0x2d, 0xcf, 0x2c, 0xdf, 0x87, 0xc7, 0xf3, 0xe2, 0xcf, 0x6e, 0x6b, 0x3c, 0x46, 0xc2, 0xe3, 0xf1, + 0xdb, 0x18, 0x5a, 0xe9, 0x27, 0xec, 0xeb, 0x49, 0x79, 0x3d, 0x29, 0xaf, 0xc8, 0xa4, 0xac, 0x9e, + 0x5e, 0x42, 0x63, 0xd0, 0xb3, 0xf8, 0x3b, 0x01, 0x25, 0x98, 0x58, 0xc5, 0xb7, 0xe3, 0x19, 0x77, + 0x6a, 0x64, 0xf1, 0x9d, 0x3e, 0xad, 0x59, 0xdb, 0x4b, 0x4b, 0xdf, 0xfe, 0xf1, 0xcf, 0x0f, 0xc3, + 0x12, 0x5e, 0x54, 0xba, 0x8b, 0xf3, 0x5a, 0x9a, 0x6b, 0x78, 0xfc, 0xab, 0x80, 0x12, 0xac, 0xcc, + 0x3d, 0x19, 0x45, 0x84, 0x74, 0x4f, 0x46, 0x51, 0xb1, 0x2c, 0xed, 0x00, 0xa3, 0x0d, 0x7c, 0x2f, + 0x9e, 0x51, 0x6b, 0x3c, 0x95, 0xe3, 0xe0, 0xb2, 0x38, 0x51, 0x58, 0xe7, 0x28, 0xc7, 0xec, 0x56, + 0x38, 0xc1, 0x3f, 0x0a, 0x68, 0xb2, 0xa9, 0x69, 0xb1, 0xd2, 0x83, 0x45, 0xbb, 0x2c, 0x16, 0xdf, + 0xed, 0xdf, 0xa1, 0xff, 0x5a, 0x02, 0x5b, 0x17, 0xff, 0x1c, 0x50, 0x83, 0x41, 0xef, 0x8b, 0x5a, + 0x48, 0x2a, 0xf4, 0x47, 0x2d, 0xac, 0x03, 0xa4, 0xbb, 0x40, 0x2d, 0x8d, 0x95, 0x01, 0x8b, 0x8a, + 0x7f, 0x17, 0xd0, 0x74, 0x54, 0xf9, 0xe2, 0x3b, 0x3d, 0xa2, 0x77, 0x95, 0xdf, 0xe2, 0xda, 0x80, + 0x5e, 0x9c, 0xf8, 0x27, 0x40, 0x7c, 0x0b, 0x67, 0x06, 0xed, 0x06, 0x18, 0x50, 0x57, 0x39, 0x6e, + 0x2a, 0xc2, 0x13, 0xfc, 0x8b, 0x80, 0xa6, 0x42, 0x1a, 0x17, 0xa7, 0x7b, 0x50, 0xea, 0x94, 0xe3, + 0xe2, 0xea, 0x20, 0x2e, 0x3c, 0x85, 0x3b, 0x90, 0x82, 0x8c, 0x6f, 0xc7, 0xa7, 0xc0, 0x55, 0x62, + 0xb8, 0xf0, 0xcf, 0x04, 0x34, 0xd3, 0x2e, 0x48, 0xf1, 0xfb, 0x7d, 0x84, 0xef, 0xa2, 0x8c, 0xc5, + 0xbb, 0x03, 0xfb, 0xf5, 0x3f, 0x8c, 0x9d, 0xdc, 0xbb, 0xd5, 0xfe, 0x99, 0x80, 0x70, 0xe7, 0x43, + 0x8c, 0x3f, 0xe8, 0x41, 0x2c, 0x56, 0x2e, 0x8b, 0xeb, 0x17, 0xf0, 0xe4, 0x49, 0x6d, 0x40, 0x52, + 0x1f, 0xe2, 0xf5, 0xf8, 0xa4, 0xba, 0x49, 0x81, 0xf0, 0xe9, 0xfc, 0x2b, 0xa0, 0xf9, 0x17, 0xea, + 0x0a, 0xbc, 0x39, 0x30, 0xbf, 0x4e, 0x31, 0x24, 0x6e, 0xfd, 0x3f, 0x10, 0x9e, 0xef, 0x3e, 0xe4, + 0xbb, 0x8b, 0xb3, 0x17, 0xce, 0x57, 0x61, 0x77, 0x6a, 0xf3, 0x6e, 0xcd, 0x7c, 0xf6, 0xf4, 0x2c, + 0x25, 0x3c, 0x3f, 0x4b, 0x09, 0x7f, 0x9f, 0xa5, 0x84, 0xef, 0xcf, 0x53, 0x43, 0xcf, 0xcf, 0x53, + 0x43, 0x7f, 0x9e, 0xa7, 0x86, 0xbe, 0x5c, 0x2b, 0x9a, 0x5e, 0xa9, 0xaa, 0xcb, 0x06, 0xad, 0x04, + 0xe1, 0x00, 0xa6, 0x19, 0xbb, 0xde, 
0x16, 0x1d, 0x74, 0x81, 0x9e, 0x80, 0xff, 0xfb, 0xbc, 0xf7, + 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x27, 0x00, 0x97, 0xe7, 0xdc, 0x13, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1061,6 +1174,8 @@ const _ = grpc.SupportPackageIsVersion4 type QueryClient interface { // Parameters queries the parameters of the module. Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) + // Header queries the CZ header and fork headers at a given height. + Header(ctx context.Context, in *QueryHeaderRequest, opts ...grpc.CallOption) (*QueryHeaderResponse, error) // ChainList queries the list of chains that checkpoint to Babylon ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) // ChainInfo queries the latest info of a chain in Babylon's view @@ -1094,6 +1209,15 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts . return out, nil } +func (c *queryClient) Header(ctx context.Context, in *QueryHeaderRequest, opts ...grpc.CallOption) (*QueryHeaderResponse, error) { + out := new(QueryHeaderResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/Header", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *queryClient) ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) { out := new(QueryChainListResponse) err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ChainList", in, out, opts...) @@ -1161,6 +1285,8 @@ func (c *queryClient) FinalizedChainInfoUntilHeight(ctx context.Context, in *Que type QueryServer interface { // Parameters queries the parameters of the module. Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) + // Header queries the CZ header and fork headers at a given height. 
+ Header(context.Context, *QueryHeaderRequest) (*QueryHeaderResponse, error) // ChainList queries the list of chains that checkpoint to Babylon ChainList(context.Context, *QueryChainListRequest) (*QueryChainListResponse, error) // ChainInfo queries the latest info of a chain in Babylon's view @@ -1184,6 +1310,9 @@ type UnimplementedQueryServer struct { func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") } +func (*UnimplementedQueryServer) Header(ctx context.Context, req *QueryHeaderRequest) (*QueryHeaderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Header not implemented") +} func (*UnimplementedQueryServer) ChainList(ctx context.Context, req *QueryChainListRequest) (*QueryChainListResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ChainList not implemented") } @@ -1228,6 +1357,24 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } +func _Query_Header_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryHeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Header(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/Header", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Header(ctx, req.(*QueryHeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Query_ChainList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryChainListRequest) if err := dec(in); err != nil { @@ -1362,6 +1509,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "Params", Handler: _Query_Params_Handler, }, + { + MethodName: "Header", + Handler: _Query_Header_Handler, + }, { MethodName: "ChainList", Handler: _Query_ChainList_Handler, @@ -1451,6 +1602,88 @@ func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *QueryHeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryHeaderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryHeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryHeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryHeaderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryHeaderResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ForkHeaders != nil { + { + size, err := m.ForkHeaders.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *QueryChainListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2162,6 +2395,39 @@ func (m *QueryParamsResponse) Size() (n int) { return n } +func (m *QueryHeaderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovQuery(uint64(m.Height)) + } + return n +} + +func (m *QueryHeaderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ForkHeaders != nil { + l = m.ForkHeaders.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + func (m *QueryChainListRequest) Size() (n int) { if m == nil { return 0 @@ -2568,6 +2834,229 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryHeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryHeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryHeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*QueryHeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryHeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryHeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &IndexedHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForkHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ForkHeaders == nil { + m.ForkHeaders = &Forks{} + } + if err := m.ForkHeaders.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryChainListRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/zoneconcierge/types/query.pb.gw.go b/x/zoneconcierge/types/query.pb.gw.go index 19f747ec0..5ffdf1014 100644 --- a/x/zoneconcierge/types/query.pb.gw.go +++ b/x/zoneconcierge/types/query.pb.gw.go @@ -51,6 +51,82 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal } +func request_Query_Header_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryHeaderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + 
val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + + msg, err := client.Header(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Header_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryHeaderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + + msg, err := server.Header(ctx, &protoReq) + return msg, metadata, err + +} + func request_Query_ChainList_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryChainListRequest var metadata runtime.ServerMetadata @@ -542,6 +618,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_Header_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Header_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Header_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_Query_ChainList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -764,6 +863,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_Header_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Header_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Header_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_ChainList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -910,6 +1029,8 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie var ( pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_Header_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "chain_id", "header", "height"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_ChainList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "chains"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_ChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) @@ -928,6 +1049,8 @@ var ( var ( forward_Query_Params_0 = runtime.ForwardResponseMessage + forward_Query_Header_0 = runtime.ForwardResponseMessage + forward_Query_ChainList_0 = runtime.ForwardResponseMessage forward_Query_ChainInfo_0 = runtime.ForwardResponseMessage From 02b8c58acce04278c88ff7f431faec13a329dfe3 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Thu, 12 Jan 2023 10:10:48 +1100 Subject: [PATCH 12/37] checkpointing API: add checkpoint lifecycle in `RawCheckpointWithMeta` (#267) --- client/docs/swagger-ui/swagger.yaml | 6517 +++++++++++++----- proto/babylon/checkpointing/checkpoint.proto | 15 + testutil/datagen/tendermint.go | 3 + x/checkpointing/keeper/hooks.go | 4 +- x/checkpointing/keeper/keeper.go | 41 +- x/checkpointing/keeper/keeper_test.go | 76 +- x/checkpointing/types/checkpoint.pb.go | 453 +- x/checkpointing/types/types.go | 33 +- x/checkpointing/types/types_test.go | 15 +- x/zoneconcierge/keeper/grpc_query_test.go | 2 +- 10 files changed, 5160 insertions(+), 1999 deletions(-) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index b9301446d..5e162eddf 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -4144,6 +4144,49 @@ paths: title: >- power_sum defines the accumulated voting 
power for the checkpoint + lifecycle: + type: array + items: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: >- + state defines the event of a state transition + towards this state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block that + triggers the state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block + that triggers the state update + description: >- + lifecycle defines the lifecycle of this checkpoint, i.e., + each state transition and + + the time (in both timestamp and block height) of this + transition. description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. description: >- QueryRawCheckpointResponse is the response type for the @@ -4252,6 +4295,49 @@ paths: title: >- power_sum defines the accumulated voting power for the checkpoint + lifecycle: + type: array + items: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: >- + state defines the event of a state transition + towards this state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block + that triggers the state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block + that triggers the state update + description: >- + lifecycle defines the lifecycle of this checkpoint, + i.e., each state transition and + + the time (in both timestamp and block height) of this + transition. description: >- RawCheckpointWithMeta wraps the raw checkpoint with meta data. @@ -4392,9 +4478,7 @@ paths: type: string title: chain_id is the ID of the chain latest_header: - title: >- - latest_header is the latest header in the canonical chain - of CZ + title: latest_header is the latest header in CZ's canonical chain type: object properties: chain_id: @@ -4667,6 +4751,12 @@ paths: the subsequent headers cannot be verified without knowing the validator set in the previous header. 
+ timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped + headers in CZ's canonical chain description: >- QueryChainInfoResponse is response type for the Query/ChainInfo RPC method. @@ -4869,218 +4959,7 @@ paths: type: string tags: - Query - /babylon/zoneconcierge/v1/chains: - get: - summary: ChainList queries the list of chains that checkpoint to Babylon - operationId: ChainList - responses: - '200': - description: A successful response. - schema: - type: object - properties: - chain_ids: - type: array - items: - type: string - title: >- - QueryChainListResponse is response type for the Query/ChainList - RPC method - default: - description: An unexpected error response. - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... 
- } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - tags: - - Query - /babylon/zoneconcierge/v1/epochs/{epoch_num}/chain_info/{chain_id}: + /babylon/zoneconcierge/v1/chain_info/{chain_id}/epochs/{epoch_num}: get: summary: >- EpochChainInfo queries the latest info of a chain in a given epoch of @@ -5100,9 +4979,7 @@ paths: type: string title: chain_id is the ID of the chain latest_header: - title: >- - latest_header is the latest header in the canonical chain - of CZ + title: latest_header is the latest header in CZ's canonical chain type: object properties: chain_id: @@ -5375,6 +5252,12 @@ paths: the subsequent headers cannot be verified without knowing the validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped + headers in CZ's canonical chain description: >- QueryEpochChainInfoResponse is response type for the Query/EpochChainInfo RPC method. @@ -5571,429 +5454,1984 @@ paths: "value": "1.212s" } parameters: - - name: epoch_num + - name: chain_id in: path required: true type: string - format: uint64 - - name: chain_id + - name: epoch_num in: path required: true type: string + format: uint64 tags: - Query - /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}: + /babylon/zoneconcierge/v1/chain_info/{chain_id}/header/{height}: get: - summary: >- - FinalizedChainInfo queries the BTC-finalised info of a chain, with - proofs - operationId: FinalizedChainInfo + summary: Header queries the CZ header and fork headers at a given height. + operationId: Header responses: '200': description: A successful response. 
schema: type: object properties: - finalized_chain_info: - title: finalized_chain_info is the info of the CZ + header: type: object properties: chain_id: type: string - title: chain_id is the ID of the chain - latest_header: + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header + on CZ ledger + babylon_header: title: >- - latest_header is the latest header in the canonical chain - of CZ + babylon_header is the header of the babylon block that + includes this CZ header type: object properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + + including all blockchain data structures and the rules + of the application's + + state transition machine. chain_id: type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header height: type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the - header on CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that - includes this CZ header + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info type: object properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for - processing a block in the blockchain, - - including all blockchain data structures and the - rules of the application's - - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: + hash: type: string - format: date-time - last_block_id: - title: prev block info + format: byte + part_set_header: type: object properties: + total: + type: integer + format: int64 hash: type: string format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: >- - Header defines the structure of a Tendermint block - header. 
- babylon_epoch: + title: PartsetHeader + last_commit_hash: type: string - format: uint64 - title: >- - epoch is the epoch number of this header on Babylon - ledger - babylon_tx_hash: + format: byte + title: hashes of block data + data_hash: type: string format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes - this header - - (babylon_block_height, babylon_tx_hash) jointly - provides the position of the header on Babylon ledger - latest_forks: + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte title: >- - latest_forks is the latest forks, formed as a series of - IndexedHeader (from low to high) - type: object - properties: - headers: - type: array - items: + babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + fork_headers: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block + that includes this CZ header type: object properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. chain_id: type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header height: type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of - the header on CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon - block that includes this CZ header + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info type: object properties: - version: - title: basic block info + hash: + type: string + format: byte + part_set_header: type: object properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for - processing a block in the blockchain, - - including all blockchain data structures and - the rules of the application's - - state transition machine. 
- chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: + total: + type: integer + format: int64 + hash: type: string format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: >- - hashes from the app output from the prev - block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: >- - Header defines the structure of a Tendermint - block header. - babylon_epoch: + title: PartsetHeader + last_commit_hash: type: string - format: uint64 - title: >- - epoch is the epoch number of this header on - Babylon ledger - babylon_tx_hash: + format: byte + title: hashes of block data + data_hash: type: string format: byte - title: >- - babylon_tx_hash is the hash of the tx that - includes this header + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes + this header - (babylon_block_height, babylon_tx_hash) jointly - provides the position of the header on Babylon - ledger - title: IndexedHeader is the metadata of a CZ header - title: >- - blocks is the list of non-canonical indexed headers at - the same height - description: >- - Forks is a list of non-canonical `IndexedHeader`s at the - same height. + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon + ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the + same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same + height. - For example, assuming the following blockchain + For example, assuming the following blockchain - ``` + ``` - A <- B <- C <- D <- E - \ -- D1 - \ -- D2 - ``` + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - Then the fork will be {[D1, D2]} where each item is in - struct `IndexedBlock`. + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - Note that each `IndexedHeader` in the fork should have a - valid quorum certificate. + Note that each `IndexedHeader` in the fork should have a valid + quorum certificate. - Such forks exist since Babylon considers CZs might have - dishonest majority. + Such forks exist since Babylon considers CZs might have + dishonest majority. 

            Also note that the IBC-Go implementation will only consider
            the first header in a fork valid, since

            the subsequent headers cannot be verified without knowing the
            validator set in the previous header.
          description: >-
            QueryHeaderResponse is response type for the Query/Header RPC
            method.
        default:
          description: An unexpected error response.
          schema:
            type: object
            properties:
              error:
                type: string
              code:
                type: integer
                format: int32
              message:
                type: string
              details:
                type: array
                items:
                  type: object
                  properties:
                    type_url:
                      type: string
                      description: >-
                        A URL/resource name that uniquely identifies the type of
                        the serialized

                        protocol buffer message. This string must contain at
                        least

                        one "/" character. The last segment of the URL's path
                        must represent

                        the fully qualified name of the type (as in

                        `path/google.protobuf.Duration`). The name should be in
                        a canonical form

                        (e.g., leading "." is not accepted).
- This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - type: object - properties: - version: - title: basic block info + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: chain_id + in: path + required: true + type: string + - name: height + in: path + required: true + type: string + format: uint64 + tags: + - Query + /babylon/zoneconcierge/v1/chains: + get: + summary: ChainList queries the list of chains that checkpoint to Babylon + operationId: ChainList + responses: + '200': + description: A successful response. + schema: + type: object + properties: + chain_ids: + type: array + items: + type: string + title: >- + QueryChainListResponse is response type for the Query/ChainList + RPC method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + tags: + - Query + /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}: + get: + summary: >- + FinalizedChainInfo queries the BTC-finalised info of a chain, with + proofs + operationId: FinalizedChainInfo + responses: + '200': + description: A successful response. + schema: + type: object + properties: + finalized_chain_info: + title: finalized_chain_info is the info of the CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. 
+ babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes + this header + + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of + the header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon + block that includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and + the rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on + Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that + includes this header + + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon + ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at + the same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the + same height. + + For example, assuming the following blockchain + + ``` + + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` + + Then the fork will be {[D1, D2]} where each item is in + struct `IndexedBlock`. + + + Note that each `IndexedHeader` in the fork should have a + valid quorum certificate. + + Such forks exist since Babylon considers CZs might have + dishonest majority. 
+ + Also note that the IBC-Go implementation will only + consider the first header in a fork valid, since + + the subsequent headers cannot be verified without knowing + the validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped + headers in CZ's canonical chain + epoch_info: + title: epoch_info is the metadata of the last BTC-finalised epoch + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this + epoch. + + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous + epoch's checkpoint is finalised. + + The last_block_header field is nil in the epoch's + beginning, and is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + + including all blockchain data structures and the rules + of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: >- + app_hash_root is the Merkle root of all AppHashs in this + epoch + + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + + including all blockchain data structures and the rules + of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + raw_checkpoint: + title: raw_checkpoint is the raw checkpoint of this epoch + type: object + properties: + epoch_num: + type: string + format: uint64 + title: >- + epoch_num defines the epoch number the raw checkpoint is + for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that + individual BLS sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of + the BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated + from individual BLS sigs + btc_submission_key: + title: >- + btc_submission_key is position of two BTC txs that include the + raw checkpoint of this epoch + type: object + properties: + key: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified + by hash of block in + + which transaction was included and transaction index in + the block + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header + is included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a + transaction in the Merkle tree. 
+ proof_header_in_epoch: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + title: >- + proof_header_in_epoch is the proof that the Babylon header is + in a certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting + power, and its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + metadata is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating + Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating + Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified + by hash of block in + + which transaction was included and transaction index in + the block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain + + Although it is already a part of SubmissionKey, we store + it here again + + to make TransactionInfo self-contained. + + For example, storing the key allows TransactionInfo to + not relay on + + the fact that TransactionInfo will be ordered in the + same order as + + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in + the position in `key` + + TODO: maybe it could use here better format as we + already processed and + + valideated the proof? 
+ title: >- + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including + + - the position of the tx on BTC blockchain + + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint + is included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission + description: >- + QueryFinalizedChainInfoResponse is response type for the + Query/FinalizedChainInfo RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: chain_id + description: chain_id is the ID of the CZ + in: path + required: true + type: string + - name: prove + description: >- + prove indicates whether the querier wants to get proofs of this + timestamp. + in: query + required: false + type: boolean + tags: + - Query + /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}/height/{height}: + get: + summary: >- + FinalizedChainInfoUntilHeight queries the BTC-finalised info no later + than the provided CZ height, with proofs + operationId: FinalizedChainInfoUntilHeight + responses: + '200': + description: A successful response. + schema: + type: object + properties: + finalized_chain_info: + title: finalized_chain_info is the info of the CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes + this header + + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of + the header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon + block that includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and + the rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. 
+ babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on + Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that + includes this header + + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon + ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at + the same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the + same height. + + For example, assuming the following blockchain + + ``` + + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` + + Then the fork will be {[D1, D2]} where each item is in + struct `IndexedBlock`. + + + Note that each `IndexedHeader` in the fork should have a + valid quorum certificate. + + Such forks exist since Babylon considers CZs might have + dishonest majority. + + Also note that the IBC-Go implementation will only + consider the first header in a fork valid, since + + the subsequent headers cannot be verified without knowing + the validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped + headers in CZ's canonical chain + epoch_info: + title: epoch_info is the metadata of the last BTC-finalised epoch + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this + epoch. + + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous + epoch's checkpoint is finalised. + + The last_block_header field is nil in the epoch's + beginning, and is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info type: object properties: block: @@ -6006,318 +7444,787 @@ paths: Consensus captures the consensus rules for processing a block in the blockchain, - including all blockchain data structures and the rules - of the application's + including all blockchain data structures and the rules + of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: >- + app_hash_root is the Merkle root of all AppHashs in this + epoch + + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + + including all blockchain data structures and the rules + of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ raw_checkpoint: + title: raw_checkpoint is the raw checkpoint of this epoch + type: object + properties: + epoch_num: + type: string + format: uint64 + title: >- + epoch_num defines the epoch number the raw checkpoint is + for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that + individual BLS sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of + the BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated + from individual BLS sigs + btc_submission_key: + title: >- + btc_submission_key is position of two BTC txs that include the + raw checkpoint of this epoch + type: object + properties: + key: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified + by hash of block in + + which transaction was included and transaction index in + the block + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header + is included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a + transaction in the Merkle tree. + proof_header_in_epoch: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + title: >- + proof_header_in_epoch is the proof that the Babylon header is + in a certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting + power, and its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + metadata is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating + Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating + Merkle root + + 
The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified + by hash of block in + + which transaction was included and transaction index in + the block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain + + Although it is already a part of SubmissionKey, we store + it here again + + to make TransactionInfo self-contained. + + For example, storing the key allows TransactionInfo to + not relay on + + the fact that TransactionInfo will be ordered in the + same order as + + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in + the position in `key` + + TODO: maybe it could use here better format as we + already processed and + + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including + + - the position of the tx on BTC blockchain + + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint + is included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission + description: >- + QueryFinalizedChainInfoUntilHeightResponse is response type for + the Query/FinalizedChainInfoUntilHeight RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. 
+ value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - raw_checkpoint: - title: raw_checkpoint is the raw checkpoint of this epoch - type: object - properties: - epoch_num: - type: string - format: uint64 - title: >- - epoch_num defines the epoch number the raw checkpoint is - for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of - the BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated - from individual BLS sigs - btc_submission_key: - title: >- - btc_submission_key is position of two BTC txs that include the - raw checkpoint of this epoch - type: object - properties: - key: - type: array - items: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified - by hash of block in + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
- which transaction was included and transaction index in - the block - proof_tx_in_block: - title: >- - proof_tx_in_block is the proof that tx that carries the header - is included in a certain Babylon block - type: object - properties: - root_hash: - type: string - format: byte - data: - type: string - format: byte - proof: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - description: >- - TxProof represents a Merkle proof of the presence of a - transaction in the Merkle tree. - proof_header_in_epoch: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: chain_id + description: chain_id is the ID of the CZ + in: path + required: true + type: string + - name: height + description: >- + height is the height of the CZ chain + + such that the returned finalised chain info will be no later than + this height + in: path + required: true + type: string + format: uint64 + - name: prove + description: >- + prove indicates whether the querier wants to get proofs of this + timestamp. + in: query + required: false + type: boolean + tags: + - Query + /babylon/zoneconcierge/v1/headers/{chain_id}: + get: + summary: >- + ListHeaders queries the headers of a chain in Babylon's view, with + pagination support + operationId: ListHeaders + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: type: string format: byte - title: >- - proof_header_in_epoch is the proof that the Babylon header is - in a certain epoch - proof_epoch_sealed: - title: proof_epoch_sealed is the proof that the epoch is sealed - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 + title: hash is the hash of this header + height: + type: string + format: uint64 title: >- - ValidatorWithBlsKey couples validator address, voting - power, and its bls public key - title: >- - validator_set is the validator set of the sealed epoch + height is the height of this header on CZ ledger - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's - metadata is committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info type: object properties: - type: - type: string - key: + block: type: string - format: byte - data: + format: uint64 + app: type: string - format: byte - title: >- - ProofOp defines an operation used for calculating - Merkle root + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, - The data could be arbitrary format, providing - nessecary data + including all blockchain data structures and the + rules of the application's - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's - validator set is committed to `app_hash` of the sealer - header - type: object - properties: - ops: - type: array - items: + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info type: object properties: - type: - type: string - key: - type: string - format: byte - data: + hash: type: string format: byte - title: >- - ProofOp defines an operation used for calculating - Merkle root - - The data could be arbitrary format, providing - nessecary data - - for example neighbouring node hash - proof_epoch_submitted: - type: array - items: - type: object - properties: - key: - type: object - properties: - index: - type: integer - format: int64 - hash: + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: type: string format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified - by hash of block in - - which transaction was included and transaction index in - the block description: >- - key is the position (txIdx, blockHash) of this tx on BTC - blockchain - - Although it is already a part of SubmissionKey, we store - it here again - - to make TransactionInfo self-contained. - - For example, storing the key allows TransactionInfo to - not relay on - - the fact that TransactionInfo will be ordered in the - same order as - - TransactionKeys in SubmissionKey. - transaction: + Header defines the structure of a Tendermint block + header. + babylon_epoch: type: string - format: byte - title: transaction is the full transaction in bytes - proof: + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: type: string format: byte title: >- - proof is the Merkle proof that this tx is included in - the position in `key` - - TODO: maybe it could use here better format as we - already processed and - - valideated the proof? - title: >- - TransactionInfo is the info of a tx that contains Babylon - checkpoint, including - - - the position of the tx on BTC blockchain + babylon_tx_hash is the hash of the tx that includes this + header - - the full tx content + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: headers is the list of headers + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - - the Merkle proof that this tx is on the above position - title: >- - proof_epoch_submitted is the proof that the epoch's checkpoint - is included in BTC ledger + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the - It is the two TransactionInfo in the best (i.e., earliest) - checkpoint submission + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } description: >- - QueryFinalizedChainInfoResponse is response type for the - Query/FinalizedChainInfo RPC method. + QueryListHeadersResponse is response type for the + Query/ListHeaders RPC method. default: description: An unexpected error response. schema: @@ -6512,25 +8419,73 @@ paths: } parameters: - name: chain_id - description: chain_id is the ID of the CZ in: path required: true type: string - - name: prove + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset description: >- - prove indicates whether the querier wants to get proofs of this - timestamp. + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean tags: - Query - /babylon/zoneconcierge/v1/headers/{chain_id}: + /babylon/zoneconcierge/v1/headers/{chain_id}/epochs/{epoch_num}: get: summary: >- - ListHeaders queries the headers of a chain in Babylon's view, with - pagination support - operationId: ListHeaders + ListEpochHeaders queries the headers of a chain timestamped in a given + epoch of Babylon, with pagination support + operationId: ListEpochHeaders responses: '200': description: A successful response. @@ -6656,38 +8611,9 @@ paths: the position of the header on Babylon ledger title: IndexedHeader is the metadata of a CZ header title: headers is the list of headers - pagination: - title: pagination defines the pagination in the response - type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. 
- total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: >- - PageResponse is to be embedded in gRPC response messages where - the - - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } description: >- - QueryListHeadersResponse is response type for the - Query/ListHeaders RPC method. + QueryListEpochHeadersResponse is response type for the + Query/ListEpochHeaders RPC method. default: description: An unexpected error response. schema: @@ -6885,62 +8811,11 @@ paths: in: path required: true type: string - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. - in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. - in: query - required: false + - name: epoch_num + in: path + required: true type: string format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. - in: query - required: false - type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - - - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean tags: - Query /babylon/zoneconcierge/v1/params: @@ -10536,6 +12411,39 @@ definitions: application's state transition machine. + babylon.checkpointing.v1.CheckpointStateUpdate: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. 
+ title: state defines the event of a state transition towards this state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block that triggers the + state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block that triggers the + state update babylon.checkpointing.v1.CheckpointStatus: type: string enum: @@ -10720,6 +12628,49 @@ definitions: title: >- power_sum defines the accumulated voting power for the checkpoint + lifecycle: + type: array + items: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: >- + state defines the event of a state transition towards this + state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block that + triggers the state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block that + triggers the state update + description: >- + lifecycle defines the lifecycle of this checkpoint, i.e., each + state transition and + + the time (in both timestamp and block height) of this + transition. description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. title: the order is going from the newest to oldest based on the epoch number pagination: @@ -10804,6 +12755,48 @@ definitions: type: string format: uint64 title: power_sum defines the accumulated voting power for the checkpoint + lifecycle: + type: array + items: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: >- + state defines the event of a state transition towards this + state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block that + triggers the state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block that + triggers the state update + description: >- + lifecycle defines the lifecycle of this checkpoint, i.e., each + state transition and + + the time (in both timestamp and block height) of this transition. 
description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. description: >- QueryRawCheckpointResponse is the response type for the @@ -10910,6 +12903,46 @@ definitions: type: string format: uint64 title: power_sum defines the accumulated voting power for the checkpoint + lifecycle: + type: array + items: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: state defines the event of a state transition towards this state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block that triggers + the state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block that triggers + the state update + description: >- + lifecycle defines the lifecycle of this checkpoint, i.e., each state + transition and + + the time (in both timestamp and block height) of this transition. description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. babylon.checkpointing.v1.ValidatorWithBlsKey: type: object @@ -10980,7 +13013,7 @@ definitions: type: string title: chain_id is the ID of the chain latest_header: - title: latest_header is the latest header in the canonical chain of CZ + title: latest_header is the latest header in CZ's canonical chain type: object properties: chain_id: @@ -11240,6 +13273,12 @@ definitions: the subsequent headers cannot be verified without knowing the validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in CZ's + canonical chain title: ChainInfo is the information of a CZ babylon.zoneconcierge.v1.Forks: type: object @@ -11416,180 +13455,474 @@ definitions: block: type: string format: uint64 - app: + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in + the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this header + + (babylon_block_height, babylon_tx_hash) jointly provides the position + of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + babylon.zoneconcierge.v1.Params: + type: object + description: Params defines the parameters for the module. + babylon.zoneconcierge.v1.ProofEpochSealed: + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, and its + bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on `last_commit_hash` + of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: |- + ProofOp defines an operation used for calculating Merkle root + The data could be arbitrary format, providing nessecary data + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's validator set is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: |- + ProofOp defines an operation used for calculating Merkle root + The data could be arbitrary format, providing nessecary data + for example neighbouring node hash + title: >- + ProofEpochSealed is the proof that an epoch is sealed by the sealer + header, i.e., the 2nd header of the next epoch + + With the access of metadata + + - Metadata of this epoch, which includes the sealer header + + - Raw checkpoint of this epoch + + The verifier can perform the following verification rules: + + - The raw checkpoint's `last_commit_hash` is same as in the sealer header + + - More than 1/3 (in voting power) validators in the validator set of this + 
epoch have signed `last_commit_hash` of the sealer header + + - The epoch medatata is committed to the `app_hash` of the sealer header + + - The validator set is committed to the `app_hash` of the sealer header + babylon.zoneconcierge.v1.QueryChainInfoResponse: + type: object + properties: + chain_info: + title: chain_info is the info of the CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on + CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block in - the blockchain, - - including all blockchain data structures and the rules of the - application's + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - state transition machine. 
- chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) type: object properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - babylon_epoch: - type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: - type: string - format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this header + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger - (babylon_block_height, babylon_tx_hash) jointly provides the position - of the header on Babylon ledger - title: IndexedHeader is the metadata of a CZ header - babylon.zoneconcierge.v1.Params: - type: object - description: Params defines the parameters for the module. - babylon.zoneconcierge.v1.ProofEpochSealed: - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting power, and its - bls public key - title: >- - validator_set is the validator set of the sealed epoch + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - This validator set has generated a BLS multisig on `last_commit_hash` - of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's metadata is - committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: - type: string - key: - type: string - format: byte - data: - type: string - format: byte - title: |- - ProofOp defines an operation used for calculating Merkle root - The data could be arbitrary format, providing nessecary data - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's validator set is - committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: - type: string - key: - type: string - format: byte - data: - type: string - format: byte - title: |- - ProofOp defines an operation used for calculating Merkle root - The data could be arbitrary format, providing nessecary data - for example neighbouring node hash - title: >- - ProofEpochSealed is the proof that an epoch is sealed by the sealer - header, i.e., the 2nd header of the next epoch + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the + same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same + height. - With the access of metadata + For example, assuming the following blockchain - - Metadata of this epoch, which includes the sealer header + ``` - - Raw checkpoint of this epoch + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - The verifier can perform the following verification rules: + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - - The raw checkpoint's `last_commit_hash` is same as in the sealer header - - More than 1/3 (in voting power) validators in the validator set of this - epoch have signed `last_commit_hash` of the sealer header + Note that each `IndexedHeader` in the fork should have a valid + quorum certificate. 
- - The epoch medatata is committed to the `app_hash` of the sealer header + Such forks exist since Babylon considers CZs might have dishonest + majority. - - The validator set is committed to the `app_hash` of the sealer header - babylon.zoneconcierge.v1.QueryChainInfoResponse: + Also note that the IBC-Go implementation will only consider the + first header in a fork valid, since + + the subsequent headers cannot be verified without knowing the + validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in + CZ's canonical chain + description: >- + QueryChainInfoResponse is response type for the Query/ChainInfo RPC + method. + babylon.zoneconcierge.v1.QueryChainListResponse: + type: object + properties: + chain_ids: + type: array + items: + type: string + title: QueryChainListResponse is response type for the Query/ChainList RPC method + babylon.zoneconcierge.v1.QueryEpochChainInfoResponse: type: object properties: chain_info: @@ -11600,7 +13933,7 @@ definitions: type: string title: chain_id is the ID of the chain latest_header: - title: latest_header is the latest header in the canonical chain of CZ + title: latest_header is the latest header in CZ's canonical chain type: object properties: chain_id: @@ -11613,191 +13946,280 @@ definitions: height: type: string format: uint64 - app: + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on + CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ babylon_epoch: type: string format: uint64 - description: >- - Consensus captures the consensus rules for processing a block in - the blockchain, + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - including all blockchain data structures and the rules of the - application's + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - babylon_epoch: - type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: - type: string - format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this header + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - (babylon_block_height, babylon_tx_hash) jointly provides the position - of the header on Babylon ledger - title: IndexedHeader is the metadata of a CZ header - babylon.zoneconcierge.v1.Params: - type: object - description: Params defines the parameters for the module. - babylon.zoneconcierge.v1.ProofEpochSealed: - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting power, and its - bls public key - title: >- - validator_set is the validator set of the sealed epoch + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the + same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same + height. 
- This validator set has generated a BLS multisig on `last_commit_hash` - of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's metadata is - committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: - type: string - key: - type: string - format: byte - data: - type: string - format: byte - title: |- - ProofOp defines an operation used for calculating Merkle root - The data could be arbitrary format, providing nessecary data - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's validator set is - committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: - type: string - key: - type: string - format: byte - data: - type: string - format: byte - title: |- - ProofOp defines an operation used for calculating Merkle root - The data could be arbitrary format, providing nessecary data - for example neighbouring node hash - title: >- - ProofEpochSealed is the proof that an epoch is sealed by the sealer - header, i.e., the 2nd header of the next epoch + For example, assuming the following blockchain - With the access of metadata + ``` - - Metadata of this epoch, which includes the sealer header + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - - Raw checkpoint of this epoch + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - The verifier can perform the following verification rules: - - The raw checkpoint's `last_commit_hash` is same as in the sealer header + Note that each `IndexedHeader` in the fork should have a valid + quorum certificate. - - More than 1/3 (in voting power) validators in the validator set of this - epoch have signed `last_commit_hash` of the sealer header + Such forks exist since Babylon considers CZs might have dishonest + majority. - - The epoch medatata is committed to the `app_hash` of the sealer header + Also note that the IBC-Go implementation will only consider the + first header in a fork valid, since - - The validator set is committed to the `app_hash` of the sealer header - babylon.zoneconcierge.v1.QueryChainInfoResponse: + the subsequent headers cannot be verified without knowing the + validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in + CZ's canonical chain + description: >- + QueryEpochChainInfoResponse is response type for the Query/EpochChainInfo + RPC method. + babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse: type: object properties: - chain_info: - title: chain_info is the info of the CZ + finalized_chain_info: + title: finalized_chain_info is the info of the CZ type: object properties: chain_id: type: string title: chain_id is the ID of the chain latest_header: - title: latest_header is the latest header in the canonical chain of CZ + title: latest_header is the latest header in CZ's canonical chain type: object properties: chain_id: @@ -12063,298 +14485,446 @@ definitions: the subsequent headers cannot be verified without knowing the validator set in the previous header. - description: >- - QueryChainInfoResponse is response type for the Query/ChainInfo RPC - method. 
- babylon.zoneconcierge.v1.QueryChainListResponse: - type: object - properties: - chain_ids: - type: array - items: - type: string - title: QueryChainListResponse is response type for the Query/ChainList RPC method - babylon.zoneconcierge.v1.QueryEpochChainInfoResponse: - type: object - properties: - chain_info: - title: chain_info is the info of the CZ + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in + CZ's canonical chain + epoch_info: + title: epoch_info is the metadata of the last BTC-finalised epoch + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this epoch. + + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous epoch's + checkpoint is finalised. + + The last_block_header field is nil in the epoch's beginning, and + is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: |- + app_hash_root is the Merkle root of all AppHashs in this epoch + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + raw_checkpoint: + title: raw_checkpoint is the raw checkpoint of this epoch + type: object + properties: + epoch_num: + type: string + format: uint64 + title: epoch_num defines the epoch number the raw checkpoint is for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that individual BLS + sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of the BLS + multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated from + individual BLS sigs + btc_submission_key: + title: >- + btc_submission_key is position of two BTC txs that include the raw + checkpoint of this epoch + type: object + properties: + key: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified by hash + of block in + + which transaction was included and transaction index in the + block + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header is + included in a certain Babylon block type: object properties: - chain_id: + root_hash: type: string - title: chain_id is the ID of the chain - latest_header: - title: latest_header is the latest header in the canonical chain of CZ + format: byte + data: + type: string + format: byte + proof: type: object properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: + total: type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the header on - CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that - includes this CZ header - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a - block in the blockchain, - - including all blockchain data structures and the rules of - the application's - - state transition machine. 
- chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - babylon_epoch: + format: int64 + index: type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: + format: int64 + leaf_hash: type: string format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this - header - - (babylon_block_height, babylon_tx_hash) jointly provides the - position of the header on Babylon ledger - latest_forks: - title: >- - latest_forks is the latest forks, formed as a series of - IndexedHeader (from low to high) - type: object - properties: - headers: + aunts: type: array items: - type: object - properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: - type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the - header on CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that - includes this CZ header - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for - processing a block in the blockchain, - - including all blockchain data structures and the - rules of the application's - - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: >- - Header defines the structure of a Tendermint block - header. 
- babylon_epoch: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a transaction in + the Merkle tree. + proof_header_in_epoch: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + title: >- + proof_header_in_epoch is the proof that the Babylon header is in a + certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, and + its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: type: string - format: uint64 - title: >- - epoch is the epoch number of this header on Babylon - ledger - babylon_tx_hash: + key: type: string format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this - header + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root - (babylon_block_height, babylon_tx_hash) jointly provides - the position of the header on Babylon ledger - title: IndexedHeader is the metadata of a CZ header - title: >- - blocks is the list of non-canonical indexed headers at the - same height - description: >- - Forks is a list of non-canonical `IndexedHeader`s at the same - height. + The data could be arbitrary format, providing nessecary data - For example, assuming the following blockchain + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's validator + set is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root - ``` + The data could be arbitrary format, providing nessecary data - A <- B <- C <- D <- E - \ -- D1 - \ -- D2 - ``` + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified by hash + of block in - Then the fork will be {[D1, D2]} where each item is in struct - `IndexedBlock`. + which transaction was included and transaction index in the + block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain + Although it is already a part of SubmissionKey, we store it here + again - Note that each `IndexedHeader` in the fork should have a valid - quorum certificate. + to make TransactionInfo self-contained. - Such forks exist since Babylon considers CZs might have dishonest - majority. 
+ For example, storing the key allows TransactionInfo to not relay + on - Also note that the IBC-Go implementation will only consider the - first header in a fork valid, since + the fact that TransactionInfo will be ordered in the same order + as - the subsequent headers cannot be verified without knowing the - validator set in the previous header. + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in the + position in `key` + + TODO: maybe it could use here better format as we already + processed and + + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including + + - the position of the tx on BTC blockchain + + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint is + included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) checkpoint + submission description: >- - QueryEpochChainInfoResponse is response type for the Query/EpochChainInfo - RPC method. - babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse: + QueryFinalizedChainInfoResponse is response type for the + Query/FinalizedChainInfo RPC method. + babylon.zoneconcierge.v1.QueryFinalizedChainInfoUntilHeightResponse: type: object properties: finalized_chain_info: @@ -12365,7 +14935,7 @@ definitions: type: string title: chain_id is the ID of the chain latest_header: - title: latest_header is the latest header in the canonical chain of CZ + title: latest_header is the latest header in CZ's canonical chain type: object properties: chain_id: @@ -12631,6 +15201,12 @@ definitions: the subsequent headers cannot be verified without knowing the validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in + CZ's canonical chain epoch_info: title: epoch_info is the metadata of the last BTC-finalised epoch type: object @@ -12643,17 +15219,104 @@ definitions: format: uint64 first_block_height: type: string - format: uint64 - last_block_header: - description: >- - last_block_header is the header of the last block in this epoch. - - Babylon needs to remember the last header of each epoch to - complete unbonding validators/delegations when a previous epoch's - checkpoint is finalised. + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this epoch. + + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous epoch's + checkpoint is finalised. + + The last_block_header field is nil in the epoch's beginning, and + is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: |- + app_hash_root is the Merkle root of all AppHashs in this epoch + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch - The last_block_header field is nil in the epoch's beginning, and - is set upon the end of this epoch. + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header type: object properties: version: @@ -12729,18 +15392,279 @@ definitions: proposer_address: type: string format: byte - app_hash_root: + description: Header defines the structure of a Tendermint block header. + raw_checkpoint: + title: raw_checkpoint is the raw checkpoint of this epoch + type: object + properties: + epoch_num: + type: string + format: uint64 + title: epoch_num defines the epoch number the raw checkpoint is for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that individual BLS + sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of the BLS + multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated from + individual BLS sigs + btc_submission_key: + title: >- + btc_submission_key is position of two BTC txs that include the raw + checkpoint of this epoch + type: object + properties: + key: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified by hash + of block in + + which transaction was included and transaction index in the + block + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header is + included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a transaction in + the Merkle tree. 
+ proof_header_in_epoch: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + title: >- + proof_header_in_epoch is the proof that the Babylon header is in a + certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, and + its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's validator + set is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified by hash + of block in + + which transaction was included and transaction index in the + block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain + + Although it is already a part of SubmissionKey, we store it here + again + + to make TransactionInfo self-contained. + + For example, storing the key allows TransactionInfo to not relay + on + + the fact that TransactionInfo will be ordered in the same order + as + + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in the + position in `key` + + TODO: maybe it could use here better format as we already + processed and + + valideated the proof? 
+ title: >- + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including + + - the position of the tx on BTC blockchain + + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint is + included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) checkpoint + submission + description: >- + QueryFinalizedChainInfoUntilHeightResponse is response type for the + Query/FinalizedChainInfoUntilHeight RPC method. + babylon.zoneconcierge.v1.QueryHeaderResponse: + type: object + properties: + header: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: type: string format: byte - title: |- - app_hash_root is the Merkle root of all AppHashs in this epoch - It will be used for proving a block is in an epoch - sealer_header: + title: hash is the hash of this header + height: + type: string + format: uint64 title: >- - sealer_header is the 2nd header of the next epoch + height is the height of this header on CZ ledger - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes + this CZ header type: object properties: version: @@ -12817,253 +15741,167 @@ definitions: type: string format: byte description: Header defines the structure of a Tendermint block header. - raw_checkpoint: - title: raw_checkpoint is the raw checkpoint of this epoch - type: object - properties: - epoch_num: + babylon_epoch: type: string format: uint64 - title: epoch_num defines the epoch number the raw checkpoint is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that individual BLS - sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of the BLS - multi sig - bls_multi_sig: + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string format: byte title: >- - bls_multi_sig defines the multi sig that is aggregated from - individual BLS sigs - btc_submission_key: - title: >- - btc_submission_key is position of two BTC txs that include the raw - checkpoint of this epoch - type: object - properties: - key: - type: array - items: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified by hash - of block in + babylon_tx_hash is the hash of the tx that includes this header - which transaction was included and transaction index in the - block - proof_tx_in_block: - title: >- - proof_tx_in_block is the proof that tx that carries the header is - included in a certain Babylon block - type: object - properties: - root_hash: - type: string - format: byte - data: - type: string - format: byte - proof: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - description: >- - TxProof represents a Merkle proof of the presence of a transaction in - the Merkle tree. 
- proof_header_in_epoch: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - title: >- - proof_header_in_epoch is the proof that the Babylon header is in a - certain epoch - proof_epoch_sealed: - title: proof_epoch_sealed is the proof that the epoch is sealed + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + fork_headers: type: object properties: - validator_set: + headers: type: array items: type: object properties: - validator_address: + chain_id: type: string - bls_pub_key: + title: chain_id is the unique ID of the chain + hash: type: string format: byte - voting_power: + title: hash is the hash of this header + height: type: string format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting power, and - its bls public key - title: >- - validator_set is the validator set of the sealed epoch + title: >- + height is the height of this header on CZ ledger - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's metadata is - committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: + (hash, height) jointly provides the position of the header + on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header type: object properties: - type: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules + of the application's + + state transition machine. 
+ chain_id: type: string - key: + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: type: string format: byte - data: + title: hashes of block data + data_hash: type: string format: byte - title: >- - ProofOp defines an operation used for calculating Merkle - root - - The data could be arbitrary format, providing nessecary data - - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's validator - set is committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: type: string - key: + format: byte + app_hash: type: string format: byte - data: + last_results_hash: type: string format: byte - title: >- - ProofOp defines an operation used for calculating Merkle - root - - The data could be arbitrary format, providing nessecary data - - for example neighbouring node hash - proof_epoch_submitted: - type: array - items: - type: object - properties: - key: - type: object - properties: - index: - type: integer - format: int64 - hash: + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified by hash - of block in - - which transaction was included and transaction index in the - block - description: >- - key is the position (txIdx, blockHash) of this tx on BTC - blockchain - - Although it is already a part of SubmissionKey, we store it here - again + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - to make TransactionInfo self-contained. + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the same + height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same height. - For example, storing the key allows TransactionInfo to not relay - on + For example, assuming the following blockchain - the fact that TransactionInfo will be ordered in the same order - as + ``` - TransactionKeys in SubmissionKey. - transaction: - type: string - format: byte - title: transaction is the full transaction in bytes - proof: - type: string - format: byte - title: >- - proof is the Merkle proof that this tx is included in the - position in `key` + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - TODO: maybe it could use here better format as we already - processed and + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - valideated the proof? 
- title: >- - TransactionInfo is the info of a tx that contains Babylon - checkpoint, including - - the position of the tx on BTC blockchain + Note that each `IndexedHeader` in the fork should have a valid quorum + certificate. - - the full tx content + Such forks exist since Babylon considers CZs might have dishonest + majority. - - the Merkle proof that this tx is on the above position - title: >- - proof_epoch_submitted is the proof that the epoch's checkpoint is - included in BTC ledger + Also note that the IBC-Go implementation will only consider the first + header in a fork valid, since - It is the two TransactionInfo in the best (i.e., earliest) checkpoint - submission - description: >- - QueryFinalizedChainInfoResponse is response type for the - Query/FinalizedChainInfo RPC method. + the subsequent headers cannot be verified without knowing the + validator set in the previous header. + description: QueryParamsResponse is response type for the Query/Header RPC method. babylon.zoneconcierge.v1.QueryListEpochHeadersResponse: type: object properties: @@ -13328,153 +16166,6 @@ definitions: repeated Bar results = 1; PageResponse page = 2; } - description: >- - QueryFinalizedChainInfoResponse is response type for the - Query/FinalizedChainInfo RPC method. - babylon.zoneconcierge.v1.QueryListHeadersResponse: - type: object - properties: - headers: - type: array - items: - type: object - properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: - type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the header on CZ - ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that includes - this CZ header - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a - block in the blockchain, - - including all blockchain data structures and the rules of - the application's - - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. 
- babylon_epoch: - type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: - type: string - format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this header - - (babylon_block_height, babylon_tx_hash) jointly provides the - position of the header on Babylon ledger - title: IndexedHeader is the metadata of a CZ header - title: headers is the list of headers - pagination: - title: pagination defines the pagination in the response - type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } description: >- QueryListHeadersResponse is response type for the Query/ListHeaders RPC method. diff --git a/proto/babylon/checkpointing/checkpoint.proto b/proto/babylon/checkpointing/checkpoint.proto index e314c5d07..ccbac63c4 100644 --- a/proto/babylon/checkpointing/checkpoint.proto +++ b/proto/babylon/checkpointing/checkpoint.proto @@ -1,6 +1,7 @@ syntax = "proto3"; package babylon.checkpointing.v1; +import "google/protobuf/timestamp.proto"; import "cosmos_proto/cosmos.proto"; import "gogoproto/gogo.proto"; @@ -37,6 +38,9 @@ message RawCheckpointWithMeta { ]; // power_sum defines the accumulated voting power for the checkpoint uint64 power_sum = 4; + // lifecycle defines the lifecycle of this checkpoint, i.e., each state transition and + // the time (in both timestamp and block height) of this transition. + repeated CheckpointStateUpdate lifecycle = 5; } // CkptStatus is the status of a checkpoint. @@ -55,6 +59,17 @@ enum CheckpointStatus { CKPT_STATUS_FINALIZED = 4 [(gogoproto.enumvalue_customname) = "Finalized"]; } +message CheckpointStateUpdate { + option (gogoproto.equal) = true; + + // state defines the event of a state transition towards this state + CheckpointStatus state = 1; + // block_height is the height of the Babylon block that triggers the state update + uint64 block_height = 2; + // block_time is the timestamp in the Babylon block that triggers the state update + google.protobuf.Timestamp block_time = 3 [(gogoproto.stdtime) = true]; +} + // BlsSig wraps the BLS sig with meta data. 
message BlsSig { option (gogoproto.equal) = false; diff --git a/testutil/datagen/tendermint.go b/testutil/datagen/tendermint.go index 722b2b5d5..0a1193d31 100644 --- a/testutil/datagen/tendermint.go +++ b/testutil/datagen/tendermint.go @@ -1,6 +1,8 @@ package datagen import ( + "time" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -9,6 +11,7 @@ func GenRandomTMHeader(chainID string, height uint64) *tmproto.Header { return &tmproto.Header{ ChainID: chainID, Height: int64(height), + Time: time.Now(), LastCommitHash: GenRandomByteArray(32), } } diff --git a/x/checkpointing/keeper/hooks.go b/x/checkpointing/keeper/hooks.go index 730c0d176..fca60aba1 100644 --- a/x/checkpointing/keeper/hooks.go +++ b/x/checkpointing/keeper/hooks.go @@ -16,7 +16,7 @@ func (k Keeper) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) e return nil } -// AfterRawCheckpointConfirmed - call hook if registered +// AfterRawCheckpointConfirmed - call hook if the checkpoint is confirmed func (k Keeper) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { if k.hooks != nil { return k.hooks.AfterRawCheckpointConfirmed(ctx, epoch) @@ -24,7 +24,7 @@ func (k Keeper) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error return nil } -// AfterRawCheckpointFinalized - call hook if registered +// AfterRawCheckpointFinalized - call hook if the checkpoint is finalized func (k Keeper) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error { if k.hooks != nil { return k.hooks.AfterRawCheckpointFinalized(ctx, epoch) diff --git a/x/checkpointing/keeper/keeper.go b/x/checkpointing/keeper/keeper.go index cc12155db..ac74100da 100644 --- a/x/checkpointing/keeper/keeper.go +++ b/x/checkpointing/keeper/keeper.go @@ -109,29 +109,31 @@ func (k Keeper) addBlsSig(ctx sdk.Context, sig *types.BlsSig) error { } // accumulate BLS signatures - updated, err := ckptWithMeta.Accumulate( - vals, signerAddr, signerBlsKey, *sig.BlsSig, k.GetTotalVotingPower(ctx, sig.GetEpochNum())) + err = ckptWithMeta.Accumulate(vals, signerAddr, signerBlsKey, *sig.BlsSig, k.GetTotalVotingPower(ctx, sig.GetEpochNum())) if err != nil { return err } - if updated { - err = k.UpdateCheckpoint(ctx, ckptWithMeta) - } - if err != nil { - return err - } - - if updated && ckptWithMeta.Status == types.Sealed { + if ckptWithMeta.Status == types.Sealed { + // emit event err = ctx.EventManager().EmitTypedEvent( &types.EventCheckpointSealed{Checkpoint: ckptWithMeta}, ) if err != nil { ctx.Logger().Error("failed to emit checkpoint sealed event for epoch %v", ckptWithMeta.Ckpt.EpochNum) } + // record state update of Sealed + ckptWithMeta.RecordStateUpdate(ctx, types.Sealed) + // log in console ctx.Logger().Info(fmt.Sprintf("Checkpointing: checkpoint for epoch %v is Sealed", ckptWithMeta.Ckpt.EpochNum)) } + // if reaching this line, it means ckptWithMeta is updated, + // and we need to write the updated ckptWithMeta back to KVStore + if err := k.UpdateCheckpoint(ctx, ckptWithMeta); err != nil { + return err + } + return nil } @@ -154,6 +156,7 @@ func (k Keeper) AddRawCheckpoint(ctx sdk.Context, ckptWithMeta *types.RawCheckpo func (k Keeper) BuildRawCheckpoint(ctx sdk.Context, epochNum uint64, lch types.LastCommitHash) (*types.RawCheckpointWithMeta, error) { ckptWithMeta := types.NewCheckpointWithMeta(types.NewCheckpoint(epochNum, lch), types.Accumulating) + ckptWithMeta.RecordStateUpdate(ctx, types.Accumulating) // record the state 
update of Accumulating err := k.AddRawCheckpoint(ctx, ckptWithMeta) if err != nil { return nil, err @@ -255,7 +258,8 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, btcCkptBytes []byte) (*types.Ra return nil, types.ErrConflictingCheckpoint } -// SetCheckpointSubmitted sets the status of a checkpoint to SUBMITTED +// SetCheckpointSubmitted sets the status of a checkpoint to SUBMITTED, +// and records the associated state update in lifecycle func (k Keeper) SetCheckpointSubmitted(ctx sdk.Context, epoch uint64) { ckpt := k.setCheckpointStatus(ctx, epoch, types.Sealed, types.Submitted) err := ctx.EventManager().EmitTypedEvent( @@ -266,7 +270,8 @@ func (k Keeper) SetCheckpointSubmitted(ctx sdk.Context, epoch uint64) { } } -// SetCheckpointConfirmed sets the status of a checkpoint to CONFIRMED +// SetCheckpointConfirmed sets the status of a checkpoint to CONFIRMED, +// and records the associated state update in lifecycle func (k Keeper) SetCheckpointConfirmed(ctx sdk.Context, epoch uint64) { ckpt := k.setCheckpointStatus(ctx, epoch, types.Submitted, types.Confirmed) err := ctx.EventManager().EmitTypedEvent( @@ -281,7 +286,8 @@ func (k Keeper) SetCheckpointConfirmed(ctx sdk.Context, epoch uint64) { } } -// SetCheckpointFinalized sets the status of a checkpoint to FINALIZED +// SetCheckpointFinalized sets the status of a checkpoint to FINALIZED, +// and records the associated state update in lifecycle func (k Keeper) SetCheckpointFinalized(ctx sdk.Context, epoch uint64) { ckpt := k.setCheckpointStatus(ctx, epoch, types.Confirmed, types.Finalized) err := ctx.EventManager().EmitTypedEvent( @@ -296,6 +302,8 @@ func (k Keeper) SetCheckpointFinalized(ctx sdk.Context, epoch uint64) { } } +// SetCheckpointForgotten rolls back the status of a checkpoint to Sealed, +// and records the associated state update in lifecycle func (k Keeper) SetCheckpointForgotten(ctx sdk.Context, epoch uint64) { ckpt := k.setCheckpointStatus(ctx, epoch, types.Submitted, types.Sealed) err := ctx.EventManager().EmitTypedEvent( @@ -306,6 +314,8 @@ func (k Keeper) SetCheckpointForgotten(ctx sdk.Context, epoch uint64) { } } +// setCheckpointStatus sets a ckptWithMeta to the given state, +// and records the state update in its lifecycle func (k Keeper) setCheckpointStatus(ctx sdk.Context, epoch uint64, from types.CheckpointStatus, to types.CheckpointStatus) *types.RawCheckpointWithMeta { ckptWithMeta, err := k.GetRawCheckpoint(ctx, epoch) if err != nil { @@ -319,8 +329,9 @@ func (k Keeper) setCheckpointStatus(ctx sdk.Context, epoch uint64, from types.Ch return nil } } - ckptWithMeta.Status = to - err = k.UpdateCheckpoint(ctx, ckptWithMeta) + ckptWithMeta.Status = to // set status + ckptWithMeta.RecordStateUpdate(ctx, to) // record state update to the lifecycle + err = k.UpdateCheckpoint(ctx, ckptWithMeta) // write back to KVStore if err != nil { panic("failed to update checkpoint status") } diff --git a/x/checkpointing/keeper/keeper_test.go b/x/checkpointing/keeper/keeper_test.go index 70fd5430a..ac49fc157 100644 --- a/x/checkpointing/keeper/keeper_test.go +++ b/x/checkpointing/keeper/keeper_test.go @@ -1,12 +1,13 @@ package keeper_test import ( + "math/rand" + "testing" + "github.com/babylonchain/babylon/btctxformatter" "github.com/babylonchain/babylon/crypto/bls12381" "github.com/boljen/go-bitmap" sdk "github.com/cosmos/cosmos-sdk/types" - "math/rand" - "testing" "github.com/babylonchain/babylon/testutil/datagen" testkeeper "github.com/babylonchain/babylon/testutil/keeper" @@ -68,37 +69,86 @@ func 
FuzzKeeperSetCheckpointStatus(f *testing.F) { ek := mocks.NewMockEpochingKeeper(ctrl) ckptKeeper, ctx, _ := testkeeper.CheckpointingKeeper(t, ek, nil, client.Context{}) + /* new accumulating checkpoint*/ mockCkptWithMeta := datagen.GenRandomRawCheckpointWithMeta() mockCkptWithMeta.Status = types.Accumulating + mockCkptWithMeta.RecordStateUpdate(ctx, types.Accumulating) epoch := mockCkptWithMeta.Ckpt.EpochNum + require.Len(t, mockCkptWithMeta.Lifecycle, 1) + require.Equal(t, curStateUpdate(ctx, types.Accumulating), mockCkptWithMeta.Lifecycle[0]) - _ = ckptKeeper.AddRawCheckpoint( + err := ckptKeeper.AddRawCheckpoint( ctx, mockCkptWithMeta, ) + require.NoError(t, err) + + /* incorrect state transition of a checkpoint */ + // ensure status and lifecycle from an incorrect state transition + // will not be recorded ckptKeeper.SetCheckpointSubmitted(ctx, epoch) status, err := ckptKeeper.GetStatus(ctx, epoch) require.NoError(t, err) require.Equal(t, types.Accumulating, status) + mockCkptWithMeta, err = ckptKeeper.GetRawCheckpoint(ctx, epoch) + require.NoError(t, err) + require.Len(t, mockCkptWithMeta.Lifecycle, 1) + require.Equal(t, curStateUpdate(ctx, types.Accumulating), mockCkptWithMeta.Lifecycle[0]) + + /* Accumulating -> Sealed */ + ctx = updateRandomCtx(ctx) mockCkptWithMeta.Status = types.Sealed + mockCkptWithMeta.RecordStateUpdate(ctx, types.Sealed) err = ckptKeeper.UpdateCheckpoint(ctx, mockCkptWithMeta) require.NoError(t, err) + // ensure status is updated + status, err = ckptKeeper.GetStatus(ctx, epoch) + require.NoError(t, err) + require.Equal(t, types.Sealed, status) + // ensure state update of Sealed is recorded + mockCkptWithMeta, err = ckptKeeper.GetRawCheckpoint(ctx, epoch) + require.NoError(t, err) + require.Len(t, mockCkptWithMeta.Lifecycle, 2) + require.Equal(t, curStateUpdate(ctx, types.Sealed), mockCkptWithMeta.Lifecycle[1]) + + /* Sealed -> Submitted */ + ctx = updateRandomCtx(ctx) ckptKeeper.SetCheckpointSubmitted(ctx, epoch) + // ensure status is updated status, err = ckptKeeper.GetStatus(ctx, epoch) require.NoError(t, err) require.Equal(t, types.Submitted, status) - ckptKeeper.SetCheckpointConfirmed(ctx, epoch) - status, err = ckptKeeper.GetStatus(ctx, epoch) + // ensure state update of Submitted is recorded + mockCkptWithMeta, err = ckptKeeper.GetRawCheckpoint(ctx, epoch) require.NoError(t, err) - require.Equal(t, types.Confirmed, status) + require.Len(t, mockCkptWithMeta.Lifecycle, 3) + require.Equal(t, curStateUpdate(ctx, types.Submitted), mockCkptWithMeta.Lifecycle[2]) + + /* Submitted -> Confirmed */ + ctx = updateRandomCtx(ctx) ckptKeeper.SetCheckpointConfirmed(ctx, epoch) + // ensure status is updated status, err = ckptKeeper.GetStatus(ctx, epoch) require.NoError(t, err) require.Equal(t, types.Confirmed, status) + // ensure state update of Confirmed is recorded + mockCkptWithMeta, err = ckptKeeper.GetRawCheckpoint(ctx, epoch) + require.NoError(t, err) + require.Len(t, mockCkptWithMeta.Lifecycle, 4) + require.Equal(t, curStateUpdate(ctx, types.Confirmed), mockCkptWithMeta.Lifecycle[3]) + + /* Confirmed -> Finalized */ + ctx = updateRandomCtx(ctx) ckptKeeper.SetCheckpointFinalized(ctx, epoch) + // ensure status is updated status, err = ckptKeeper.GetStatus(ctx, epoch) require.NoError(t, err) require.Equal(t, types.Finalized, status) + // ensure state update of Finalized is recorded + mockCkptWithMeta, err = ckptKeeper.GetRawCheckpoint(ctx, epoch) + require.NoError(t, err) + require.Len(t, mockCkptWithMeta.Lifecycle, 5) + require.Equal(t, curStateUpdate(ctx, 
types.Finalized), mockCkptWithMeta.Lifecycle[4]) }) } @@ -203,3 +253,17 @@ func makeBtcCkptBytes(epoch uint64, lch []byte, bitmap []byte, blsSig []byte, t return ckptData } + +func curStateUpdate(ctx sdk.Context, status types.CheckpointStatus) *types.CheckpointStateUpdate { + height, time := ctx.BlockHeight(), ctx.BlockTime() + return &types.CheckpointStateUpdate{ + State: status, + BlockHeight: uint64(height), + BlockTime: &time, + } +} + +func updateRandomCtx(ctx sdk.Context) sdk.Context { + header := datagen.GenRandomTMHeader("test", datagen.RandomInt(1000)) + return ctx.WithBlockHeader(*header) +} diff --git a/x/checkpointing/types/checkpoint.pb.go b/x/checkpointing/types/checkpoint.pb.go index 491364660..7754a8e5b 100644 --- a/x/checkpointing/types/checkpoint.pb.go +++ b/x/checkpointing/types/checkpoint.pb.go @@ -10,15 +10,19 @@ import ( _ "github.com/cosmos/cosmos-proto" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "google.golang.org/protobuf/types/known/timestamppb" io "io" math "math" math_bits "math/bits" + time "time" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -134,6 +138,9 @@ type RawCheckpointWithMeta struct { BlsAggrPk *github_com_babylonchain_babylon_crypto_bls12381.PublicKey `protobuf:"bytes,3,opt,name=bls_aggr_pk,json=blsAggrPk,proto3,customtype=github.com/babylonchain/babylon/crypto/bls12381.PublicKey" json:"bls_aggr_pk,omitempty"` // power_sum defines the accumulated voting power for the checkpoint PowerSum uint64 `protobuf:"varint,4,opt,name=power_sum,json=powerSum,proto3" json:"power_sum,omitempty"` + // lifecycle defines the lifecycle of this checkpoint, i.e., each state transition and + // the time (in both timestamp and block height) of this transition. 
+ Lifecycle []*CheckpointStateUpdate `protobuf:"bytes,5,rep,name=lifecycle,proto3" json:"lifecycle,omitempty"` } func (m *RawCheckpointWithMeta) Reset() { *m = RawCheckpointWithMeta{} } @@ -190,6 +197,76 @@ func (m *RawCheckpointWithMeta) GetPowerSum() uint64 { return 0 } +func (m *RawCheckpointWithMeta) GetLifecycle() []*CheckpointStateUpdate { + if m != nil { + return m.Lifecycle + } + return nil +} + +type CheckpointStateUpdate struct { + // state defines the event of a state transition towards this state + State CheckpointStatus `protobuf:"varint,1,opt,name=state,proto3,enum=babylon.checkpointing.v1.CheckpointStatus" json:"state,omitempty"` + // block_height is the height of the Babylon block that triggers the state update + BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + // block_time is the timestamp in the Babylon block that triggers the state update + BlockTime *time.Time `protobuf:"bytes,3,opt,name=block_time,json=blockTime,proto3,stdtime" json:"block_time,omitempty"` +} + +func (m *CheckpointStateUpdate) Reset() { *m = CheckpointStateUpdate{} } +func (m *CheckpointStateUpdate) String() string { return proto.CompactTextString(m) } +func (*CheckpointStateUpdate) ProtoMessage() {} +func (*CheckpointStateUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_63ff05f0a47b36f7, []int{2} +} +func (m *CheckpointStateUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckpointStateUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckpointStateUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckpointStateUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckpointStateUpdate.Merge(m, src) +} +func (m *CheckpointStateUpdate) XXX_Size() int { + return m.Size() +} +func (m *CheckpointStateUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_CheckpointStateUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckpointStateUpdate proto.InternalMessageInfo + +func (m *CheckpointStateUpdate) GetState() CheckpointStatus { + if m != nil { + return m.State + } + return Accumulating +} + +func (m *CheckpointStateUpdate) GetBlockHeight() uint64 { + if m != nil { + return m.BlockHeight + } + return 0 +} + +func (m *CheckpointStateUpdate) GetBlockTime() *time.Time { + if m != nil { + return m.BlockTime + } + return nil +} + // BlsSig wraps the BLS sig with meta data. 
type BlsSig struct { // epoch_num defines the epoch number that the BLS sig is signed on @@ -207,7 +284,7 @@ func (m *BlsSig) Reset() { *m = BlsSig{} } func (m *BlsSig) String() string { return proto.CompactTextString(m) } func (*BlsSig) ProtoMessage() {} func (*BlsSig) Descriptor() ([]byte, []int) { - return fileDescriptor_63ff05f0a47b36f7, []int{2} + return fileDescriptor_63ff05f0a47b36f7, []int{3} } func (m *BlsSig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -254,6 +331,7 @@ func init() { proto.RegisterEnum("babylon.checkpointing.v1.CheckpointStatus", CheckpointStatus_name, CheckpointStatus_value) proto.RegisterType((*RawCheckpoint)(nil), "babylon.checkpointing.v1.RawCheckpoint") proto.RegisterType((*RawCheckpointWithMeta)(nil), "babylon.checkpointing.v1.RawCheckpointWithMeta") + proto.RegisterType((*CheckpointStateUpdate)(nil), "babylon.checkpointing.v1.CheckpointStateUpdate") proto.RegisterType((*BlsSig)(nil), "babylon.checkpointing.v1.BlsSig") } @@ -262,47 +340,56 @@ func init() { } var fileDescriptor_63ff05f0a47b36f7 = []byte{ - // 640 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0x5f, 0x6b, 0xd3, 0x50, - 0x1c, 0x6d, 0xb6, 0x52, 0xd7, 0xbb, 0xb5, 0x84, 0xe0, 0xa4, 0x56, 0xc8, 0xca, 0x40, 0x1d, 0x7b, - 0x48, 0xd9, 0x86, 0xe0, 0xdf, 0x87, 0x34, 0xed, 0xb4, 0xac, 0xed, 0x46, 0xd2, 0x2a, 0x0c, 0x24, - 0xdc, 0xa4, 0x31, 0xb9, 0xf4, 0xde, 0xdc, 0x90, 0x7b, 0xe3, 0xac, 0x9f, 0x40, 0xf6, 0xe4, 0xb3, - 0x30, 0x10, 0xfc, 0x32, 0x3e, 0xee, 0x51, 0xf6, 0x30, 0x64, 0x7b, 0x19, 0xfa, 0x25, 0x24, 0x37, - 0x65, 0xae, 0x1b, 0x43, 0x10, 0x7d, 0xcb, 0xef, 0xe4, 0x9c, 0x1f, 0xf7, 0x9c, 0x73, 0xb9, 0xe0, - 0x9e, 0x03, 0x9d, 0x31, 0xa6, 0x61, 0xdd, 0x0d, 0x3c, 0x77, 0x14, 0x51, 0x14, 0x72, 0x14, 0xfa, - 0x17, 0x26, 0x2d, 0x8a, 0x29, 0xa7, 0x4a, 0x65, 0xc2, 0xd3, 0xa6, 0x78, 0xda, 0xdb, 0xb5, 0xea, - 0x6d, 0x97, 0x32, 0x42, 0x99, 0x2d, 0x78, 0xf5, 0x6c, 0xc8, 0x44, 0xd5, 0x9b, 0x3e, 0xf5, 0x69, - 0x86, 0xa7, 0x5f, 0x19, 0xba, 0xfc, 0x53, 0x02, 0x25, 0x13, 0xee, 0x19, 0xe7, 0x8b, 0x94, 0x3b, - 0xa0, 0xe8, 0x45, 0xd4, 0x0d, 0xec, 0x30, 0x21, 0x15, 0xa9, 0x26, 0xad, 0xe4, 0xcd, 0x39, 0x01, - 0xf4, 0x12, 0xa2, 0x3c, 0x05, 0x32, 0x86, 0x8c, 0xdb, 0x2e, 0x25, 0x04, 0x71, 0x3b, 0x80, 0x2c, - 0xa8, 0xcc, 0xd4, 0xa4, 0x95, 0x85, 0x86, 0x72, 0x74, 0xbc, 0x54, 0xee, 0x40, 0xc6, 0x0d, 0xf1, - 0xeb, 0x05, 0x64, 0x81, 0x59, 0xc6, 0x53, 0xb3, 0x72, 0x0b, 0x14, 0x1c, 0xc4, 0x09, 0x8c, 0x2a, - 0xb3, 0xa9, 0xc6, 0x9c, 0x4c, 0x0a, 0x04, 0x25, 0x07, 0x33, 0x9b, 0x24, 0x98, 0x23, 0x9b, 0x21, - 0xbf, 0x92, 0x17, 0x2b, 0x9f, 0x1d, 0x1d, 0x2f, 0x3d, 0xf2, 0x11, 0x0f, 0x12, 0x47, 0x73, 0x29, - 0xa9, 0x4f, 0x5c, 0xbb, 0x01, 0x44, 0x61, 0xfd, 0x3c, 0xaa, 0x78, 0x1c, 0x71, 0x5a, 0x77, 0x30, - 0x5b, 0x5b, 0xdf, 0x78, 0xb8, 0xa6, 0x59, 0xc8, 0x0f, 0x21, 0x4f, 0x62, 0xcf, 0x9c, 0x77, 0x30, - 0xeb, 0xa6, 0x2b, 0x2d, 0xe4, 0x3f, 0xce, 0x9f, 0x7d, 0x5e, 0x92, 0x96, 0x3f, 0xcd, 0x80, 0xc5, - 0x29, 0xb7, 0xaf, 0x10, 0x0f, 0xba, 0x1e, 0x87, 0xca, 0x13, 0x90, 0x77, 0x47, 0x11, 0x17, 0x86, - 0xe7, 0xd7, 0xef, 0x6b, 0xd7, 0x25, 0xac, 0x4d, 0xc9, 0x4d, 0x21, 0x52, 0x1a, 0xa0, 0xc0, 0x38, - 0xe4, 0x09, 0x13, 0x59, 0x94, 0xd7, 0x57, 0xaf, 0x97, 0xff, 0xd6, 0x5a, 0x42, 0x61, 0x4e, 0x94, - 0xca, 0x6b, 0x90, 0x9e, 0xd7, 0x86, 0xbe, 0x1f, 0xdb, 0xd1, 0x28, 0x0b, 0xe8, 0xef, 0x12, 0xd8, - 0x49, 0x1c, 0x8c, 0xdc, 0x2d, 0x6f, 0x6c, 0x16, 0x1d, 0xcc, 0x74, 0xdf, 0x8f, 0x77, 0x46, 0x69, - 0xab, 0x11, 0xdd, 0xf3, 0x62, 0x9b, 0x25, 0x44, 0xc4, 0x9b, 0x37, 0xe7, 0x04, 0x60, 
0x25, 0x64, - 0x12, 0xce, 0x99, 0x04, 0x0a, 0x0d, 0xcc, 0x2c, 0xe4, 0xff, 0xcf, 0x3b, 0xf0, 0x12, 0xdc, 0x48, - 0x7d, 0xa6, 0x2d, 0xcf, 0xfe, 0x8b, 0x96, 0x0b, 0x4e, 0x76, 0xe4, 0xbb, 0xa0, 0xcc, 0x90, 0x1f, - 0x7a, 0xb1, 0x0d, 0x87, 0xc3, 0xd8, 0x63, 0x4c, 0xb8, 0x2c, 0x9a, 0xa5, 0x0c, 0xd5, 0x33, 0x50, - 0x58, 0xcd, 0xad, 0xfe, 0x90, 0x80, 0x7c, 0xb9, 0x09, 0x45, 0x03, 0x15, 0x63, 0x6b, 0xa7, 0x6f, - 0x5b, 0x7d, 0xbd, 0x3f, 0xb0, 0x6c, 0xdd, 0x30, 0x06, 0xdd, 0x41, 0x47, 0xef, 0xb7, 0x7b, 0xcf, - 0xe5, 0x5c, 0x55, 0xde, 0x3f, 0xa8, 0x2d, 0xe8, 0xae, 0x9b, 0x90, 0x04, 0xc3, 0xb4, 0x4d, 0x65, - 0x19, 0x28, 0x17, 0xf9, 0x56, 0x4b, 0xef, 0xb4, 0x9a, 0xb2, 0x54, 0x05, 0xfb, 0x07, 0xb5, 0x82, - 0xe5, 0x41, 0xec, 0x0d, 0x95, 0x15, 0xb0, 0x38, 0xc5, 0x19, 0x34, 0xba, 0xed, 0x7e, 0xbf, 0xd5, - 0x94, 0x67, 0xaa, 0xa5, 0xfd, 0x83, 0x5a, 0xd1, 0x4a, 0x1c, 0x82, 0x38, 0xbf, 0xca, 0x34, 0xb6, - 0x7b, 0x9b, 0x6d, 0xb3, 0xdb, 0x6a, 0xca, 0xb3, 0x19, 0xd3, 0xa0, 0xe1, 0x1b, 0x14, 0x93, 0xab, - 0xcc, 0xcd, 0x76, 0x4f, 0xef, 0xb4, 0x77, 0x5b, 0x4d, 0x39, 0x9f, 0x31, 0x37, 0x51, 0x08, 0x31, - 0x7a, 0xef, 0x0d, 0xab, 0xf9, 0x0f, 0x5f, 0xd4, 0x5c, 0x63, 0xfb, 0xeb, 0x89, 0x2a, 0x1d, 0x9e, - 0xa8, 0xd2, 0xf7, 0x13, 0x55, 0xfa, 0x78, 0xaa, 0xe6, 0x0e, 0x4f, 0xd5, 0xdc, 0xb7, 0x53, 0x35, - 0xb7, 0xfb, 0xe0, 0x4f, 0xb1, 0xbf, 0xbb, 0xf4, 0x12, 0xf1, 0x71, 0xe4, 0x31, 0xa7, 0x20, 0x9e, - 0x8e, 0x8d, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x0d, 0xb7, 0x12, 0xaf, 0x04, 0x00, 0x00, + // 773 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0xdb, 0x8e, 0xdb, 0x44, + 0x18, 0xc7, 0xe3, 0x5d, 0x37, 0x34, 0xb3, 0x07, 0x59, 0x23, 0x16, 0x85, 0x20, 0x25, 0x61, 0x25, + 0x20, 0xea, 0x85, 0xad, 0x4d, 0x85, 0xc4, 0x51, 0xe0, 0x1c, 0x96, 0x46, 0x4d, 0xd2, 0x95, 0x9d, + 0x80, 0x54, 0x09, 0x59, 0xe3, 0xc9, 0xec, 0x78, 0x94, 0xb1, 0xc7, 0xf2, 0x8c, 0x29, 0xe1, 0x09, + 0xd0, 0x5e, 0xf5, 0x05, 0x56, 0x42, 0xe2, 0x51, 0xb8, 0xe1, 0xb2, 0x97, 0xa8, 0x17, 0x05, 0xed, + 0xde, 0x14, 0x78, 0x09, 0xe4, 0x71, 0x7a, 0x48, 0xcb, 0x8a, 0x83, 0xe0, 0x2e, 0xdf, 0xdf, 0xff, + 0xff, 0xc4, 0xdf, 0xef, 0x9b, 0xcf, 0xe0, 0xed, 0x10, 0x85, 0x2b, 0x2e, 0x12, 0x07, 0x47, 0x04, + 0x2f, 0x53, 0xc1, 0x12, 0xc5, 0x12, 0xfa, 0x5c, 0x65, 0xa7, 0x99, 0x50, 0x02, 0xd6, 0xd7, 0x3e, + 0x7b, 0xc3, 0x67, 0x7f, 0x75, 0xd4, 0x68, 0x51, 0x21, 0x28, 0x27, 0x8e, 0xf6, 0x85, 0xf9, 0xa9, + 0xa3, 0x58, 0x4c, 0xa4, 0x42, 0x71, 0x5a, 0x46, 0x1b, 0xaf, 0x63, 0x21, 0x63, 0x21, 0x03, 0x5d, + 0x39, 0x65, 0xb1, 0x7e, 0xf4, 0x2a, 0x15, 0x54, 0x94, 0x7a, 0xf1, 0xab, 0x54, 0x0f, 0x7f, 0x37, + 0xc0, 0x9e, 0x87, 0xee, 0xf5, 0x9f, 0xfe, 0x13, 0x7c, 0x03, 0xd4, 0x48, 0x2a, 0x70, 0x14, 0x24, + 0x79, 0x5c, 0x37, 0xda, 0x46, 0xc7, 0xf4, 0xae, 0x6b, 0x61, 0x9a, 0xc7, 0xf0, 0x23, 0x60, 0x71, + 0x24, 0x55, 0x80, 0x45, 0x1c, 0x33, 0x15, 0x44, 0x48, 0x46, 0xf5, 0xad, 0xb6, 0xd1, 0xd9, 0xed, + 0xc1, 0x87, 0x8f, 0x5a, 0xfb, 0x63, 0x24, 0x55, 0x5f, 0x3f, 0xba, 0x85, 0x64, 0xe4, 0xed, 0xf3, + 0x8d, 0x1a, 0xbe, 0x06, 0xaa, 0x21, 0x53, 0x31, 0x4a, 0xeb, 0xdb, 0x45, 0xc6, 0x5b, 0x57, 0x10, + 0x81, 0xbd, 0x90, 0xcb, 0x20, 0xce, 0xb9, 0x62, 0x81, 0x64, 0xb4, 0x6e, 0xea, 0x23, 0x3f, 0x7e, + 0xf8, 0xa8, 0xf5, 0x3e, 0x65, 0x2a, 0xca, 0x43, 0x1b, 0x8b, 0xd8, 0x59, 0x63, 0xc1, 0x11, 0x62, + 0x89, 0xf3, 0x94, 0x65, 0xb6, 0x4a, 0x95, 0x70, 0x42, 0x2e, 0x8f, 0xba, 0x37, 0xdf, 0x3b, 0xb2, + 0x7d, 0x46, 0x13, 0xa4, 0xf2, 0x8c, 0x78, 0x3b, 0x21, 0x97, 0x93, 0xe2, 0x48, 0x9f, 0xd1, 0x0f, + 0xcc, 0xc7, 0xdf, 0xb5, 0x8c, 0xc3, 0x5f, 0xb7, 0xc0, 0xc1, 
0x46, 0xb7, 0x5f, 0x30, 0x15, 0x4d, + 0x88, 0x42, 0xf0, 0x43, 0x60, 0xe2, 0x65, 0xaa, 0x74, 0xc3, 0x3b, 0xdd, 0x77, 0xec, 0xab, 0x46, + 0x60, 0x6f, 0xc4, 0x3d, 0x1d, 0x82, 0x3d, 0x50, 0x95, 0x0a, 0xa9, 0x5c, 0x6a, 0x16, 0xfb, 0xdd, + 0x1b, 0x57, 0xc7, 0x9f, 0x65, 0x7d, 0x9d, 0xf0, 0xd6, 0x49, 0xf8, 0x25, 0x28, 0xde, 0x37, 0x40, + 0x94, 0x66, 0x41, 0xba, 0x2c, 0x01, 0xfd, 0x3b, 0x02, 0x27, 0x79, 0xc8, 0x19, 0xbe, 0x4d, 0x56, + 0x5e, 0x2d, 0xe4, 0xd2, 0xa5, 0x34, 0x3b, 0x59, 0x16, 0x53, 0x4d, 0xc5, 0x3d, 0x92, 0x05, 0x32, + 0x8f, 0x35, 0x5e, 0xd3, 0xbb, 0xae, 0x05, 0x3f, 0x8f, 0xe1, 0x04, 0xd4, 0x38, 0x3b, 0x25, 0x78, + 0x85, 0x39, 0xa9, 0x5f, 0x6b, 0x6f, 0x77, 0x76, 0xba, 0xce, 0xdf, 0x6d, 0x81, 0xcc, 0xd3, 0x05, + 0x52, 0xc4, 0x7b, 0x76, 0xc2, 0x9a, 0xf5, 0x0f, 0x06, 0x38, 0xf8, 0x53, 0x2b, 0xfc, 0x14, 0x5c, + 0x2b, 0x9a, 0x26, 0x1a, 0xf6, 0x3f, 0xa3, 0x55, 0x06, 0xe1, 0x9b, 0x60, 0x37, 0xe4, 0x02, 0x2f, + 0x83, 0x88, 0x30, 0x1a, 0x29, 0x8d, 0xdd, 0x2c, 0x06, 0x2e, 0xf0, 0xf2, 0x96, 0x96, 0xe0, 0x27, + 0x00, 0x94, 0x96, 0x62, 0x45, 0x34, 0xce, 0x9d, 0x6e, 0xc3, 0x2e, 0xf7, 0xc7, 0x7e, 0xb2, 0x3f, + 0xf6, 0xec, 0xc9, 0xfe, 0xf4, 0xcc, 0xfb, 0x3f, 0xb7, 0x8c, 0x82, 0x98, 0xc0, 0xcb, 0x42, 0x5d, + 0x77, 0xf1, 0xd8, 0x00, 0xd5, 0x1e, 0x97, 0x3e, 0xa3, 0xff, 0xe7, 0x62, 0x7c, 0x0e, 0x5e, 0x29, + 0x86, 0x5f, 0x5c, 0xfd, 0xed, 0xff, 0xe2, 0xea, 0x57, 0xc3, 0xf2, 0x95, 0xdf, 0x02, 0xfb, 0x92, + 0xd1, 0x84, 0x64, 0x01, 0x5a, 0x2c, 0x32, 0x22, 0xa5, 0x1e, 0x7d, 0xcd, 0xdb, 0x2b, 0x55, 0xb7, + 0x14, 0x75, 0xab, 0x95, 0x1b, 0xbf, 0x19, 0xc0, 0x7a, 0x11, 0x38, 0xb4, 0x41, 0xbd, 0x7f, 0xfb, + 0x64, 0x16, 0xf8, 0x33, 0x77, 0x36, 0xf7, 0x03, 0xb7, 0xdf, 0x9f, 0x4f, 0xe6, 0x63, 0x77, 0x36, + 0x9a, 0x7e, 0x66, 0x55, 0x1a, 0xd6, 0xd9, 0x79, 0x7b, 0xd7, 0xc5, 0x38, 0x8f, 0x73, 0x8e, 0x8a, + 0xa1, 0xc1, 0x43, 0x00, 0x9f, 0xf7, 0xfb, 0x43, 0x77, 0x3c, 0x1c, 0x58, 0x46, 0x03, 0x9c, 0x9d, + 0xb7, 0xab, 0x3e, 0x41, 0x9c, 0x2c, 0x60, 0x07, 0x1c, 0x6c, 0x78, 0xe6, 0xbd, 0xc9, 0x68, 0x36, + 0x1b, 0x0e, 0xac, 0xad, 0xc6, 0xde, 0xd9, 0x79, 0xbb, 0xe6, 0xe7, 0x61, 0xcc, 0x94, 0x7a, 0xd9, + 0xd9, 0xbf, 0x33, 0x3d, 0x1e, 0x79, 0x93, 0xe1, 0xc0, 0xda, 0x2e, 0x9d, 0x7d, 0x91, 0x9c, 0xb2, + 0x2c, 0x7e, 0xd9, 0x79, 0x3c, 0x9a, 0xba, 0xe3, 0xd1, 0xdd, 0xe1, 0xc0, 0x32, 0x4b, 0xe7, 0x31, + 0x4b, 0x10, 0x67, 0xdf, 0x90, 0x45, 0xc3, 0xfc, 0xf6, 0xfb, 0x66, 0xa5, 0x77, 0xe7, 0xc7, 0x8b, + 0xa6, 0xf1, 0xe0, 0xa2, 0x69, 0xfc, 0x72, 0xd1, 0x34, 0xee, 0x5f, 0x36, 0x2b, 0x0f, 0x2e, 0x9b, + 0x95, 0x9f, 0x2e, 0x9b, 0x95, 0xbb, 0xef, 0xfe, 0x15, 0xf6, 0xaf, 0x5f, 0xf8, 0x7e, 0xab, 0x55, + 0x4a, 0x64, 0x58, 0xd5, 0x77, 0xea, 0xe6, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xd0, 0xbc, + 0xe5, 0xe5, 0x05, 0x00, 0x00, } func (this *RawCheckpoint) Equal(that interface{}) bool { @@ -381,6 +468,48 @@ func (this *RawCheckpointWithMeta) Equal(that interface{}) bool { if this.PowerSum != that1.PowerSum { return false } + if len(this.Lifecycle) != len(that1.Lifecycle) { + return false + } + for i := range this.Lifecycle { + if !this.Lifecycle[i].Equal(that1.Lifecycle[i]) { + return false + } + } + return true +} +func (this *CheckpointStateUpdate) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CheckpointStateUpdate) + if !ok { + that2, ok := that.(CheckpointStateUpdate) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.State != that1.State { + return false + } + if this.BlockHeight != 
that1.BlockHeight { + return false + } + if that1.BlockTime == nil { + if this.BlockTime != nil { + return false + } + } else if !this.BlockTime.Equal(*that1.BlockTime) { + return false + } return true } func (m *RawCheckpoint) Marshal() (dAtA []byte, err error) { @@ -462,6 +591,20 @@ func (m *RawCheckpointWithMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Lifecycle) > 0 { + for iNdEx := len(m.Lifecycle) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Lifecycle[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckpoint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if m.PowerSum != 0 { i = encodeVarintCheckpoint(dAtA, i, uint64(m.PowerSum)) i-- @@ -499,6 +642,49 @@ func (m *RawCheckpointWithMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *CheckpointStateUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointStateUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckpointStateUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockTime != nil { + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.BlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.BlockTime):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintCheckpoint(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1a + } + if m.BlockHeight != 0 { + i = encodeVarintCheckpoint(dAtA, i, uint64(m.BlockHeight)) + i-- + dAtA[i] = 0x10 + } + if m.State != 0 { + i = encodeVarintCheckpoint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *BlsSig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -613,6 +799,31 @@ func (m *RawCheckpointWithMeta) Size() (n int) { if m.PowerSum != 0 { n += 1 + sovCheckpoint(uint64(m.PowerSum)) } + if len(m.Lifecycle) > 0 { + for _, e := range m.Lifecycle { + l = e.Size() + n += 1 + l + sovCheckpoint(uint64(l)) + } + } + return n +} + +func (m *CheckpointStateUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovCheckpoint(uint64(m.State)) + } + if m.BlockHeight != 0 { + n += 1 + sovCheckpoint(uint64(m.BlockHeight)) + } + if m.BlockTime != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.BlockTime) + n += 1 + l + sovCheckpoint(uint64(l)) + } return n } @@ -957,6 +1168,164 @@ func (m *RawCheckpointWithMeta) Unmarshal(dAtA []byte) error { break } } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckpoint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Lifecycle = append(m.Lifecycle, &CheckpointStateUpdate{}) + if err := m.Lifecycle[len(m.Lifecycle)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + default: + iNdEx = preIndex + skippy, err := skipCheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckpointStateUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckpointStateUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckpointStateUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= CheckpointStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) + } + m.BlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckpoint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockTime == nil { + m.BlockTime = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.BlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipCheckpoint(dAtA[iNdEx:]) diff --git a/x/checkpointing/types/types.go b/x/checkpointing/types/types.go index 6b3896e29..25bc56288 100644 --- a/x/checkpointing/types/types.go +++ b/x/checkpointing/types/types.go @@ -36,8 +36,9 @@ func NewCheckpoint(epochNum uint64, lch LastCommitHash) *RawCheckpoint { func NewCheckpointWithMeta(ckpt *RawCheckpoint, status CheckpointStatus) *RawCheckpointWithMeta { return &RawCheckpointWithMeta{ - Ckpt: ckpt, - Status: status, + Ckpt: ckpt, + Status: status, + Lifecycle: []*CheckpointStateUpdate{}, } } @@ -46,35 +47,35 @@ func NewCheckpointWithMeta(ckpt *RawCheckpoint, status CheckpointStatus) *RawChe // 2. aggregates the BLS public key // 3. updates Bitmap // 4. 
accumulates voting power -// it returns True if the checkpoint is updated +// it returns nil if the checkpoint is updated, otherwise it returns an error func (cm *RawCheckpointWithMeta) Accumulate( vals epochingtypes.ValidatorSet, signerAddr sdk.ValAddress, signerBlsKey bls12381.PublicKey, sig bls12381.Signature, - totalPower int64) (bool, error) { + totalPower int64) error { // the checkpoint should be accumulating if cm.Status != Accumulating { - return false, ErrCkptNotAccumulating + return ErrCkptNotAccumulating } // get validator and its index val, index, err := vals.FindValidatorWithIndex(signerAddr) if err != nil { - return false, err + return err } // return an error if the validator has already voted if bitmap.Get(cm.Ckpt.Bitmap, index) { - return false, ErrCkptAlreadyVoted + return ErrCkptAlreadyVoted } // aggregate BLS sig if cm.Ckpt.BlsMultiSig != nil { aggSig, err := bls12381.AggrSig(*cm.Ckpt.BlsMultiSig, sig) if err != nil { - return false, err + return err } cm.Ckpt.BlsMultiSig = &aggSig } else { @@ -85,7 +86,7 @@ func (cm *RawCheckpointWithMeta) Accumulate( if cm.BlsAggrPk != nil { aggPK, err := bls12381.AggrPK(*cm.BlsAggrPk, signerBlsKey) if err != nil { - return false, err + return err } cm.BlsAggrPk = &aggPK } else { @@ -101,13 +102,25 @@ func (cm *RawCheckpointWithMeta) Accumulate( cm.Status = Sealed } - return true, nil + return nil } func (cm *RawCheckpointWithMeta) IsMoreMatureThanStatus(status CheckpointStatus) bool { return cm.Status > status } +// RecordStateUpdate appends a new state update to the raw ckpt with meta +// where the time/height are captured by the current ctx +func (cm *RawCheckpointWithMeta) RecordStateUpdate(ctx sdk.Context, status CheckpointStatus) { + height, time := ctx.BlockHeight(), ctx.BlockTime() + stateUpdate := &CheckpointStateUpdate{ + State: status, + BlockHeight: uint64(height), + BlockTime: &time, + } + cm.Lifecycle = append(cm.Lifecycle, stateUpdate) +} + func NewLastCommitHashFromHex(s string) (LastCommitHash, error) { bz, err := hex.DecodeString(s) if err != nil { diff --git a/x/checkpointing/types/types_test.go b/x/checkpointing/types/types_test.go index fa5ecf8ed..3bfe4aa33 100644 --- a/x/checkpointing/types/types_test.go +++ b/x/checkpointing/types/types_test.go @@ -1,13 +1,14 @@ package types_test import ( + "testing" + "github.com/babylonchain/babylon/testutil/datagen" testkeeper "github.com/babylonchain/babylon/testutil/keeper" "github.com/babylonchain/babylon/x/checkpointing/types" "github.com/cosmos/cosmos-sdk/client" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" - "testing" ) // a single validator @@ -22,15 +23,13 @@ func TestRawCheckpointWithMeta_Accumulate1(t *testing.T) { ckpt, err := ckptkeeper.BuildRawCheckpoint(ctx, epochNum, lch) require.NoError(t, err) valSet := datagen.GenRandomValSet(n) - updated, err := ckpt.Accumulate(valSet, valSet[0].Addr, blsPubkeys[0], blsSigs[0], totalPower) + err = ckpt.Accumulate(valSet, valSet[0].Addr, blsPubkeys[0], blsSigs[0], totalPower) require.NoError(t, err) - require.True(t, updated) require.Equal(t, types.Sealed, ckpt.Status) // accumulate the same BLS sig - updated, err = ckpt.Accumulate(valSet, valSet[0].Addr, blsPubkeys[0], blsSigs[0], totalPower) + err = ckpt.Accumulate(valSet, valSet[0].Addr, blsPubkeys[0], blsSigs[0], totalPower) require.ErrorIs(t, err, types.ErrCkptNotAccumulating) - require.False(t, updated) require.Equal(t, types.Sealed, ckpt.Status) } @@ -47,21 +46,17 @@ func TestRawCheckpointWithMeta_Accumulate4(t *testing.T) { 
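// With the new Lifecycle field, a caller can recover when a checkpoint first
// entered a given status. The helper below is a minimal illustrative sketch,
// not part of this change; it assumes only the Lifecycle, State and
// BlockHeight fields introduced above, and the package/function names are
// placeholders.
package example

import (
	checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types"
)

// StatusReachedAt scans a checkpoint's recorded lifecycle and returns the
// Babylon block height at which the checkpoint first entered the given
// status, and false if it never did.
func StatusReachedAt(cm *checkpointingtypes.RawCheckpointWithMeta, status checkpointingtypes.CheckpointStatus) (uint64, bool) {
	for _, update := range cm.Lifecycle {
		if update.State == status {
			return update.BlockHeight, true
		}
	}
	return 0, false
}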
require.NoError(t, err) valSet := datagen.GenRandomValSet(n) for i := 0; i < n; i++ { - var updated bool - updated, err = ckpt.Accumulate(valSet, valSet[i].Addr, blsPubkeys[i], blsSigs[i], totalPower) + err = ckpt.Accumulate(valSet, valSet[i].Addr, blsPubkeys[i], blsSigs[i], totalPower) if i == 0 { require.NoError(t, err) - require.True(t, updated) require.Equal(t, types.Accumulating, ckpt.Status) } if i == 1 { require.NoError(t, err) - require.True(t, updated) require.Equal(t, types.Sealed, ckpt.Status) } if i >= 2 { require.ErrorIs(t, err, types.ErrCkptNotAccumulating) - require.False(t, updated) require.Equal(t, types.Sealed, ckpt.Status) } } diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go index d738c1b1e..e9c7ea08d 100644 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ b/x/zoneconcierge/keeper/grpc_query_test.go @@ -101,7 +101,7 @@ func FuzzHeader(f *testing.F) { hooks := zcKeeper.Hooks() // invoke the hook a random number of times to simulate a random number of blocks - numHeaders := datagen.RandomInt(100) + 1 + numHeaders := datagen.RandomInt(100) + 2 numForkHeaders := datagen.RandomInt(10) + 1 headers, forkHeaders := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) From 9336994b50b4ee9e1304f2ba5290bc5ae6fa4175 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Thu, 12 Jan 2023 10:26:27 +1100 Subject: [PATCH 13/37] chore: refactor `FinalizedChainInfo` API (#268) --- client/docs/swagger-ui/swagger.yaml | 1395 ++++++++++------- proto/babylon/zoneconcierge/query.proto | 38 +- .../babylon/zoneconcierge/zoneconcierge.proto | 22 +- .../keeper/epoch_chain_info_indexer_test.go | 10 +- x/zoneconcierge/keeper/grpc_query.go | 65 +- .../keeper/proof_finalized_chain_info.go | 60 + x/zoneconcierge/types/errors.go | 1 + x/zoneconcierge/types/query.pb.go | 577 ++----- x/zoneconcierge/types/zoneconcierge.go | 79 + x/zoneconcierge/types/zoneconcierge.pb.go | 451 +++++- 10 files changed, 1501 insertions(+), 1197 deletions(-) create mode 100644 x/zoneconcierge/keeper/proof_finalized_chain_info.go diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index 5e162eddf..8ba1ffd78 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -6697,19 +6697,43 @@ paths: which transaction was included and transaction index in the block - proof_tx_in_block: - title: >- - proof_tx_in_block is the proof that tx that carries the header - is included in a certain Babylon block + proof: + title: proof is the proof that the chain info is finalized type: object properties: - root_hash: - type: string - format: byte - data: - type: string - format: byte - proof: + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the + header is included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a + transaction in the Merkle tree. + proof_header_in_epoch: type: object properties: total: @@ -6726,175 +6750,156 @@ paths: items: type: string format: byte - description: >- - TxProof represents a Merkle proof of the presence of a - transaction in the Merkle tree. 
- proof_header_in_epoch: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - title: >- - proof_header_in_epoch is the proof that the Babylon header is - in a certain epoch - proof_epoch_sealed: - title: proof_epoch_sealed is the proof that the epoch is sealed - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting - power, and its bls public key title: >- - validator_set is the validator set of the sealed epoch - - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's - metadata is committed to `app_hash` of the sealer header + proof_header_in_epoch is the proof that the Babylon header + is in a certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed type: object properties: - ops: + validator_set: type: array items: type: object properties: - type: + validator_address: type: string - key: + bls_pub_key: type: string format: byte - data: + voting_power: type: string - format: byte + format: uint64 title: >- - ProofOp defines an operation used for calculating - Merkle root + ValidatorWithBlsKey couples validator address, + voting power, and its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + metadata is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for + calculating Merkle root - The data could be arbitrary format, providing - nessecary data + The data could be arbitrary format, providing + nessecary data - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's - validator set is committed to `app_hash` of the sealer - header - type: object - properties: - ops: - type: array - items: + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for + calculating Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: type: object properties: - type: - type: string - key: - type: string - format: byte - data: + index: + type: integer + format: int64 + hash: type: string format: byte title: >- - ProofOp defines an operation used for calculating - Merkle root + Each provided OP_RETURN transaction can be + 
idendtified by hash of block in - The data could be arbitrary format, providing - nessecary data + which transaction was included and transaction index + in the block + description: >- + key is the position (txIdx, blockHash) of this tx on + BTC blockchain - for example neighbouring node hash - proof_epoch_submitted: - type: array - items: - type: object - properties: - key: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified - by hash of block in + Although it is already a part of SubmissionKey, we + store it here again - which transaction was included and transaction index in - the block - description: >- - key is the position (txIdx, blockHash) of this tx on BTC - blockchain + to make TransactionInfo self-contained. - Although it is already a part of SubmissionKey, we store - it here again + For example, storing the key allows TransactionInfo + to not relay on - to make TransactionInfo self-contained. + the fact that TransactionInfo will be ordered in the + same order as - For example, storing the key allows TransactionInfo to - not relay on + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included + in the position in `key` - the fact that TransactionInfo will be ordered in the - same order as + TODO: maybe it could use here better format as we + already processed and - TransactionKeys in SubmissionKey. - transaction: - type: string - format: byte - title: transaction is the full transaction in bytes - proof: - type: string - format: byte + valideated the proof? title: >- - proof is the Merkle proof that this tx is included in - the position in `key` - - TODO: maybe it could use here better format as we - already processed and - - valideated the proof? - title: >- - TransactionInfo is the info of a tx that contains Babylon - checkpoint, including + TransactionInfo is the info of a tx that contains + Babylon checkpoint, including - - the position of the tx on BTC blockchain + - the position of the tx on BTC blockchain - - the full tx content + - the full tx content - - the Merkle proof that this tx is on the above position - title: >- - proof_epoch_submitted is the proof that the epoch's checkpoint - is included in BTC ledger + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's + checkpoint is included in BTC ledger - It is the two TransactionInfo in the best (i.e., earliest) - checkpoint submission + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission description: >- QueryFinalizedChainInfoResponse is response type for the Query/FinalizedChainInfo RPC method. 
@@ -7644,19 +7649,43 @@ paths: which transaction was included and transaction index in the block - proof_tx_in_block: - title: >- - proof_tx_in_block is the proof that tx that carries the header - is included in a certain Babylon block + proof: + title: proof is the proof that the chain info is finalized type: object properties: - root_hash: - type: string - format: byte - data: - type: string - format: byte - proof: + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the + header is included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a + transaction in the Merkle tree. + proof_header_in_epoch: type: object properties: total: @@ -7673,175 +7702,156 @@ paths: items: type: string format: byte - description: >- - TxProof represents a Merkle proof of the presence of a - transaction in the Merkle tree. - proof_header_in_epoch: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - title: >- - proof_header_in_epoch is the proof that the Babylon header is - in a certain epoch - proof_epoch_sealed: - title: proof_epoch_sealed is the proof that the epoch is sealed - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting - power, and its bls public key - title: >- - validator_set is the validator set of the sealed epoch - - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - proof_epoch_info: title: >- - proof_epoch_info is the Merkle proof that the epoch's - metadata is committed to `app_hash` of the sealer header + proof_header_in_epoch is the proof that the Babylon header + is in a certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed type: object properties: - ops: + validator_set: type: array items: type: object properties: - type: + validator_address: type: string - key: + bls_pub_key: type: string format: byte - data: + voting_power: type: string - format: byte + format: uint64 title: >- - ProofOp defines an operation used for calculating - Merkle root + ValidatorWithBlsKey couples validator address, + voting power, and its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + metadata is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for + calculating Merkle root - The data could be arbitrary format, providing - nessecary data + The data could be 
arbitrary format, providing + nessecary data - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's - validator set is committed to `app_hash` of the sealer - header - type: object - properties: - ops: - type: array - items: + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for + calculating Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: type: object properties: - type: - type: string - key: - type: string - format: byte - data: + index: + type: integer + format: int64 + hash: type: string format: byte title: >- - ProofOp defines an operation used for calculating - Merkle root + Each provided OP_RETURN transaction can be + idendtified by hash of block in - The data could be arbitrary format, providing - nessecary data + which transaction was included and transaction index + in the block + description: >- + key is the position (txIdx, blockHash) of this tx on + BTC blockchain - for example neighbouring node hash - proof_epoch_submitted: - type: array - items: - type: object - properties: - key: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified - by hash of block in + Although it is already a part of SubmissionKey, we + store it here again - which transaction was included and transaction index in - the block - description: >- - key is the position (txIdx, blockHash) of this tx on BTC - blockchain + to make TransactionInfo self-contained. - Although it is already a part of SubmissionKey, we store - it here again + For example, storing the key allows TransactionInfo + to not relay on - to make TransactionInfo self-contained. + the fact that TransactionInfo will be ordered in the + same order as - For example, storing the key allows TransactionInfo to - not relay on + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included + in the position in `key` - the fact that TransactionInfo will be ordered in the - same order as + TODO: maybe it could use here better format as we + already processed and - TransactionKeys in SubmissionKey. - transaction: - type: string - format: byte - title: transaction is the full transaction in bytes - proof: - type: string - format: byte + valideated the proof? title: >- - proof is the Merkle proof that this tx is included in - the position in `key` - - TODO: maybe it could use here better format as we - already processed and + TransactionInfo is the info of a tx that contains + Babylon checkpoint, including - valideated the proof? 
- title: >- - TransactionInfo is the info of a tx that contains Babylon - checkpoint, including - - - the position of the tx on BTC blockchain + - the position of the tx on BTC blockchain - - the full tx content + - the full tx content - - the Merkle proof that this tx is on the above position - title: >- - proof_epoch_submitted is the proof that the epoch's checkpoint - is included in BTC ledger + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's + checkpoint is included in BTC ledger - It is the two TransactionInfo in the best (i.e., earliest) - checkpoint submission + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission description: >- QueryFinalizedChainInfoUntilHeightResponse is response type for the Query/FinalizedChainInfoUntilHeight RPC method. @@ -13628,6 +13638,207 @@ definitions: - The epoch medatata is committed to the `app_hash` of the sealer header - The validator set is committed to the `app_hash` of the sealer header + babylon.zoneconcierge.v1.ProofFinalizedChainInfo: + type: object + properties: + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header is + included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a transaction in + the Merkle tree. + proof_header_in_epoch: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + title: >- + proof_header_in_epoch is the proof that the Babylon header is in a + certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, and + its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's validator + set is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating 
Merkle + root + + The data could be arbitrary format, providing nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified by hash + of block in + + which transaction was included and transaction index in the + block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain + + Although it is already a part of SubmissionKey, we store it here + again + + to make TransactionInfo self-contained. + + For example, storing the key allows TransactionInfo to not relay + on + + the fact that TransactionInfo will be ordered in the same order + as + + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in the + position in `key` + + TODO: maybe it could use here better format as we already + processed and + + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including + + - the position of the tx on BTC blockchain + + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint is + included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) checkpoint + submission + title: >- + ProofFinalizedChainInfo is a set of proofs that attest a chain info is + BTC-finalised babylon.zoneconcierge.v1.QueryChainInfoResponse: type: object properties: @@ -14726,19 +14937,43 @@ definitions: which transaction was included and transaction index in the block - proof_tx_in_block: - title: >- - proof_tx_in_block is the proof that tx that carries the header is - included in a certain Babylon block + proof: + title: proof is the proof that the chain info is finalized type: object properties: - root_hash: - type: string - format: byte - data: - type: string - format: byte - proof: + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header is + included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a transaction + in the Merkle tree. + proof_header_in_epoch: type: object properties: total: @@ -14755,172 +14990,154 @@ definitions: items: type: string format: byte - description: >- - TxProof represents a Merkle proof of the presence of a transaction in - the Merkle tree. 
- proof_header_in_epoch: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - title: >- - proof_header_in_epoch is the proof that the Babylon header is in a - certain epoch - proof_epoch_sealed: - title: proof_epoch_sealed is the proof that the epoch is sealed - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting power, and - its bls public key - title: >- - validator_set is the validator set of the sealed epoch - - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - proof_epoch_info: title: >- - proof_epoch_info is the Merkle proof that the epoch's metadata is - committed to `app_hash` of the sealer header + proof_header_in_epoch is the proof that the Babylon header is in a + certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed type: object properties: - ops: + validator_set: type: array items: type: object properties: - type: + validator_address: type: string - key: + bls_pub_key: type: string format: byte - data: + voting_power: type: string - format: byte + format: uint64 title: >- - ProofOp defines an operation used for calculating Merkle - root + ValidatorWithBlsKey couples validator address, voting power, + and its bls public key + title: >- + validator_set is the validator set of the sealed epoch - The data could be arbitrary format, providing nessecary data + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata + is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's validator - set is committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: + The data could be arbitrary format, providing nessecary + data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary + data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: type: object properties: - type: - type: string - key: - type: string - format: byte - data: + index: + type: integer + format: int64 + hash: type: string format: byte title: >- - ProofOp defines an operation used for calculating Merkle - root + Each provided OP_RETURN transaction can be idendtified 
by + hash of block in - The data could be arbitrary format, providing nessecary data + which transaction was included and transaction index in the + block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain - for example neighbouring node hash - proof_epoch_submitted: - type: array - items: - type: object - properties: - key: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified by hash - of block in + Although it is already a part of SubmissionKey, we store it + here again - which transaction was included and transaction index in the - block - description: >- - key is the position (txIdx, blockHash) of this tx on BTC - blockchain + to make TransactionInfo self-contained. - Although it is already a part of SubmissionKey, we store it here - again + For example, storing the key allows TransactionInfo to not + relay on - to make TransactionInfo self-contained. + the fact that TransactionInfo will be ordered in the same + order as - For example, storing the key allows TransactionInfo to not relay - on + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in the + position in `key` - the fact that TransactionInfo will be ordered in the same order - as + TODO: maybe it could use here better format as we already + processed and - TransactionKeys in SubmissionKey. - transaction: - type: string - format: byte - title: transaction is the full transaction in bytes - proof: - type: string - format: byte + valideated the proof? title: >- - proof is the Merkle proof that this tx is included in the - position in `key` - - TODO: maybe it could use here better format as we already - processed and - - valideated the proof? - title: >- - TransactionInfo is the info of a tx that contains Babylon - checkpoint, including + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including - - the position of the tx on BTC blockchain + - the position of the tx on BTC blockchain - - the full tx content + - the full tx content - - the Merkle proof that this tx is on the above position - title: >- - proof_epoch_submitted is the proof that the epoch's checkpoint is - included in BTC ledger + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint is + included in BTC ledger - It is the two TransactionInfo in the best (i.e., earliest) checkpoint - submission + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission description: >- QueryFinalizedChainInfoResponse is response type for the Query/FinalizedChainInfo RPC method. 
@@ -15442,19 +15659,43 @@ definitions: which transaction was included and transaction index in the block - proof_tx_in_block: - title: >- - proof_tx_in_block is the proof that tx that carries the header is - included in a certain Babylon block + proof: + title: proof is the proof that the chain info is finalized type: object properties: - root_hash: - type: string - format: byte - data: - type: string - format: byte - proof: + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header is + included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a transaction + in the Merkle tree. + proof_header_in_epoch: type: object properties: total: @@ -15471,172 +15712,154 @@ definitions: items: type: string format: byte - description: >- - TxProof represents a Merkle proof of the presence of a transaction in - the Merkle tree. - proof_header_in_epoch: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - title: >- - proof_header_in_epoch is the proof that the Babylon header is in a - certain epoch - proof_epoch_sealed: - title: proof_epoch_sealed is the proof that the epoch is sealed - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting power, and - its bls public key title: >- - validator_set is the validator set of the sealed epoch - - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's metadata is - committed to `app_hash` of the sealer header + proof_header_in_epoch is the proof that the Babylon header is in a + certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed type: object properties: - ops: + validator_set: type: array items: type: object properties: - type: + validator_address: type: string - key: + bls_pub_key: type: string format: byte - data: + voting_power: type: string - format: byte + format: uint64 title: >- - ProofOp defines an operation used for calculating Merkle - root + ValidatorWithBlsKey couples validator address, voting power, + and its bls public key + title: >- + validator_set is the validator set of the sealed epoch - The data could be arbitrary format, providing nessecary data + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata + is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root - for example 
neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's validator - set is committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: + The data could be arbitrary format, providing nessecary + data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary + data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: type: object properties: - type: - type: string - key: - type: string - format: byte - data: + index: + type: integer + format: int64 + hash: type: string format: byte title: >- - ProofOp defines an operation used for calculating Merkle - root + Each provided OP_RETURN transaction can be idendtified by + hash of block in - The data could be arbitrary format, providing nessecary data + which transaction was included and transaction index in the + block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain - for example neighbouring node hash - proof_epoch_submitted: - type: array - items: - type: object - properties: - key: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified by hash - of block in + Although it is already a part of SubmissionKey, we store it + here again - which transaction was included and transaction index in the - block - description: >- - key is the position (txIdx, blockHash) of this tx on BTC - blockchain + to make TransactionInfo self-contained. - Although it is already a part of SubmissionKey, we store it here - again + For example, storing the key allows TransactionInfo to not + relay on - to make TransactionInfo self-contained. + the fact that TransactionInfo will be ordered in the same + order as - For example, storing the key allows TransactionInfo to not relay - on + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in the + position in `key` - the fact that TransactionInfo will be ordered in the same order - as + TODO: maybe it could use here better format as we already + processed and - TransactionKeys in SubmissionKey. - transaction: - type: string - format: byte - title: transaction is the full transaction in bytes - proof: - type: string - format: byte + valideated the proof? title: >- - proof is the Merkle proof that this tx is included in the - position in `key` + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including - TODO: maybe it could use here better format as we already - processed and + - the position of the tx on BTC blockchain - valideated the proof? 
- title: >- - TransactionInfo is the info of a tx that contains Babylon - checkpoint, including - - - the position of the tx on BTC blockchain + - the full tx content - - the full tx content - - - the Merkle proof that this tx is on the above position - title: >- - proof_epoch_submitted is the proof that the epoch's checkpoint is - included in BTC ledger + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint is + included in BTC ledger - It is the two TransactionInfo in the best (i.e., earliest) checkpoint - submission + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission description: >- QueryFinalizedChainInfoUntilHeightResponse is response type for the Query/FinalizedChainInfoUntilHeight RPC method. diff --git a/proto/babylon/zoneconcierge/query.proto b/proto/babylon/zoneconcierge/query.proto index 800c9d8d1..400f94053 100644 --- a/proto/babylon/zoneconcierge/query.proto +++ b/proto/babylon/zoneconcierge/query.proto @@ -3,8 +3,6 @@ package babylon.zoneconcierge.v1; import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; -import "tendermint/types/types.proto"; -import "tendermint/crypto/proof.proto"; import "cosmos/base/query/v1beta1/pagination.proto"; import "babylon/btccheckpoint/tx.proto"; import "babylon/btccheckpoint/btccheckpoint.proto"; @@ -148,9 +146,6 @@ message QueryFinalizedChainInfoResponse { // finalized_chain_info is the info of the CZ babylon.zoneconcierge.v1.ChainInfo finalized_chain_info = 1; - /* - The following fields include metadata related to this chain info - */ // epoch_info is the metadata of the last BTC-finalised epoch babylon.epoching.v1.Epoch epoch_info = 2; // raw_checkpoint is the raw checkpoint of this epoch @@ -158,18 +153,8 @@ message QueryFinalizedChainInfoResponse { // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch babylon.btccheckpoint.v1.SubmissionKey btc_submission_key = 4; - /* - The following fields include proofs that attest the chain info is BTC-finalised - */ - // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block - tendermint.types.TxProof proof_tx_in_block = 5; - // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch - tendermint.crypto.Proof proof_header_in_epoch = 6; - // proof_epoch_sealed is the proof that the epoch is sealed - babylon.zoneconcierge.v1.ProofEpochSealed proof_epoch_sealed = 7; - // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger - // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission - repeated babylon.btccheckpoint.v1.TransactionInfo proof_epoch_submitted = 8; + // proof is the proof that the chain info is finalized + babylon.zoneconcierge.v1.ProofFinalizedChainInfo proof = 5; } // QueryFinalizedChainInfoUntilHeightRequest is request type for the Query/FinalizedChainInfoUntilHeight RPC method. 
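With the proto change above, both finalized-chain-info responses carry a single ProofFinalizedChainInfo instead of four separate proof fields, and the patch leaves a standalone VerifyFinalizedChainInfo verifier as a TODO. Below is a minimal sketch, assuming only the types added in this patch (the package name and helper are illustrative, not repo code), of a structural pre-check a consumer might run before full verification against BTC and Babylon light clients.

package zcexample // hypothetical example package, not part of the repo

import (
	"fmt"

	zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types"
)

// checkProofShape only checks that every sub-proof is present and structurally
// sane; actual verification against Bitcoin/Babylon light clients is the TODO
// left in proof_finalized_chain_info.go.
func checkProofShape(p *zctypes.ProofFinalizedChainInfo) error {
	if p == nil {
		return fmt.Errorf("nil ProofFinalizedChainInfo")
	}
	if p.ProofTxInBlock == nil {
		return fmt.Errorf("missing proof_tx_in_block")
	}
	if p.ProofHeaderInEpoch == nil {
		return fmt.Errorf("missing proof_header_in_epoch")
	}
	if p.ProofEpochSealed == nil {
		return fmt.Errorf("missing proof_epoch_sealed")
	}
	// ProofEpochSealed already ships a ValidateBasic in this module
	if err := p.ProofEpochSealed.ValidateBasic(); err != nil {
		return err
	}
	// the best checkpoint submission consists of two BTC transactions
	if len(p.ProofEpochSubmitted) != 2 {
		return fmt.Errorf("expected 2 TransactionInfo entries, got %d", len(p.ProofEpochSubmitted))
	}
	return nil
}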
@@ -187,10 +172,7 @@ message QueryFinalizedChainInfoUntilHeightRequest { message QueryFinalizedChainInfoUntilHeightResponse { // finalized_chain_info is the info of the CZ babylon.zoneconcierge.v1.ChainInfo finalized_chain_info = 1; - - /* - The following fields include metadata related to this chain info - */ + // epoch_info is the metadata of the last BTC-finalised epoch babylon.epoching.v1.Epoch epoch_info = 2; // raw_checkpoint is the raw checkpoint of this epoch @@ -198,16 +180,6 @@ message QueryFinalizedChainInfoUntilHeightResponse { // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch babylon.btccheckpoint.v1.SubmissionKey btc_submission_key = 4; - /* - The following fields include proofs that attest the chain info is BTC-finalised - */ - // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block - tendermint.types.TxProof proof_tx_in_block = 5; - // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch - tendermint.crypto.Proof proof_header_in_epoch = 6; - // proof_epoch_sealed is the proof that the epoch is sealed - babylon.zoneconcierge.v1.ProofEpochSealed proof_epoch_sealed = 7; - // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger - // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission - repeated babylon.btccheckpoint.v1.TransactionInfo proof_epoch_submitted = 8; + // proof is the proof that the chain info is finalized + babylon.zoneconcierge.v1.ProofFinalizedChainInfo proof = 5; } diff --git a/proto/babylon/zoneconcierge/zoneconcierge.proto b/proto/babylon/zoneconcierge/zoneconcierge.proto index 680240573..07f219530 100644 --- a/proto/babylon/zoneconcierge/zoneconcierge.proto +++ b/proto/babylon/zoneconcierge/zoneconcierge.proto @@ -1,10 +1,12 @@ syntax = "proto3"; package babylon.zoneconcierge.v1; -import "babylon/checkpointing/bls_key.proto"; -import "babylon/checkpointing/checkpoint.proto"; import "tendermint/types/types.proto"; import "tendermint/crypto/proof.proto"; +import "babylon/btccheckpoint/btccheckpoint.proto"; +import "babylon/checkpointing/bls_key.proto"; +import "babylon/checkpointing/query.proto"; +import "babylon/checkpointing/checkpoint.proto"; option go_package = "github.com/babylonchain/babylon/x/zoneconcierge/types"; @@ -74,3 +76,19 @@ message ProofEpochSealed { // proof_epoch_info is the Merkle proof that the epoch's validator set is committed to `app_hash` of the sealer header tendermint.crypto.ProofOps proof_epoch_val_set = 3; } + +// ProofFinalizedChainInfo is a set of proofs that attest a chain info is BTC-finalised +message ProofFinalizedChainInfo { + /* + The following fields include proofs that attest the chain info is BTC-finalised + */ + // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block + tendermint.types.TxProof proof_tx_in_block = 4; + // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch + tendermint.crypto.Proof proof_header_in_epoch = 5; + // proof_epoch_sealed is the proof that the epoch is sealed + babylon.zoneconcierge.v1.ProofEpochSealed proof_epoch_sealed = 6; + // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger + // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission + repeated babylon.btccheckpoint.v1.TransactionInfo proof_epoch_submitted = 7; +} diff --git 
a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go index 57f52f781..02f8b0606 100644 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go +++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go @@ -17,17 +17,23 @@ func FuzzEpochChainInfoIndexer(f *testing.F) { _, babylonChain, czChain, babylonApp := SetupTest(t) zcKeeper := babylonApp.ZoneConciergeKeeper + epochingKeeper := babylonApp.EpochingKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() + // enter a random epoch + epochNum := datagen.RandomInt(10) + for j := uint64(0); j < epochNum; j++ { + epochingKeeper.IncEpoch(ctx) + } + // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) - // simulate the scenario that a random epoch has ended - epochNum := datagen.RandomInt(10) + // end this epoch hooks.AfterEpochEnds(ctx, epochNum) // check if the chain info of this epoch is recorded or not diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index 78754aef3..cbf7f69f9 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -3,13 +3,9 @@ package keeper import ( "context" - btcctypes "github.com/babylonchain/babylon/x/btccheckpoint/types" - epochingtypes "github.com/babylonchain/babylon/x/epoching/types" "github.com/babylonchain/babylon/x/zoneconcierge/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/query" - tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -157,40 +153,34 @@ func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedC } ctx := sdk.UnwrapSDKContext(c) + resp := &types.QueryFinalizedChainInfoResponse{} // find the last finalised chain info and the earliest epoch that snapshots this chain info finalizedEpoch, chainInfo, err := k.GetLastFinalizedChainInfo(ctx, req.ChainId) if err != nil { return nil, err } + resp.FinalizedChainInfo = chainInfo // find the epoch metadata of the finalised epoch - epochInfo, err := k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) + resp.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) if err != nil { return nil, err } // find the raw checkpoint and the best submission key for the finalised epoch - _, rawCheckpoint, bestSubmissionKey, err := k.btccKeeper.GetFinalizedEpochDataWithBestSubmission(ctx, finalizedEpoch) + _, resp.RawCheckpoint, resp.BtcSubmissionKey, err = k.btccKeeper.GetFinalizedEpochDataWithBestSubmission(ctx, finalizedEpoch) if err != nil { return nil, err } - resp := &types.QueryFinalizedChainInfoResponse{ - FinalizedChainInfo: chainInfo, - // metadata related to this chain info, including the epoch, the raw checkpoint of this epoch, and the BTC tx index of the raw checkpoint - EpochInfo: epochInfo, - RawCheckpoint: rawCheckpoint, - BtcSubmissionKey: bestSubmissionKey, - } - // if the query does not want the proofs, return here if !req.Prove { return resp, nil } // generate all proofs - resp.ProofTxInBlock, resp.ProofHeaderInEpoch, resp.ProofEpochSealed, resp.ProofEpochSubmitted, err = k.proveFinalizedChainInfo(ctx, chainInfo, epochInfo, bestSubmissionKey) + resp.Proof, err = 
k.proveFinalizedChainInfo(ctx, chainInfo, resp.EpochInfo, resp.RawCheckpoint, resp.BtcSubmissionKey) if err != nil { return nil, err } @@ -237,10 +227,11 @@ func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.Quer } // assign the finalizedEpoch, and retrieve epoch info, raw ckpt and submission key finalizedEpoch = closestHeader.BabylonEpoch - resp.FinalizedChainInfo, err = k.GetEpochChainInfo(ctx, req.ChainId, finalizedEpoch) + chainInfo, err = k.GetEpochChainInfo(ctx, req.ChainId, finalizedEpoch) if err != nil { return nil, err } + resp.FinalizedChainInfo = chainInfo resp.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) if err != nil { return nil, err @@ -257,50 +248,10 @@ func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.Quer } // generate all proofs - resp.ProofTxInBlock, resp.ProofHeaderInEpoch, resp.ProofEpochSealed, resp.ProofEpochSubmitted, err = k.proveFinalizedChainInfo(ctx, resp.FinalizedChainInfo, resp.EpochInfo, resp.BtcSubmissionKey) + resp.Proof, err = k.proveFinalizedChainInfo(ctx, chainInfo, resp.EpochInfo, resp.RawCheckpoint, resp.BtcSubmissionKey) if err != nil { return nil, err } return resp, nil } - -// proveFinalizedChainInfo generates proofs that a chainInfo has been finalised by the given epoch with epochInfo -// It includes proofTxInBlock, proofHeaderInEpoch, proofEpochSealed and proofEpochSubmitted -// The proofs can be verified by a verifier with access to a BTC and Babylon light client -// CONTRACT: this is only a private helper function for simplifying the implementation of RPC calls -func (k Keeper) proveFinalizedChainInfo( - ctx sdk.Context, - chainInfo *types.ChainInfo, - epochInfo *epochingtypes.Epoch, - bestSubmissionKey *btcctypes.SubmissionKey, -) (*tmproto.TxProof, *tmcrypto.Proof, *types.ProofEpochSealed, []*btcctypes.TransactionInfo, error) { - // Proof that the Babylon tx is in block - proofTxInBlock, err := k.ProveTxInBlock(ctx, chainInfo.LatestHeader.BabylonTxHash) - if err != nil { - return nil, nil, nil, nil, err - } - - // proof that the block is in this epoch - proofHeaderInEpoch, err := k.ProveHeaderInEpoch(ctx, chainInfo.LatestHeader.BabylonHeader, epochInfo) - if err != nil { - return nil, nil, nil, nil, err - } - - // proof that the epoch is sealed - proofEpochSealed, err := k.ProveEpochSealed(ctx, epochInfo.EpochNumber) - if err != nil { - return nil, nil, nil, nil, err - } - - // proof that the epoch's checkpoint is submitted to BTC - // i.e., the two `TransactionInfo`s for the checkpoint - proofEpochSubmitted, err := k.ProveEpochSubmitted(ctx, bestSubmissionKey) - if err != nil { - // The only error in ProveEpochSubmitted is the nil bestSubmission. - // Since the epoch w.r.t. the bestSubmissionKey is finalised, this - // can only be a programming error, so we should panic here. 
- panic(err) - } - return proofTxInBlock, proofHeaderInEpoch, proofEpochSealed, proofEpochSubmitted, nil -} diff --git a/x/zoneconcierge/keeper/proof_finalized_chain_info.go b/x/zoneconcierge/keeper/proof_finalized_chain_info.go new file mode 100644 index 000000000..d26c15c68 --- /dev/null +++ b/x/zoneconcierge/keeper/proof_finalized_chain_info.go @@ -0,0 +1,60 @@ +package keeper + +import ( + btcctypes "github.com/babylonchain/babylon/x/btccheckpoint/types" + checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" + epochingtypes "github.com/babylonchain/babylon/x/epoching/types" + "github.com/babylonchain/babylon/x/zoneconcierge/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// proveFinalizedChainInfo generates proofs that a chainInfo has been finalised by the given epoch with epochInfo +// It includes proofTxInBlock, proofHeaderInEpoch, proofEpochSealed and proofEpochSubmitted +// The proofs can be verified by a verifier with access to a BTC and Babylon light client +// CONTRACT: this is only a private helper function for simplifying the implementation of RPC calls +func (k Keeper) proveFinalizedChainInfo( + ctx sdk.Context, + chainInfo *types.ChainInfo, + epochInfo *epochingtypes.Epoch, + rawCheckpoint *checkpointingtypes.RawCheckpoint, + bestSubmissionKey *btcctypes.SubmissionKey, +) (*types.ProofFinalizedChainInfo, error) { + var ( + err error + proof = &types.ProofFinalizedChainInfo{} + ) + + // Proof that the Babylon tx is in block + proof.ProofTxInBlock, err = k.ProveTxInBlock(ctx, chainInfo.LatestHeader.BabylonTxHash) + if err != nil { + return nil, err + } + + // proof that the block is in this epoch + proof.ProofHeaderInEpoch, err = k.ProveHeaderInEpoch(ctx, chainInfo.LatestHeader.BabylonHeader, epochInfo) + if err != nil { + return nil, err + } + + // proof that the epoch is sealed + proof.ProofEpochSealed, err = k.ProveEpochSealed(ctx, epochInfo.EpochNumber) + if err != nil { + return nil, err + } + + // proof that the epoch's checkpoint is submitted to BTC + // i.e., the two `TransactionInfo`s for the checkpoint + proof.ProofEpochSubmitted, err = k.ProveEpochSubmitted(ctx, bestSubmissionKey) + if err != nil { + // The only error in ProveEpochSubmitted is the nil bestSubmission. + // Since the epoch w.r.t. the bestSubmissionKey is finalised, this + // can only be a programming error, so we should panic here. 
+ panic(err) + } + + return proof, nil +} + +// TODO: implement a standalone verifier VerifyFinalizedChainInfo that +// verifies whether a chainInfo is finalised or not, with access to +// Bitcoin and Babylon light clients diff --git a/x/zoneconcierge/types/errors.go b/x/zoneconcierge/types/errors.go index 416c02409..5224d40be 100644 --- a/x/zoneconcierge/types/errors.go +++ b/x/zoneconcierge/types/errors.go @@ -21,4 +21,5 @@ var ( ErrFinalizedEpochNotFound = sdkerrors.Register(ModuleName, 1110, "cannot find a finalized epoch") ErrInvalidProofEpochSealed = sdkerrors.Register(ModuleName, 1111, "invalid ProofEpochSealed") ErrInvalidMerkleProof = sdkerrors.Register(ModuleName, 1112, "invalid Merkle inclusion proof") + ErrInvalidChainInfo = sdkerrors.Register(ModuleName, 1113, "invalid chain info") ) diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go index 72ea849f2..c0bd55bdb 100644 --- a/x/zoneconcierge/types/query.pb.go +++ b/x/zoneconcierge/types/query.pb.go @@ -13,8 +13,6 @@ import ( _ "github.com/gogo/protobuf/gogoproto" grpc1 "github.com/gogo/protobuf/grpc" proto "github.com/gogo/protobuf/proto" - crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" - types3 "github.com/tendermint/tendermint/proto/tendermint/types" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -769,15 +767,8 @@ type QueryFinalizedChainInfoResponse struct { RawCheckpoint *types1.RawCheckpoint `protobuf:"bytes,3,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch BtcSubmissionKey *types2.SubmissionKey `protobuf:"bytes,4,opt,name=btc_submission_key,json=btcSubmissionKey,proto3" json:"btc_submission_key,omitempty"` - // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block - ProofTxInBlock *types3.TxProof `protobuf:"bytes,5,opt,name=proof_tx_in_block,json=proofTxInBlock,proto3" json:"proof_tx_in_block,omitempty"` - // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch - ProofHeaderInEpoch *crypto.Proof `protobuf:"bytes,6,opt,name=proof_header_in_epoch,json=proofHeaderInEpoch,proto3" json:"proof_header_in_epoch,omitempty"` - // proof_epoch_sealed is the proof that the epoch is sealed - ProofEpochSealed *ProofEpochSealed `protobuf:"bytes,7,opt,name=proof_epoch_sealed,json=proofEpochSealed,proto3" json:"proof_epoch_sealed,omitempty"` - // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger - // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission - ProofEpochSubmitted []*types2.TransactionInfo `protobuf:"bytes,8,rep,name=proof_epoch_submitted,json=proofEpochSubmitted,proto3" json:"proof_epoch_submitted,omitempty"` + // proof is the proof that the chain info is finalized + Proof *ProofFinalizedChainInfo `protobuf:"bytes,5,opt,name=proof,proto3" json:"proof,omitempty"` } func (m *QueryFinalizedChainInfoResponse) Reset() { *m = QueryFinalizedChainInfoResponse{} } @@ -841,30 +832,9 @@ func (m *QueryFinalizedChainInfoResponse) GetBtcSubmissionKey() *types2.Submissi return nil } -func (m *QueryFinalizedChainInfoResponse) GetProofTxInBlock() *types3.TxProof { +func (m *QueryFinalizedChainInfoResponse) GetProof() *ProofFinalizedChainInfo { if m != nil { - return m.ProofTxInBlock - } - return nil -} - -func (m *QueryFinalizedChainInfoResponse) 
GetProofHeaderInEpoch() *crypto.Proof { - if m != nil { - return m.ProofHeaderInEpoch - } - return nil -} - -func (m *QueryFinalizedChainInfoResponse) GetProofEpochSealed() *ProofEpochSealed { - if m != nil { - return m.ProofEpochSealed - } - return nil -} - -func (m *QueryFinalizedChainInfoResponse) GetProofEpochSubmitted() []*types2.TransactionInfo { - if m != nil { - return m.ProofEpochSubmitted + return m.Proof } return nil } @@ -948,15 +918,8 @@ type QueryFinalizedChainInfoUntilHeightResponse struct { RawCheckpoint *types1.RawCheckpoint `protobuf:"bytes,3,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch BtcSubmissionKey *types2.SubmissionKey `protobuf:"bytes,4,opt,name=btc_submission_key,json=btcSubmissionKey,proto3" json:"btc_submission_key,omitempty"` - // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block - ProofTxInBlock *types3.TxProof `protobuf:"bytes,5,opt,name=proof_tx_in_block,json=proofTxInBlock,proto3" json:"proof_tx_in_block,omitempty"` - // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch - ProofHeaderInEpoch *crypto.Proof `protobuf:"bytes,6,opt,name=proof_header_in_epoch,json=proofHeaderInEpoch,proto3" json:"proof_header_in_epoch,omitempty"` - // proof_epoch_sealed is the proof that the epoch is sealed - ProofEpochSealed *ProofEpochSealed `protobuf:"bytes,7,opt,name=proof_epoch_sealed,json=proofEpochSealed,proto3" json:"proof_epoch_sealed,omitempty"` - // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger - // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission - ProofEpochSubmitted []*types2.TransactionInfo `protobuf:"bytes,8,rep,name=proof_epoch_submitted,json=proofEpochSubmitted,proto3" json:"proof_epoch_submitted,omitempty"` + // proof is the proof that the chain info is finalized + Proof *ProofFinalizedChainInfo `protobuf:"bytes,5,opt,name=proof,proto3" json:"proof,omitempty"` } func (m *QueryFinalizedChainInfoUntilHeightResponse) Reset() { @@ -1024,30 +987,9 @@ func (m *QueryFinalizedChainInfoUntilHeightResponse) GetBtcSubmissionKey() *type return nil } -func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProofTxInBlock() *types3.TxProof { - if m != nil { - return m.ProofTxInBlock - } - return nil -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProofHeaderInEpoch() *crypto.Proof { - if m != nil { - return m.ProofHeaderInEpoch - } - return nil -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProofEpochSealed() *ProofEpochSealed { - if m != nil { - return m.ProofEpochSealed - } - return nil -} - -func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProofEpochSubmitted() []*types2.TransactionInfo { +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProof() *ProofFinalizedChainInfo { if m != nil { - return m.ProofEpochSubmitted + return m.Proof } return nil } @@ -1076,88 +1018,79 @@ func init() { func init() { proto.RegisterFile("babylon/zoneconcierge/query.proto", fileDescriptor_2caab7ee15063236) } var fileDescriptor_2caab7ee15063236 = []byte{ - // 1294 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x5f, 0x6f, 0xdb, 0x54, - 0x14, 0xaf, 0xfb, 0x27, 0x6d, 0x6f, 0x59, 0x55, 0xee, 0xba, 0x2d, 0x73, 0xd7, 0xb4, 0x18, 0x69, - 0x6b, 0xab, 0x61, 0x93, 0xb2, 0x32, 0x2a, 0x24, 0xa6, 0xa6, 0xa5, 
0x5d, 0x28, 0x1a, 0xad, 0xd7, - 0x22, 0x84, 0x40, 0x91, 0xed, 0xdc, 0x24, 0x56, 0x13, 0x5f, 0xcf, 0x76, 0xb2, 0x64, 0xa5, 0x2f, - 0x7c, 0x01, 0x90, 0x78, 0x41, 0x7c, 0x80, 0x22, 0xf1, 0x80, 0xc4, 0xb7, 0x18, 0xd2, 0x1e, 0x26, - 0xf1, 0xc2, 0x13, 0x42, 0x2d, 0x5f, 0x03, 0x09, 0xf9, 0xdc, 0xeb, 0xc4, 0x4e, 0xe2, 0x25, 0x29, - 0x7b, 0xd8, 0xc3, 0x5e, 0xaa, 0xf8, 0xde, 0x73, 0x7e, 0xe7, 0x77, 0xce, 0x3d, 0xe7, 0xde, 0x9f, - 0x8a, 0xde, 0xd2, 0x35, 0xbd, 0x51, 0xa6, 0x96, 0xf2, 0x84, 0x5a, 0xc4, 0xa0, 0x96, 0x61, 0x12, - 0xa7, 0x48, 0x94, 0x47, 0x55, 0xe2, 0x34, 0x64, 0xdb, 0xa1, 0x1e, 0xc5, 0x49, 0x6e, 0x22, 0x47, - 0x4c, 0xe4, 0x5a, 0x5a, 0x9c, 0x2d, 0xd2, 0x22, 0x05, 0x23, 0xc5, 0xff, 0xc5, 0xec, 0xc5, 0x1b, - 0x45, 0x4a, 0x8b, 0x65, 0xa2, 0x68, 0xb6, 0xa9, 0x68, 0x96, 0x45, 0x3d, 0xcd, 0x33, 0xa9, 0xe5, - 0x06, 0xbb, 0x1e, 0xb1, 0xf2, 0xc4, 0xa9, 0x98, 0x96, 0xa7, 0x78, 0x0d, 0x9b, 0xb8, 0xec, 0x2f, - 0xdf, 0x9d, 0x0f, 0xed, 0x1a, 0x4e, 0xc3, 0xf6, 0xa8, 0x62, 0x3b, 0x94, 0x16, 0xf8, 0xf6, 0x8a, - 0x41, 0xdd, 0x0a, 0x75, 0x15, 0x5d, 0x73, 0x39, 0x47, 0xa5, 0x96, 0xd6, 0x89, 0xa7, 0xa5, 0x15, - 0x5b, 0x2b, 0x9a, 0x16, 0x44, 0xe2, 0xb6, 0xa9, 0x20, 0x33, 0xdd, 0x33, 0x8c, 0x12, 0x31, 0x8e, - 0x6c, 0x0a, 0x31, 0xeb, 0x7c, 0x7f, 0xb9, 0xfb, 0x7e, 0xe4, 0x8b, 0x9b, 0x36, 0x8b, 0xd4, 0xda, - 0x31, 0xad, 0x62, 0xb8, 0x48, 0xe2, 0xcd, 0xee, 0x26, 0x1d, 0x50, 0x52, 0x60, 0x47, 0x6c, 0x6a, - 0x94, 0x7c, 0x93, 0x5a, 0xba, 0xf9, 0xbb, 0xdd, 0x26, 0x7a, 0x26, 0xb6, 0xe6, 0x68, 0x15, 0xb7, - 0x9d, 0x7d, 0xd4, 0x26, 0x7a, 0x44, 0x60, 0x2a, 0xcd, 0x22, 0xbc, 0xef, 0x33, 0xdd, 0x03, 0x7f, - 0x95, 0x3c, 0xaa, 0x12, 0xd7, 0x93, 0x0e, 0xd1, 0xe5, 0xc8, 0xaa, 0x6b, 0x53, 0xcb, 0x25, 0xf8, - 0x23, 0x94, 0x60, 0x71, 0x92, 0xc2, 0xa2, 0xb0, 0x34, 0xb5, 0xba, 0x28, 0xc7, 0x9d, 0xbe, 0xcc, - 0x3c, 0x33, 0xa3, 0x4f, 0xff, 0x5a, 0x18, 0x52, 0xb9, 0x97, 0xb4, 0xc3, 0x83, 0xdd, 0x27, 0x5a, - 0x9e, 0x38, 0x3c, 0x18, 0xbe, 0x8e, 0x26, 0x8c, 0x92, 0x66, 0x5a, 0x39, 0x33, 0x0f, 0xb8, 0x93, - 0xea, 0x38, 0x7c, 0x67, 0xf3, 0xf8, 0x2a, 0x4a, 0x94, 0x88, 0x59, 0x2c, 0x79, 0xc9, 0xe1, 0x45, - 0x61, 0x69, 0x54, 0xe5, 0x5f, 0xd2, 0x4f, 0x02, 0x27, 0x18, 0x20, 0x71, 0x82, 0xf7, 0x7c, 0x7b, - 0x7f, 0x85, 0x13, 0xbc, 0x15, 0x4f, 0x30, 0x6b, 0xe5, 0x49, 0x9d, 0xe4, 0x39, 0x00, 0x77, 0xc3, - 0x19, 0xf4, 0x46, 0x81, 0x3a, 0x47, 0x39, 0xf6, 0xe9, 0x42, 0xd8, 0xa9, 0xd5, 0x85, 0x78, 0x98, - 0x6d, 0xea, 0x1c, 0xb9, 0xea, 0x94, 0xef, 0xc4, 0xa0, 0x5c, 0xe9, 0x1a, 0xba, 0x02, 0xdc, 0x36, - 0xfd, 0x24, 0x3e, 0x35, 0x5d, 0x2f, 0xa8, 0xea, 0x1a, 0xba, 0xda, 0xbe, 0xc1, 0x79, 0xcf, 0xa1, - 0xc9, 0xa0, 0x04, 0x7e, 0x6d, 0x47, 0x96, 0x26, 0xd5, 0x09, 0x5e, 0x03, 0x57, 0x5a, 0x0d, 0xe3, - 0x65, 0xad, 0x02, 0xed, 0x5d, 0x38, 0xe9, 0xab, 0x70, 0x28, 0xe6, 0xc3, 0x43, 0x65, 0x10, 0xe2, - 0x4e, 0x56, 0x81, 0xf2, 0x32, 0xbd, 0x1d, 0x9f, 0x5f, 0x0b, 0x80, 0x31, 0xf4, 0x7f, 0x4a, 0x07, - 0x48, 0x04, 0xf4, 0x8f, 0xfd, 0xd6, 0xec, 0xa0, 0x35, 0x87, 0x26, 0xa1, 0x67, 0x73, 0x56, 0xb5, - 0x02, 0x01, 0x46, 0xd5, 0x09, 0x58, 0x78, 0x50, 0xad, 0x44, 0x38, 0x0f, 0x47, 0x39, 0x6b, 0x68, - 0xae, 0x2b, 0xea, 0x4b, 0x24, 0xfe, 0x0d, 0xba, 0x06, 0x21, 0xfc, 0xe2, 0xf3, 0xe3, 0xea, 0xa3, - 0x0b, 0xb7, 0x11, 0x6a, 0x5d, 0x20, 0xbc, 0x25, 0x6e, 0xca, 0xec, 0xb6, 0x91, 0xfd, 0xdb, 0x46, - 0x66, 0xc3, 0xce, 0x6f, 0x1b, 0x79, 0x4f, 0x2b, 0x12, 0x0e, 0xab, 0x86, 0x3c, 0xa5, 0x53, 0x01, - 0x25, 0x3b, 0xc3, 0xf3, 0xf4, 0x36, 0xd0, 0x78, 0xd0, 0x74, 0x7e, 0x03, 0x0c, 0xd0, 0xbb, 0x81, - 0x1f, 0xde, 0xe9, 0xc2, 0xf3, 0x56, 0x4f, 0x9e, 0x2c, 0x7e, 0x84, 0xe8, 0xe7, 0xe8, 0x46, 
0x93, - 0x27, 0x9c, 0x46, 0x5b, 0xad, 0x2e, 0x7a, 0xc2, 0x3a, 0x9a, 0x8f, 0xc1, 0x7d, 0x69, 0x45, 0x90, - 0xf6, 0x51, 0x0a, 0x62, 0x6c, 0x9b, 0x96, 0x56, 0x36, 0x9f, 0x90, 0xfc, 0x00, 0x63, 0x83, 0x67, - 0xd1, 0x98, 0xed, 0xd0, 0x1a, 0x01, 0xe2, 0x13, 0x2a, 0xfb, 0x90, 0x4e, 0xc7, 0xd0, 0x42, 0x2c, - 0x26, 0x67, 0x7e, 0x88, 0x66, 0x0b, 0xc1, 0x6e, 0xee, 0x62, 0x7d, 0x8a, 0x0b, 0x1d, 0xf0, 0x78, - 0x1d, 0x21, 0x56, 0x69, 0x00, 0x63, 0x47, 0x2a, 0x36, 0xc1, 0x9a, 0x4f, 0x43, 0x2d, 0x2d, 0x43, - 0x3d, 0x55, 0x76, 0x2e, 0xe0, 0xfa, 0x00, 0x4d, 0x3b, 0xda, 0xe3, 0x5c, 0xeb, 0x91, 0x49, 0x8e, - 0xb4, 0xdd, 0x89, 0x91, 0xd7, 0xc8, 0xc7, 0x50, 0xb5, 0xc7, 0x9b, 0xcd, 0x35, 0xf5, 0x92, 0x13, - 0xfe, 0xc4, 0x87, 0x08, 0xeb, 0x9e, 0x91, 0x73, 0xab, 0x7a, 0xc5, 0x74, 0x5d, 0x93, 0x5a, 0xb9, - 0x23, 0xd2, 0x48, 0x8e, 0xb6, 0x61, 0x46, 0x5f, 0xc8, 0x5a, 0x5a, 0x7e, 0xd8, 0xb4, 0xdf, 0x25, - 0x0d, 0x75, 0x46, 0xf7, 0x8c, 0xc8, 0x0a, 0xde, 0x42, 0x6f, 0xc2, 0x23, 0x9e, 0xf3, 0xea, 0x39, - 0xd3, 0xca, 0xe9, 0x65, 0x6a, 0x1c, 0x25, 0xc7, 0x00, 0xf5, 0xba, 0xdc, 0x7a, 0xf0, 0x65, 0x26, - 0x04, 0x0e, 0xea, 0x7b, 0xbe, 0xb1, 0x3a, 0x0d, 0x3e, 0x07, 0xf5, 0xac, 0x95, 0xf1, 0x1d, 0xf0, - 0x2e, 0xba, 0xc2, 0x50, 0x58, 0x1b, 0xf8, 0x48, 0x50, 0x89, 0x64, 0x02, 0x90, 0x92, 0x61, 0x24, - 0x26, 0x1d, 0x64, 0x06, 0x84, 0xc1, 0x8d, 0x35, 0x51, 0xd6, 0x82, 0x22, 0xe2, 0x2f, 0x10, 0x5b, - 0x65, 0x10, 0x39, 0x97, 0x68, 0x65, 0x92, 0x4f, 0x8e, 0x03, 0xd2, 0xca, 0x0b, 0x9e, 0x3c, 0xdf, - 0x07, 0x10, 0x1e, 0x82, 0x87, 0x3a, 0x63, 0xb7, 0xad, 0xe0, 0xaf, 0x03, 0x9a, 0x1c, 0xd9, 0xaf, - 0x84, 0xe7, 0x91, 0x7c, 0x72, 0x02, 0xba, 0x7d, 0x39, 0xbe, 0x8c, 0x07, 0x8e, 0x66, 0xb9, 0x9a, - 0xe1, 0x8f, 0x27, 0x34, 0xcb, 0xe5, 0x10, 0x76, 0x80, 0x22, 0x79, 0x68, 0x39, 0xa6, 0x4f, 0x0f, - 0x2d, 0xcf, 0x2c, 0xdf, 0x87, 0xc7, 0xf3, 0xe2, 0xcf, 0x6e, 0x6b, 0x3c, 0x46, 0xc2, 0xe3, 0xf1, - 0xdb, 0x18, 0x5a, 0xe9, 0x27, 0xec, 0xeb, 0x49, 0x79, 0x3d, 0x29, 0xaf, 0xc8, 0xa4, 0xac, 0x9e, - 0x5e, 0x42, 0x63, 0xd0, 0xb3, 0xf8, 0x3b, 0x01, 0x25, 0x98, 0x58, 0xc5, 0xb7, 0xe3, 0x19, 0x77, - 0x6a, 0x64, 0xf1, 0x9d, 0x3e, 0xad, 0x59, 0xdb, 0x4b, 0x4b, 0xdf, 0xfe, 0xf1, 0xcf, 0x0f, 0xc3, - 0x12, 0x5e, 0x54, 0xba, 0x8b, 0xf3, 0x5a, 0x9a, 0x6b, 0x78, 0xfc, 0xab, 0x80, 0x12, 0xac, 0xcc, - 0x3d, 0x19, 0x45, 0x84, 0x74, 0x4f, 0x46, 0x51, 0xb1, 0x2c, 0xed, 0x00, 0xa3, 0x0d, 0x7c, 0x2f, - 0x9e, 0x51, 0x6b, 0x3c, 0x95, 0xe3, 0xe0, 0xb2, 0x38, 0x51, 0x58, 0xe7, 0x28, 0xc7, 0xec, 0x56, - 0x38, 0xc1, 0x3f, 0x0a, 0x68, 0xb2, 0xa9, 0x69, 0xb1, 0xd2, 0x83, 0x45, 0xbb, 0x2c, 0x16, 0xdf, - 0xed, 0xdf, 0xa1, 0xff, 0x5a, 0x02, 0x5b, 0x17, 0xff, 0x1c, 0x50, 0x83, 0x41, 0xef, 0x8b, 0x5a, - 0x48, 0x2a, 0xf4, 0x47, 0x2d, 0xac, 0x03, 0xa4, 0xbb, 0x40, 0x2d, 0x8d, 0x95, 0x01, 0x8b, 0x8a, - 0x7f, 0x17, 0xd0, 0x74, 0x54, 0xf9, 0xe2, 0x3b, 0x3d, 0xa2, 0x77, 0x95, 0xdf, 0xe2, 0xda, 0x80, - 0x5e, 0x9c, 0xf8, 0x27, 0x40, 0x7c, 0x0b, 0x67, 0x06, 0xed, 0x06, 0x18, 0x50, 0x57, 0x39, 0x6e, - 0x2a, 0xc2, 0x13, 0xfc, 0x8b, 0x80, 0xa6, 0x42, 0x1a, 0x17, 0xa7, 0x7b, 0x50, 0xea, 0x94, 0xe3, - 0xe2, 0xea, 0x20, 0x2e, 0x3c, 0x85, 0x3b, 0x90, 0x82, 0x8c, 0x6f, 0xc7, 0xa7, 0xc0, 0x55, 0x62, - 0xb8, 0xf0, 0xcf, 0x04, 0x34, 0xd3, 0x2e, 0x48, 0xf1, 0xfb, 0x7d, 0x84, 0xef, 0xa2, 0x8c, 0xc5, - 0xbb, 0x03, 0xfb, 0xf5, 0x3f, 0x8c, 0x9d, 0xdc, 0xbb, 0xd5, 0xfe, 0x99, 0x80, 0x70, 0xe7, 0x43, - 0x8c, 0x3f, 0xe8, 0x41, 0x2c, 0x56, 0x2e, 0x8b, 0xeb, 0x17, 0xf0, 0xe4, 0x49, 0x6d, 0x40, 0x52, - 0x1f, 0xe2, 0xf5, 0xf8, 0xa4, 0xba, 0x49, 0x81, 0xf0, 0xe9, 0xfc, 0x2b, 0xa0, 0xf9, 0x17, 0xea, - 0x0a, 0xbc, 0x39, 
0x30, 0xbf, 0x4e, 0x31, 0x24, 0x6e, 0xfd, 0x3f, 0x10, 0x9e, 0xef, 0x3e, 0xe4, - 0xbb, 0x8b, 0xb3, 0x17, 0xce, 0x57, 0x61, 0x77, 0x6a, 0xf3, 0x6e, 0xcd, 0x7c, 0xf6, 0xf4, 0x2c, - 0x25, 0x3c, 0x3f, 0x4b, 0x09, 0x7f, 0x9f, 0xa5, 0x84, 0xef, 0xcf, 0x53, 0x43, 0xcf, 0xcf, 0x53, - 0x43, 0x7f, 0x9e, 0xa7, 0x86, 0xbe, 0x5c, 0x2b, 0x9a, 0x5e, 0xa9, 0xaa, 0xcb, 0x06, 0xad, 0x04, - 0xe1, 0x00, 0xa6, 0x19, 0xbb, 0xde, 0x16, 0x1d, 0x74, 0x81, 0x9e, 0x80, 0xff, 0xfb, 0xbc, 0xf7, - 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x27, 0x00, 0x97, 0xe7, 0xdc, 0x13, 0x00, 0x00, + // 1146 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x5f, 0x6f, 0xdb, 0x54, + 0x14, 0xaf, 0x9b, 0x36, 0x6b, 0x4e, 0xd9, 0x34, 0x5d, 0xca, 0x16, 0xdc, 0x2d, 0x2d, 0x46, 0xda, + 0xba, 0x69, 0xd8, 0x38, 0xac, 0x8c, 0x0a, 0x89, 0xa9, 0xe9, 0x68, 0x57, 0x86, 0xc6, 0x6a, 0x28, + 0x0f, 0x08, 0x29, 0xb2, 0x9d, 0x1b, 0xc7, 0x6a, 0xe3, 0x9b, 0xd9, 0x4e, 0xd6, 0xac, 0xf4, 0x85, + 0x2f, 0x00, 0x12, 0x2f, 0x88, 0x57, 0xa4, 0x21, 0xf1, 0xb0, 0xcf, 0x31, 0xa4, 0x3d, 0x4c, 0xe2, + 0x85, 0x27, 0x84, 0x5a, 0xbe, 0x06, 0x12, 0xf2, 0xbd, 0xd7, 0x8e, 0x9d, 0xd8, 0xcb, 0x1f, 0xfa, + 0xc0, 0xc3, 0xde, 0x72, 0x7d, 0xcf, 0xf9, 0x9d, 0xdf, 0xf9, 0x73, 0xcf, 0x39, 0x81, 0xb7, 0x0c, + 0xdd, 0xe8, 0xee, 0x13, 0x47, 0x79, 0x4c, 0x1c, 0x6c, 0x12, 0xc7, 0xb4, 0xb1, 0x6b, 0x61, 0xe5, + 0x61, 0x1b, 0xbb, 0x5d, 0xb9, 0xe5, 0x12, 0x9f, 0xa0, 0x22, 0x17, 0x91, 0x13, 0x22, 0x72, 0x47, + 0x15, 0x17, 0x2c, 0x62, 0x11, 0x2a, 0xa4, 0x04, 0xbf, 0x98, 0xbc, 0x78, 0xc9, 0x22, 0xc4, 0xda, + 0xc7, 0x8a, 0xde, 0xb2, 0x15, 0xdd, 0x71, 0x88, 0xaf, 0xfb, 0x36, 0x71, 0x3c, 0x7e, 0x7b, 0xdd, + 0x24, 0x5e, 0x93, 0x78, 0x8a, 0xa1, 0x7b, 0xdc, 0x8c, 0xd2, 0x51, 0x0d, 0xec, 0xeb, 0xaa, 0xd2, + 0xd2, 0x2d, 0xdb, 0xa1, 0xc2, 0x5c, 0xb6, 0x14, 0x92, 0x33, 0x7c, 0xd3, 0x6c, 0x60, 0x73, 0xaf, + 0x45, 0x6c, 0xc7, 0x57, 0xfc, 0x03, 0x7e, 0x7f, 0x2d, 0xfd, 0x3e, 0x71, 0xe2, 0xa2, 0x91, 0x9f, + 0xbd, 0x1b, 0xdb, 0xb1, 0xe2, 0x7e, 0x8a, 0x57, 0xd2, 0x45, 0x06, 0xa0, 0xa4, 0x50, 0x0e, 0xb7, + 0x88, 0xd9, 0x08, 0x44, 0x3a, 0x6a, 0xf4, 0xbb, 0x5f, 0x26, 0x19, 0xd6, 0x96, 0xee, 0xea, 0x4d, + 0xaf, 0x9f, 0x7d, 0x52, 0x26, 0x19, 0x65, 0x2a, 0x2a, 0x2d, 0x00, 0xda, 0x09, 0x98, 0x3e, 0xa0, + 0xfa, 0x1a, 0x7e, 0xd8, 0xc6, 0x9e, 0x2f, 0xed, 0xc2, 0xeb, 0x89, 0xaf, 0x5e, 0x8b, 0x38, 0x1e, + 0x46, 0x1f, 0x41, 0x9e, 0xd9, 0x29, 0x0a, 0xcb, 0xc2, 0xca, 0x7c, 0x79, 0x59, 0xce, 0x4a, 0xa0, + 0xcc, 0x34, 0x2b, 0x33, 0xcf, 0xfe, 0x5c, 0x9a, 0xd2, 0xb8, 0x96, 0xb4, 0xc5, 0x8d, 0xdd, 0xc5, + 0x7a, 0x0d, 0xbb, 0xdc, 0x18, 0x7a, 0x13, 0xe6, 0xcc, 0x86, 0x6e, 0x3b, 0x55, 0xbb, 0x46, 0x71, + 0x0b, 0xda, 0x19, 0x7a, 0xde, 0xae, 0xa1, 0x0b, 0x90, 0x6f, 0x60, 0xdb, 0x6a, 0xf8, 0xc5, 0xe9, + 0x65, 0x61, 0x65, 0x46, 0xe3, 0x27, 0xe9, 0x27, 0x81, 0x13, 0x0c, 0x91, 0x38, 0xc1, 0xdb, 0x81, + 0x7c, 0xf0, 0x85, 0x13, 0xbc, 0x9a, 0x4d, 0x70, 0xdb, 0xa9, 0xe1, 0x03, 0x5c, 0xe3, 0x00, 0x5c, + 0x0d, 0x55, 0xe0, 0xb5, 0x3a, 0x71, 0xf7, 0xaa, 0xec, 0xe8, 0x51, 0xb3, 0xf3, 0xe5, 0xa5, 0x6c, + 0x98, 0x4d, 0xe2, 0xee, 0x79, 0xda, 0x7c, 0xa0, 0xc4, 0xa0, 0x3c, 0xe9, 0x22, 0xbc, 0x41, 0xb9, + 0x6d, 0x04, 0x4e, 0x7c, 0x6a, 0x7b, 0x7e, 0x18, 0xd5, 0x55, 0xb8, 0xd0, 0x7f, 0xc1, 0x79, 0x2f, + 0x42, 0x21, 0x0c, 0x41, 0x10, 0xdb, 0xdc, 0x4a, 0x41, 0x9b, 0xe3, 0x31, 0xf0, 0xa4, 0x72, 0x1c, + 0x6f, 0xdb, 0xa9, 0x93, 0xe1, 0x81, 0x93, 0xbe, 0x8e, 0x9b, 0x62, 0x3a, 0xdc, 0x54, 0x05, 0x80, + 0x2b, 0x39, 0x75, 0xc2, 0xc3, 0xf4, 0x76, 0xb6, 0x7f, 0x3d, 0x00, 0xc6, 0x30, 0xf8, 0x29, 0x7d, + 0x01, 
0x22, 0x45, 0xff, 0x38, 0x28, 0xcd, 0x01, 0x5a, 0x8b, 0x50, 0xa0, 0x35, 0x5b, 0x75, 0xda, + 0x4d, 0x6a, 0x60, 0x46, 0x9b, 0xa3, 0x1f, 0xee, 0xb7, 0x9b, 0x09, 0xce, 0xd3, 0x49, 0xce, 0x3a, + 0x2c, 0xa6, 0xa2, 0x9e, 0x22, 0xf1, 0x6f, 0xe0, 0x22, 0x35, 0x11, 0x04, 0x9f, 0xa7, 0x6b, 0x84, + 0x2a, 0xdc, 0x04, 0xe8, 0x35, 0x10, 0x5e, 0x12, 0x57, 0x64, 0xd6, 0x6d, 0xe4, 0xa0, 0xdb, 0xc8, + 0xec, 0xb1, 0xf3, 0x6e, 0x23, 0x3f, 0xd0, 0x2d, 0xcc, 0x61, 0xb5, 0x98, 0xa6, 0xf4, 0x44, 0x80, + 0xe2, 0xa0, 0x79, 0xee, 0xde, 0x3a, 0x9c, 0x09, 0x8b, 0x2e, 0x28, 0x80, 0x31, 0x6a, 0x37, 0xd4, + 0x43, 0x5b, 0x29, 0x3c, 0xaf, 0x0e, 0xe5, 0xc9, 0xec, 0x27, 0x88, 0x7e, 0x09, 0x97, 0x22, 0x9e, + 0x34, 0x1b, 0x7d, 0xb1, 0x9a, 0x34, 0xc3, 0x06, 0x5c, 0xce, 0xc0, 0x3d, 0xb5, 0x20, 0x48, 0x3b, + 0x50, 0xa2, 0x36, 0x36, 0x6d, 0x47, 0xdf, 0xb7, 0x1f, 0xe3, 0xda, 0x18, 0xcf, 0x06, 0x2d, 0xc0, + 0x6c, 0xcb, 0x25, 0x1d, 0x4c, 0x89, 0xcf, 0x69, 0xec, 0x20, 0xfd, 0x9c, 0x83, 0xa5, 0x4c, 0x4c, + 0xce, 0x7c, 0x17, 0x16, 0xea, 0xe1, 0x6d, 0x75, 0xb2, 0x3a, 0x45, 0xf5, 0x01, 0x78, 0xb4, 0x06, + 0xc0, 0x22, 0x4d, 0xc1, 0x58, 0x4a, 0xc5, 0x08, 0x2c, 0x1a, 0x0d, 0x1d, 0x55, 0xa6, 0xf1, 0xd4, + 0x58, 0x5e, 0xa8, 0xea, 0x7d, 0x38, 0xe7, 0xea, 0x8f, 0xaa, 0xbd, 0x21, 0x53, 0xcc, 0xf5, 0xf5, + 0xc4, 0xc4, 0x34, 0x0a, 0x30, 0x34, 0xfd, 0xd1, 0x46, 0xf4, 0x4d, 0x3b, 0xeb, 0xc6, 0x8f, 0x68, + 0x17, 0x90, 0xe1, 0x9b, 0x55, 0xaf, 0x6d, 0x34, 0x6d, 0xcf, 0xb3, 0x89, 0x53, 0xdd, 0xc3, 0xdd, + 0xe2, 0x4c, 0x1f, 0x66, 0x72, 0x42, 0x76, 0x54, 0xf9, 0xf3, 0x48, 0xfe, 0x1e, 0xee, 0x6a, 0xe7, + 0x0d, 0xdf, 0x4c, 0x7c, 0x41, 0x5b, 0x34, 0xe4, 0xa4, 0x5e, 0x9c, 0xa5, 0x48, 0xea, 0x4b, 0x46, + 0x4a, 0x20, 0x96, 0x92, 0x02, 0xa6, 0x2f, 0xf9, 0x70, 0x2d, 0x23, 0x49, 0xbb, 0x8e, 0x6f, 0xef, + 0xdf, 0xa5, 0x93, 0x63, 0xf2, 0x99, 0xd3, 0xab, 0x8d, 0x5c, 0xbc, 0x36, 0x9e, 0xe6, 0xe0, 0xfa, + 0x28, 0x66, 0x5f, 0x95, 0xc9, 0xff, 0xa3, 0x4c, 0xca, 0x4f, 0xce, 0xc2, 0x2c, 0x4d, 0x18, 0xfa, + 0x4e, 0x80, 0x3c, 0x5b, 0x53, 0xd0, 0x8d, 0x6c, 0xb8, 0xc1, 0xed, 0x48, 0x7c, 0x67, 0x44, 0x69, + 0x96, 0x73, 0x69, 0xe5, 0xdb, 0xdf, 0xff, 0xfe, 0x61, 0x5a, 0x42, 0xcb, 0x4a, 0xfa, 0x5a, 0xd6, + 0x51, 0xf9, 0xf6, 0x86, 0x9e, 0x0a, 0x90, 0x67, 0xfd, 0x6c, 0x28, 0xa3, 0xc4, 0x0a, 0x35, 0x94, + 0x51, 0x72, 0x4d, 0x92, 0xb6, 0x28, 0xa3, 0x75, 0x74, 0x3b, 0x9b, 0x51, 0xaf, 0x36, 0x95, 0xc3, + 0xf0, 0xa5, 0x1c, 0x29, 0xac, 0xc9, 0x2a, 0x87, 0xec, 0x49, 0x1c, 0xa1, 0x1f, 0x05, 0x28, 0x44, + 0xdb, 0x0c, 0x52, 0x86, 0xb0, 0xe8, 0x5f, 0x88, 0xc4, 0x77, 0x47, 0x57, 0x18, 0x3d, 0x96, 0x94, + 0xad, 0x87, 0x7e, 0x09, 0xa9, 0xd1, 0x2a, 0x1f, 0x89, 0x5a, 0x6c, 0x48, 0x8c, 0x46, 0x2d, 0x3e, + 0x01, 0xa4, 0x5b, 0x94, 0x9a, 0x8a, 0x94, 0x31, 0x83, 0x8a, 0x7e, 0x13, 0xe0, 0x5c, 0x72, 0xe7, + 0x41, 0x37, 0x87, 0x58, 0x4f, 0x5d, 0xbc, 0xc4, 0xd5, 0x31, 0xb5, 0x38, 0xf1, 0x4f, 0x28, 0xf1, + 0x3b, 0xa8, 0x32, 0x6e, 0x35, 0xd0, 0x26, 0xe2, 0x29, 0x87, 0xd1, 0x2e, 0x70, 0x84, 0x7e, 0x15, + 0x60, 0x3e, 0xb6, 0xdd, 0x20, 0x75, 0x08, 0xa5, 0xc1, 0x45, 0x4c, 0x2c, 0x8f, 0xa3, 0xc2, 0x5d, + 0xb8, 0x49, 0x5d, 0x90, 0xd1, 0x8d, 0x6c, 0x17, 0xf8, 0x7e, 0x10, 0x0f, 0xfc, 0x73, 0x01, 0xce, + 0xf7, 0xaf, 0x22, 0xe8, 0xfd, 0x11, 0xcc, 0xa7, 0xec, 0x44, 0xe2, 0xad, 0xb1, 0xf5, 0x46, 0x7f, + 0x8c, 0x83, 0xdc, 0xd3, 0x62, 0xff, 0x5c, 0x00, 0x34, 0xd8, 0xf7, 0xd0, 0x07, 0x43, 0x88, 0x65, + 0x2e, 0x4a, 0xe2, 0xda, 0x04, 0x9a, 0xdc, 0xa9, 0x75, 0xea, 0xd4, 0x87, 0x68, 0x2d, 0xdb, 0xa9, + 0xb4, 0x39, 0x18, 0xcf, 0xce, 0x3f, 0x02, 0x5c, 0x7e, 0xe9, 0x50, 0x45, 0x1b, 0x63, 0xf3, 0x1b, + 0xdc, 0x04, 0xc4, 0x3b, 0xff, 
0x0d, 0x84, 0xfb, 0xbb, 0x43, 0xfd, 0xbd, 0x87, 0xb6, 0x27, 0xf6, + 0x57, 0x61, 0x3d, 0x35, 0xea, 0xad, 0x95, 0xcf, 0x9e, 0x1d, 0x97, 0x84, 0x17, 0xc7, 0x25, 0xe1, + 0xaf, 0xe3, 0x92, 0xf0, 0xfd, 0x49, 0x69, 0xea, 0xc5, 0x49, 0x69, 0xea, 0x8f, 0x93, 0xd2, 0xd4, + 0x57, 0xab, 0x96, 0xed, 0x37, 0xda, 0x86, 0x6c, 0x92, 0x66, 0x68, 0x8e, 0xc2, 0x44, 0xb6, 0x0f, + 0xfa, 0xac, 0xfb, 0xdd, 0x16, 0xf6, 0x8c, 0x3c, 0xfd, 0xc7, 0xff, 0xde, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x12, 0x48, 0x53, 0xd4, 0x99, 0x11, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2097,47 +2030,9 @@ func (m *QueryFinalizedChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int _ = i var l int _ = l - if len(m.ProofEpochSubmitted) > 0 { - for iNdEx := len(m.ProofEpochSubmitted) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProofEpochSubmitted[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.ProofEpochSealed != nil { - { - size, err := m.ProofEpochSealed.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.ProofHeaderInEpoch != nil { - { - size, err := m.ProofHeaderInEpoch.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.ProofTxInBlock != nil { + if m.Proof != nil { { - size, err := m.ProofTxInBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2263,47 +2158,9 @@ func (m *QueryFinalizedChainInfoUntilHeightResponse) MarshalToSizedBuffer(dAtA [ _ = i var l int _ = l - if len(m.ProofEpochSubmitted) > 0 { - for iNdEx := len(m.ProofEpochSubmitted) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProofEpochSubmitted[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.ProofEpochSealed != nil { + if m.Proof != nil { { - size, err := m.ProofEpochSealed.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.ProofHeaderInEpoch != nil { - { - size, err := m.ProofHeaderInEpoch.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.ProofTxInBlock != nil { - { - size, err := m.ProofTxInBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2612,24 +2469,10 @@ func (m *QueryFinalizedChainInfoResponse) Size() (n int) { l = m.BtcSubmissionKey.Size() n += 1 + l + sovQuery(uint64(l)) } - if m.ProofTxInBlock != nil { - l = m.ProofTxInBlock.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if m.ProofHeaderInEpoch != nil { - l = m.ProofHeaderInEpoch.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if m.ProofEpochSealed != nil { - l = m.ProofEpochSealed.Size() + if m.Proof != nil { + l = m.Proof.Size() n += 1 + l + sovQuery(uint64(l)) } - if len(m.ProofEpochSubmitted) > 0 { - for _, e := range m.ProofEpochSubmitted { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } return n } @@ -2674,24 +2517,10 @@ func (m *QueryFinalizedChainInfoUntilHeightResponse) Size() (n 
int) { l = m.BtcSubmissionKey.Size() n += 1 + l + sovQuery(uint64(l)) } - if m.ProofTxInBlock != nil { - l = m.ProofTxInBlock.Size() + if m.Proof != nil { + l = m.Proof.Size() n += 1 + l + sovQuery(uint64(l)) } - if m.ProofHeaderInEpoch != nil { - l = m.ProofHeaderInEpoch.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if m.ProofEpochSealed != nil { - l = m.ProofEpochSealed.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if len(m.ProofEpochSubmitted) > 0 { - for _, e := range m.ProofEpochSubmitted { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } return n } @@ -4244,79 +4073,7 @@ func (m *QueryFinalizedChainInfoResponse) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofTxInBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ProofTxInBlock == nil { - m.ProofTxInBlock = &types3.TxProof{} - } - if err := m.ProofTxInBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofHeaderInEpoch", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ProofHeaderInEpoch == nil { - m.ProofHeaderInEpoch = &crypto.Proof{} - } - if err := m.ProofHeaderInEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSealed", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4343,44 +4100,10 @@ func (m *QueryFinalizedChainInfoResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProofEpochSealed == nil { - m.ProofEpochSealed = &ProofEpochSealed{} - } - if err := m.ProofEpochSealed.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSubmitted", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery + if m.Proof == nil { + m.Proof = &ProofFinalizedChainInfo{} } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProofEpochSubmitted = append(m.ProofEpochSubmitted, &types2.TransactionInfo{}) - if err := 
m.ProofEpochSubmitted[len(m.ProofEpochSubmitted)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4701,7 +4424,7 @@ func (m *QueryFinalizedChainInfoUntilHeightResponse) Unmarshal(dAtA []byte) erro iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofTxInBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4728,116 +4451,10 @@ func (m *QueryFinalizedChainInfoUntilHeightResponse) Unmarshal(dAtA []byte) erro if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProofTxInBlock == nil { - m.ProofTxInBlock = &types3.TxProof{} - } - if err := m.ProofTxInBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofHeaderInEpoch", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ProofHeaderInEpoch == nil { - m.ProofHeaderInEpoch = &crypto.Proof{} - } - if err := m.ProofHeaderInEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSealed", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ProofEpochSealed == nil { - m.ProofEpochSealed = &ProofEpochSealed{} - } - if err := m.ProofEpochSealed.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSubmitted", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.Proof == nil { + m.Proof = &ProofFinalizedChainInfo{} } - m.ProofEpochSubmitted = append(m.ProofEpochSubmitted, &types2.TransactionInfo{}) - if err := m.ProofEpochSubmitted[len(m.ProofEpochSubmitted)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/x/zoneconcierge/types/zoneconcierge.go b/x/zoneconcierge/types/zoneconcierge.go index 7a50dfff6..be4071344 100644 --- a/x/zoneconcierge/types/zoneconcierge.go +++ 
b/x/zoneconcierge/types/zoneconcierge.go @@ -1,5 +1,10 @@ package types +import ( + "bytes" + "fmt" +) + func (p *ProofEpochSealed) ValidateBasic() error { if p.ValidatorSet == nil { return ErrInvalidProofEpochSealed.Wrap("ValidatorSet is nil") @@ -12,3 +17,77 @@ func (p *ProofEpochSealed) ValidateBasic() error { } return nil } + +func (ih *IndexedHeader) ValidateBasic() error { + if len(ih.ChainId) == 0 { + return fmt.Errorf("empty ChainID") + } else if len(ih.Hash) == 0 { + return fmt.Errorf("empty Hash") + } else if ih.BabylonHeader == nil { + return fmt.Errorf("nil BabylonHeader") + } else if len(ih.BabylonTxHash) == 0 { + return fmt.Errorf("empty BabylonTxHash") + } + return nil +} + +func (ih *IndexedHeader) Equal(ih2 *IndexedHeader) bool { + if ih.ValidateBasic() != nil || ih2.ValidateBasic() != nil { + return false + } + + if ih.ChainId != ih2.ChainId { + return false + } else if !bytes.Equal(ih.Hash, ih2.Hash) { + return false + } else if ih.Height != ih2.Height { + return false + } else if !bytes.Equal(ih.BabylonHeader.LastCommitHash, ih2.BabylonHeader.LastCommitHash) { + return false + } else if ih.BabylonEpoch != ih2.BabylonEpoch { + return false + } + return bytes.Equal(ih.BabylonTxHash, ih2.BabylonTxHash) +} + +func (ci *ChainInfo) Equal(ci2 *ChainInfo) bool { + if ci.ValidateBasic() != nil || ci2.ValidateBasic() != nil { + return false + } + + if ci.ChainId != ci2.ChainId { + return false + } + if !ci.LatestHeader.Equal(ci2.LatestHeader) { + return false + } + if len(ci.LatestForks.Headers) != len(ci2.LatestForks.Headers) { + return false + } + for i := 0; i < len(ci.LatestForks.Headers); i++ { + if !ci.LatestForks.Headers[i].Equal(ci2.LatestForks.Headers[i]) { + return false + } + } + return ci.TimestampedHeadersCount == ci2.TimestampedHeadersCount +} + +func (ci *ChainInfo) ValidateBasic() error { + if len(ci.ChainId) == 0 { + return ErrInvalidChainInfo.Wrap("ChainID is empty") + } else if ci.LatestHeader == nil { + return ErrInvalidChainInfo.Wrap("LatestHeader is nil") + } else if ci.LatestForks == nil { + return ErrInvalidChainInfo.Wrap("LatestForks is nil") + } + if err := ci.LatestHeader.ValidateBasic(); err != nil { + return err + } + for _, forkHeader := range ci.LatestForks.Headers { + if err := forkHeader.ValidateBasic(); err != nil { + return err + } + } + + return nil +} diff --git a/x/zoneconcierge/types/zoneconcierge.pb.go b/x/zoneconcierge/types/zoneconcierge.pb.go index af6967140..bfb3066fc 100644 --- a/x/zoneconcierge/types/zoneconcierge.pb.go +++ b/x/zoneconcierge/types/zoneconcierge.pb.go @@ -5,6 +5,7 @@ package types import ( fmt "fmt" + types2 "github.com/babylonchain/babylon/x/btccheckpoint/types" types1 "github.com/babylonchain/babylon/x/checkpointing/types" proto "github.com/gogo/protobuf/proto" crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" @@ -322,11 +323,86 @@ func (m *ProofEpochSealed) GetProofEpochValSet() *crypto.ProofOps { return nil } +// ProofFinalizedChainInfo is a set of proofs that attest a chain info is BTC-finalised +type ProofFinalizedChainInfo struct { + // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block + ProofTxInBlock *types.TxProof `protobuf:"bytes,4,opt,name=proof_tx_in_block,json=proofTxInBlock,proto3" json:"proof_tx_in_block,omitempty"` + // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch + ProofHeaderInEpoch *crypto.Proof `protobuf:"bytes,5,opt,name=proof_header_in_epoch,json=proofHeaderInEpoch,proto3" 
json:"proof_header_in_epoch,omitempty"` + // proof_epoch_sealed is the proof that the epoch is sealed + ProofEpochSealed *ProofEpochSealed `protobuf:"bytes,6,opt,name=proof_epoch_sealed,json=proofEpochSealed,proto3" json:"proof_epoch_sealed,omitempty"` + // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger + // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission + ProofEpochSubmitted []*types2.TransactionInfo `protobuf:"bytes,7,rep,name=proof_epoch_submitted,json=proofEpochSubmitted,proto3" json:"proof_epoch_submitted,omitempty"` +} + +func (m *ProofFinalizedChainInfo) Reset() { *m = ProofFinalizedChainInfo{} } +func (m *ProofFinalizedChainInfo) String() string { return proto.CompactTextString(m) } +func (*ProofFinalizedChainInfo) ProtoMessage() {} +func (*ProofFinalizedChainInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c76d28ce8dde4532, []int{4} +} +func (m *ProofFinalizedChainInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProofFinalizedChainInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofFinalizedChainInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProofFinalizedChainInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofFinalizedChainInfo.Merge(m, src) +} +func (m *ProofFinalizedChainInfo) XXX_Size() int { + return m.Size() +} +func (m *ProofFinalizedChainInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ProofFinalizedChainInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofFinalizedChainInfo proto.InternalMessageInfo + +func (m *ProofFinalizedChainInfo) GetProofTxInBlock() *types.TxProof { + if m != nil { + return m.ProofTxInBlock + } + return nil +} + +func (m *ProofFinalizedChainInfo) GetProofHeaderInEpoch() *crypto.Proof { + if m != nil { + return m.ProofHeaderInEpoch + } + return nil +} + +func (m *ProofFinalizedChainInfo) GetProofEpochSealed() *ProofEpochSealed { + if m != nil { + return m.ProofEpochSealed + } + return nil +} + +func (m *ProofFinalizedChainInfo) GetProofEpochSubmitted() []*types2.TransactionInfo { + if m != nil { + return m.ProofEpochSubmitted + } + return nil +} + func init() { proto.RegisterType((*IndexedHeader)(nil), "babylon.zoneconcierge.v1.IndexedHeader") proto.RegisterType((*Forks)(nil), "babylon.zoneconcierge.v1.Forks") proto.RegisterType((*ChainInfo)(nil), "babylon.zoneconcierge.v1.ChainInfo") proto.RegisterType((*ProofEpochSealed)(nil), "babylon.zoneconcierge.v1.ProofEpochSealed") + proto.RegisterType((*ProofFinalizedChainInfo)(nil), "babylon.zoneconcierge.v1.ProofFinalizedChainInfo") } func init() { @@ -334,43 +410,52 @@ func init() { } var fileDescriptor_c76d28ce8dde4532 = []byte{ - // 571 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, - 0x10, 0xcd, 0x36, 0x69, 0xfa, 0xeb, 0x26, 0xe9, 0x2f, 0x32, 0x12, 0xb8, 0x05, 0x4c, 0x94, 0x4a, - 0x25, 0x1c, 0xb0, 0x45, 0x10, 0x17, 0x2e, 0x88, 0x54, 0x45, 0x6d, 0x41, 0x2a, 0x72, 0x50, 0x91, - 0xb8, 0x58, 0x1b, 0x7b, 0x12, 0xaf, 0xe2, 0xec, 0x5a, 0xde, 0x6d, 0x94, 0xf0, 0x29, 0xf8, 0x58, - 0x1c, 0x7b, 0xe4, 0x88, 0x12, 0x3e, 0x02, 0x17, 0x6e, 0x28, 0xbb, 0xeb, 0xfc, 0x41, 0x14, 0xb8, - 0x58, 0x9e, 0x9d, 0x37, 0x6f, 0xdf, 0xbc, 0x19, 0x1b, 0x3f, 0xea, 0x91, 0xde, 0x34, 0xe1, 0xcc, - 0xfb, 0xc8, 0x19, 0x84, 0x9c, 0x85, 
0x14, 0xb2, 0x01, 0x6c, 0x46, 0x6e, 0x9a, 0x71, 0xc9, 0x2d, - 0xdb, 0x40, 0xdd, 0xcd, 0xe4, 0xf8, 0xc9, 0xc1, 0x61, 0x4e, 0x12, 0xc6, 0x10, 0x0e, 0x53, 0x4e, - 0x99, 0xa4, 0x6c, 0xe0, 0xf5, 0x12, 0x11, 0x0c, 0x61, 0xaa, 0xcb, 0x0f, 0x8e, 0x7e, 0x0f, 0x5a, - 0x45, 0x06, 0x77, 0x4f, 0x02, 0x8b, 0x20, 0x1b, 0x51, 0x26, 0x3d, 0x39, 0x4d, 0x41, 0xe8, 0xa7, - 0xc9, 0xde, 0x5f, 0xcb, 0x86, 0xd9, 0x34, 0x95, 0xdc, 0x4b, 0x33, 0xce, 0xfb, 0x3a, 0xdd, 0xfc, - 0x86, 0x70, 0xed, 0x8c, 0x45, 0x30, 0x81, 0xe8, 0x14, 0x48, 0x04, 0x99, 0xb5, 0x8f, 0xff, 0x0b, - 0x63, 0x42, 0x59, 0x40, 0x23, 0x1b, 0x35, 0x50, 0x6b, 0xd7, 0xdf, 0x51, 0xf1, 0x59, 0x64, 0x59, - 0xb8, 0x14, 0x13, 0x11, 0xdb, 0x5b, 0x0d, 0xd4, 0xaa, 0xfa, 0xea, 0xdd, 0xba, 0x8d, 0xcb, 0x31, - 0xd0, 0x41, 0x2c, 0xed, 0x62, 0x03, 0xb5, 0x4a, 0xbe, 0x89, 0xac, 0x17, 0x78, 0xcf, 0xe8, 0x0f, - 0x62, 0x45, 0x6c, 0x97, 0x1a, 0xa8, 0x55, 0x69, 0xdb, 0xee, 0x4a, 0x90, 0xab, 0x85, 0xea, 0x8b, - 0xfd, 0x9a, 0xc1, 0x1b, 0x1d, 0x87, 0x38, 0x3f, 0x08, 0x20, 0xe5, 0x61, 0x6c, 0x6f, 0x2b, 0xfe, - 0xaa, 0x39, 0x3c, 0x59, 0x9c, 0x59, 0x47, 0xf8, 0xff, 0x1c, 0x24, 0x27, 0x81, 0x12, 0x57, 0x56, - 0xe2, 0xf2, 0xda, 0x77, 0x93, 0x53, 0x22, 0xe2, 0xe6, 0x39, 0xde, 0x7e, 0xc5, 0xb3, 0xa1, 0xb0, - 0x5e, 0xe2, 0x1d, 0x2d, 0x47, 0xd8, 0xc5, 0x46, 0xb1, 0x55, 0x69, 0x3f, 0x74, 0x6f, 0x9a, 0x92, - 0xbb, 0xe1, 0x8b, 0x9f, 0xd7, 0x35, 0xbf, 0x23, 0xbc, 0x7b, 0xac, 0x1c, 0x61, 0x7d, 0xfe, 0x27, - 0xbb, 0xde, 0xe0, 0x5a, 0x42, 0x24, 0x08, 0x99, 0x3b, 0xb0, 0xa5, 0x1c, 0xf8, 0xe7, 0x1b, 0xab, - 0xba, 0xda, 0xf8, 0xd1, 0xc1, 0x26, 0x0e, 0xfa, 0x8b, 0x4e, 0x94, 0xdd, 0x95, 0xf6, 0x83, 0x9b, - 0xc9, 0x54, 0xc3, 0x7e, 0x45, 0x17, 0xe9, 0xee, 0x9f, 0xe3, 0x7d, 0x49, 0x47, 0x20, 0x24, 0x19, - 0xa5, 0x10, 0x19, 0x59, 0x22, 0x08, 0xf9, 0x15, 0x93, 0x6a, 0x3e, 0x25, 0xff, 0xce, 0x1a, 0x40, - 0xdf, 0x2c, 0x8e, 0x17, 0xe9, 0xe6, 0x0f, 0x84, 0xeb, 0x6f, 0x17, 0x9b, 0xa3, 0x9c, 0xef, 0x02, - 0x49, 0x20, 0xb2, 0x7c, 0x5c, 0x1b, 0x93, 0x84, 0x46, 0x44, 0xf2, 0x2c, 0x10, 0x20, 0x6d, 0xa4, - 0x4c, 0x7d, 0xbc, 0x54, 0xb5, 0xb1, 0xbb, 0x0b, 0x55, 0x97, 0x39, 0xfc, 0x3d, 0x95, 0x71, 0x27, - 0x11, 0xaf, 0x61, 0xea, 0x57, 0x97, 0x1c, 0x5d, 0x90, 0xd6, 0x09, 0xae, 0xab, 0x0d, 0xd5, 0x63, - 0x0f, 0x28, 0xeb, 0x73, 0xe3, 0xdc, 0xdd, 0xf5, 0xdd, 0xd1, 0xcb, 0xec, 0x2a, 0x49, 0x17, 0xa9, - 0xf0, 0xf7, 0xd2, 0xa5, 0x38, 0x35, 0x98, 0x73, 0x7c, 0x6b, 0x9d, 0x66, 0x4c, 0x12, 0x25, 0xb0, - 0xf8, 0x77, 0xa6, 0xfa, 0x8a, 0xe9, 0x92, 0x24, 0x5d, 0x90, 0x9d, 0x8b, 0xcf, 0x33, 0x07, 0x5d, - 0xcf, 0x1c, 0xf4, 0x75, 0xe6, 0xa0, 0x4f, 0x73, 0xa7, 0x70, 0x3d, 0x77, 0x0a, 0x5f, 0xe6, 0x4e, - 0xe1, 0xc3, 0xb3, 0x01, 0x95, 0xf1, 0x55, 0xcf, 0x0d, 0xf9, 0xc8, 0x33, 0x3d, 0xab, 0xf1, 0xe7, - 0x81, 0x37, 0xf9, 0xe5, 0x47, 0xa1, 0x56, 0xbe, 0x57, 0x56, 0x5f, 0xdf, 0xd3, 0x9f, 0x01, 0x00, - 0x00, 0xff, 0xff, 0xd6, 0xfb, 0xc0, 0x15, 0x4e, 0x04, 0x00, 0x00, + // 719 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x51, 0x6f, 0xd3, 0x3c, + 0x14, 0x5d, 0xd6, 0x6e, 0xfb, 0xe6, 0xb6, 0xfb, 0x46, 0x26, 0x58, 0x36, 0xa0, 0x94, 0x4e, 0x1a, + 0x1d, 0x12, 0xa9, 0x56, 0xc4, 0x0b, 0x2f, 0x88, 0x8e, 0x4d, 0xeb, 0x86, 0x34, 0x94, 0x55, 0x03, + 0x21, 0xa1, 0xc8, 0x49, 0xdc, 0xc6, 0x6a, 0x6a, 0x87, 0xd8, 0xad, 0xd2, 0xfd, 0x0a, 0x7e, 0x16, + 0x8f, 0x7b, 0xe4, 0x11, 0x6d, 0xf0, 0x0f, 0x78, 0xe1, 0x0d, 0xc5, 0x76, 0xda, 0xb4, 0x6c, 0xc0, + 0x4b, 0x55, 0xe7, 0x9e, 0x7b, 0xee, 0xf1, 0xb9, 0xf7, 0x1a, 0xec, 0x38, 0xd0, 0x19, 0x05, 0x94, + 0xd4, 0xcf, 0x29, 0x41, 0x2e, 0x25, 0x2e, 
0x46, 0x51, 0x17, 0x4d, 0x9f, 0xcc, 0x30, 0xa2, 0x9c, + 0xea, 0x86, 0x82, 0x9a, 0xd3, 0xc1, 0xe1, 0xee, 0xe6, 0x3d, 0x8e, 0x88, 0x87, 0xa2, 0x3e, 0x26, + 0xbc, 0xce, 0x47, 0x21, 0x62, 0xf2, 0x57, 0xe6, 0x6d, 0xde, 0xcf, 0x44, 0xdd, 0x68, 0x14, 0x72, + 0x5a, 0x0f, 0x23, 0x4a, 0x3b, 0x2a, 0x3c, 0x56, 0xe0, 0x70, 0xd7, 0xf5, 0x91, 0xdb, 0x0b, 0x69, + 0x82, 0x9c, 0x3a, 0x29, 0xe8, 0x56, 0x0a, 0x9d, 0x44, 0x30, 0xe9, 0xd6, 0x9d, 0x80, 0xd9, 0x3d, + 0x34, 0x52, 0xa0, 0x87, 0xd7, 0x83, 0x3e, 0x0e, 0x50, 0x94, 0x42, 0xb6, 0xaf, 0x87, 0xcc, 0xd6, + 0xab, 0x7e, 0xd3, 0x40, 0xa9, 0x45, 0x3c, 0x14, 0x23, 0xef, 0x10, 0x41, 0x0f, 0x45, 0xfa, 0x06, + 0xf8, 0xcf, 0xf5, 0x21, 0x26, 0x36, 0xf6, 0x0c, 0xad, 0xa2, 0xd5, 0x96, 0xad, 0x25, 0x71, 0x6e, + 0x79, 0xba, 0x0e, 0xf2, 0x3e, 0x64, 0xbe, 0x31, 0x5f, 0xd1, 0x6a, 0x45, 0x4b, 0xfc, 0xd7, 0xef, + 0x80, 0x45, 0x1f, 0xe1, 0xae, 0xcf, 0x8d, 0x5c, 0x45, 0xab, 0xe5, 0x2d, 0x75, 0xd2, 0x5f, 0x80, + 0x15, 0x25, 0xc1, 0xf6, 0x05, 0xb1, 0x91, 0xaf, 0x68, 0xb5, 0x42, 0xc3, 0x30, 0x27, 0x5e, 0x99, + 0xd2, 0x43, 0x59, 0xd8, 0x2a, 0x29, 0xbc, 0xd2, 0xb1, 0x05, 0xd2, 0x0f, 0x36, 0x0a, 0xa9, 0xeb, + 0x1b, 0x0b, 0x82, 0xbf, 0xa8, 0x3e, 0xee, 0x27, 0xdf, 0xf4, 0x6d, 0xf0, 0x7f, 0x0a, 0xe2, 0xb1, + 0x2d, 0xc4, 0x2d, 0x0a, 0x71, 0x69, 0x6e, 0x3b, 0x3e, 0x84, 0xcc, 0xaf, 0x1e, 0x81, 0x85, 0x03, + 0x1a, 0xf5, 0x98, 0xfe, 0x12, 0x2c, 0x49, 0x39, 0xcc, 0xc8, 0x55, 0x72, 0xb5, 0x42, 0xe3, 0x91, + 0x79, 0x53, 0xcf, 0xcd, 0x29, 0x5f, 0xac, 0x34, 0xaf, 0xfa, 0x43, 0x03, 0xcb, 0x7b, 0xc2, 0x11, + 0xd2, 0xa1, 0x7f, 0xb2, 0xeb, 0x35, 0x28, 0x05, 0x90, 0x23, 0xc6, 0x53, 0x07, 0xe6, 0x85, 0x03, + 0xff, 0x5c, 0xb1, 0x28, 0xb3, 0x95, 0x1f, 0x4d, 0xa0, 0xce, 0x76, 0x27, 0xb9, 0x89, 0xb0, 0xbb, + 0xd0, 0x78, 0x70, 0x33, 0x99, 0xb8, 0xb0, 0x55, 0x90, 0x49, 0xf2, 0xf6, 0xcf, 0xc1, 0x06, 0xc7, + 0x7d, 0xc4, 0x38, 0xec, 0x87, 0xc8, 0x53, 0xb2, 0x98, 0xed, 0xd2, 0x01, 0xe1, 0xa2, 0x3f, 0x79, + 0x6b, 0x3d, 0x03, 0x90, 0x95, 0xd9, 0x5e, 0x12, 0xae, 0xfe, 0xd4, 0xc0, 0xea, 0x9b, 0x64, 0xa8, + 0x85, 0xf3, 0xa7, 0x08, 0x06, 0xc8, 0xd3, 0x2d, 0x50, 0x1a, 0xc2, 0x00, 0x7b, 0x90, 0xd3, 0xc8, + 0x66, 0x88, 0x1b, 0x9a, 0x30, 0xf5, 0xc9, 0x58, 0xd5, 0xd4, 0xf8, 0x25, 0xaa, 0xce, 0x52, 0xf8, + 0x5b, 0xcc, 0xfd, 0x66, 0xc0, 0x8e, 0xd1, 0xc8, 0x2a, 0x8e, 0x39, 0x4e, 0x11, 0xd7, 0xf7, 0xc1, + 0xaa, 0x58, 0x1e, 0xd9, 0x76, 0x1b, 0x93, 0x0e, 0x55, 0xce, 0xdd, 0xcd, 0xce, 0x8e, 0xdc, 0x33, + 0x53, 0x48, 0x3a, 0x09, 0x99, 0xb5, 0x12, 0x8e, 0xc5, 0x89, 0xc6, 0x1c, 0x81, 0xb5, 0x2c, 0xcd, + 0x10, 0x06, 0x42, 0x60, 0xee, 0xef, 0x4c, 0xab, 0x13, 0xa6, 0x33, 0x18, 0x9c, 0x22, 0x5e, 0xfd, + 0x3e, 0x0f, 0xd6, 0x45, 0xf8, 0x00, 0x13, 0x18, 0xe0, 0x73, 0xe4, 0x4d, 0x06, 0xe0, 0x15, 0xb8, + 0x25, 0xeb, 0xf0, 0xd8, 0xc6, 0xc4, 0x76, 0x02, 0xea, 0xf6, 0xd4, 0xac, 0x6f, 0xfc, 0x3e, 0xeb, + 0xed, 0x58, 0xf0, 0x28, 0xb5, 0xed, 0xb8, 0x45, 0x9a, 0x49, 0x82, 0x7e, 0x0c, 0x6e, 0x4b, 0x16, + 0xd9, 0x93, 0x84, 0x69, 0x32, 0xf5, 0x33, 0x5b, 0x93, 0xd5, 0x6b, 0xe9, 0x22, 0x4d, 0x76, 0xaa, + 0xa5, 0xb6, 0xe2, 0x1d, 0xd0, 0xb3, 0x57, 0x67, 0xa2, 0x57, 0x62, 0x31, 0x0a, 0x8d, 0xc7, 0x37, + 0x0f, 0xcc, 0x6c, 0x77, 0xb3, 0x46, 0xa8, 0x7e, 0x7f, 0x48, 0x65, 0x2a, 0xe6, 0x81, 0xd3, 0xc7, + 0x9c, 0x23, 0xcf, 0x58, 0x12, 0x7d, 0xdf, 0x19, 0x93, 0x4f, 0xbf, 0x6d, 0xc3, 0x5d, 0xb3, 0x1d, + 0x41, 0xc2, 0xa0, 0xcb, 0x31, 0x15, 0xb6, 0x59, 0x6b, 0x19, 0xee, 0x94, 0xa5, 0x79, 0xf2, 0xf9, + 0xb2, 0xac, 0x5d, 0x5c, 0x96, 0xb5, 0xaf, 0x97, 0x65, 0xed, 0xd3, 0x55, 0x79, 0xee, 0xe2, 0xaa, + 0x3c, 0xf7, 0xe5, 0xaa, 0x3c, 0xf7, 0xfe, 0x59, 0x17, 0x73, 0x7f, 
0xe0, 0x98, 0x2e, 0xed, 0xd7, + 0x55, 0x0d, 0xb1, 0x66, 0xe9, 0xa1, 0x1e, 0xcf, 0x3c, 0xef, 0xc2, 0x6e, 0x67, 0x51, 0xbc, 0x72, + 0x4f, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x91, 0x2a, 0x68, 0xc0, 0x04, 0x06, 0x00, 0x00, } func (m *IndexedHeader) Marshal() (dAtA []byte, err error) { @@ -596,6 +681,79 @@ func (m *ProofEpochSealed) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ProofFinalizedChainInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofFinalizedChainInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProofFinalizedChainInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProofEpochSubmitted) > 0 { + for iNdEx := len(m.ProofEpochSubmitted) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ProofEpochSubmitted[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.ProofEpochSealed != nil { + { + size, err := m.ProofEpochSealed.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.ProofHeaderInEpoch != nil { + { + size, err := m.ProofHeaderInEpoch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ProofTxInBlock != nil { + { + size, err := m.ProofTxInBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} + func encodeVarintZoneconcierge(dAtA []byte, offset int, v uint64) int { offset -= sovZoneconcierge(v) base := offset @@ -700,6 +858,33 @@ func (m *ProofEpochSealed) Size() (n int) { return n } +func (m *ProofFinalizedChainInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProofTxInBlock != nil { + l = m.ProofTxInBlock.Size() + n += 1 + l + sovZoneconcierge(uint64(l)) + } + if m.ProofHeaderInEpoch != nil { + l = m.ProofHeaderInEpoch.Size() + n += 1 + l + sovZoneconcierge(uint64(l)) + } + if m.ProofEpochSealed != nil { + l = m.ProofEpochSealed.Size() + n += 1 + l + sovZoneconcierge(uint64(l)) + } + if len(m.ProofEpochSubmitted) > 0 { + for _, e := range m.ProofEpochSubmitted { + l = e.Size() + n += 1 + l + sovZoneconcierge(uint64(l)) + } + } + return n +} + func sovZoneconcierge(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1343,6 +1528,198 @@ func (m *ProofEpochSealed) Unmarshal(dAtA []byte) error { } return nil } +func (m *ProofFinalizedChainInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofFinalizedChainInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { 
+ return fmt.Errorf("proto: ProofFinalizedChainInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofTxInBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthZoneconcierge + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthZoneconcierge + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofTxInBlock == nil { + m.ProofTxInBlock = &types.TxProof{} + } + if err := m.ProofTxInBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeaderInEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthZoneconcierge + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthZoneconcierge + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofHeaderInEpoch == nil { + m.ProofHeaderInEpoch = &crypto.Proof{} + } + if err := m.ProofHeaderInEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSealed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthZoneconcierge + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthZoneconcierge + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofEpochSealed == nil { + m.ProofEpochSealed = &ProofEpochSealed{} + } + if err := m.ProofEpochSealed.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSubmitted", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthZoneconcierge + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthZoneconcierge + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofEpochSubmitted = append(m.ProofEpochSubmitted, &types2.TransactionInfo{}) + if err := m.ProofEpochSubmitted[len(m.ProofEpochSubmitted)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipZoneconcierge(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthZoneconcierge + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipZoneconcierge(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 From b6735b91176558bd809479b76a5eee50ab5539de Mon Sep 17 00:00:00 2001 From: KonradStaniec Date: Thu, 12 Jan 2023 08:03:31 +0100 Subject: [PATCH 14/37] Add integration test for zoneconcierge checkpointing (#269) * Add integration test checking info about opposing cz chain --- contrib/images/babylond-dlv/Dockerfile | 2 +- contrib/images/babylond-env/Dockerfile | 2 +- test/e2e/configurer/chain/chain.go | 4 +- test/e2e/configurer/chain/commands.go | 124 +++++++++++++++++++++++++ test/e2e/configurer/chain/node.go | 28 ++++++ test/e2e/configurer/chain/queries.go | 88 ++++++++++++++++++ test/e2e/containers/containers.go | 2 +- test/e2e/e2e_test.go | 35 +++++++ test/e2e/initialization/config.go | 39 +++++++- test/e2e/initialization/node.go | 6 +- testutil/datagen/btc_header_info.go | 27 ++++++ x/btccheckpoint/client/cli/tx.go | 45 +++++++++ x/btccheckpoint/types/types.go | 19 ++++ x/btclightclient/client/cli/tx.go | 1 + 14 files changed, 412 insertions(+), 10 deletions(-) diff --git a/contrib/images/babylond-dlv/Dockerfile b/contrib/images/babylond-dlv/Dockerfile index 4b85bfa5e..79f1cfaba 100644 --- a/contrib/images/babylond-dlv/Dockerfile +++ b/contrib/images/babylond-dlv/Dockerfile @@ -16,7 +16,7 @@ COPY --from=build /work/build/babylond /babylond/ COPY --from=build /go/bin/dlv /usr/local/bin WORKDIR /babylond -EXPOSE 26656 26657 2345 +EXPOSE 26656 26657 2345 1317 ENTRYPOINT ["/usr/bin/wrapper.sh"] CMD ["start", "--log_format", "plain"] STOPSIGNAL SIGTERM diff --git a/contrib/images/babylond-env/Dockerfile b/contrib/images/babylond-env/Dockerfile index c6774dd59..9fe29b87c 100644 --- a/contrib/images/babylond-env/Dockerfile +++ b/contrib/images/babylond-env/Dockerfile @@ -15,7 +15,7 @@ VOLUME /babylond COPY --from=build /work/build/babylond /babylond/ WORKDIR /babylond -EXPOSE 26656 26657 +EXPOSE 26656 26657 1317 ENTRYPOINT ["/usr/bin/wrapper.sh"] CMD ["start", "--log_format", "plain"] STOPSIGNAL SIGTERM diff --git a/test/e2e/configurer/chain/chain.go b/test/e2e/configurer/chain/chain.go index 8b792f597..fd0d5d437 100644 --- a/test/e2e/configurer/chain/chain.go +++ b/test/e2e/configurer/chain/chain.go @@ -151,7 +151,7 @@ func (c *Config) SendIBC(dstChain *Config, recipient string, token sdk.Coin) { // The default node is the first one created. Returns error if no // ndoes created. 
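// (This change also exports GetNodeAtIndex below, so tests can address a specific node
// directly; the zoneconcierge e2e test later in this series picks the non-validator node
// with chainA.GetNodeAtIndex(2).)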
func (c *Config) GetDefaultNode() (*NodeConfig, error) { - return c.getNodeAtIndex(defaultNodeIndex) + return c.GetNodeAtIndex(defaultNodeIndex) } // GetPersistentPeers returns persistent peers from every node @@ -164,7 +164,7 @@ func (c *Config) GetPersistentPeers() []string { return peers } -func (c *Config) getNodeAtIndex(nodeIndex int) (*NodeConfig, error) { +func (c *Config) GetNodeAtIndex(nodeIndex int) (*NodeConfig, error) { if nodeIndex > len(c.NodeConfigs) { return nil, fmt.Errorf("node index (%d) is greter than the number of nodes available (%d)", nodeIndex, len(c.NodeConfigs)) } diff --git a/test/e2e/configurer/chain/commands.go b/test/e2e/configurer/chain/commands.go index 87e27f02c..ce6a385ac 100644 --- a/test/e2e/configurer/chain/commands.go +++ b/test/e2e/configurer/chain/commands.go @@ -1,9 +1,22 @@ package chain import ( + "encoding/hex" "encoding/json" "fmt" + btccheckpointtypes "github.com/babylonchain/babylon/x/btccheckpoint/types" + cttypes "github.com/babylonchain/babylon/x/checkpointing/types" + "github.com/cosmos/cosmos-sdk/types/bech32" + + txformat "github.com/babylonchain/babylon/btctxformatter" + bbn "github.com/babylonchain/babylon/types" + + "github.com/babylonchain/babylon/test/e2e/initialization" + "github.com/babylonchain/babylon/test/e2e/util" + "github.com/babylonchain/babylon/testutil/datagen" + blc "github.com/babylonchain/babylon/x/btclightclient/types" + "github.com/stretchr/testify/require" ) @@ -38,3 +51,114 @@ func (n *NodeConfig) BankSend(amount string, sendAddress string, receiveAddress require.NoError(n.t, err) n.LogActionF("successfully sent bank sent %s from address %s to %s", amount, sendAddress, receiveAddress) } + +func (n *NodeConfig) SendHeaderHex(headerHex string) { + n.LogActionF("btclightclient sending header %s", headerHex) + cmd := []string{"./babylond", "tx", "btclightclient", "insert-header", headerHex, "--from=val"} + _, _, err := n.containerManager.ExecTxCmd(n.t, n.chainId, n.Name, cmd) + require.NoError(n.t, err) + n.LogActionF("successfully inserted header %s", headerHex) +} + +func (n *NodeConfig) InsertNewEmptyBtcHeader() *blc.BTCHeaderInfo { + tip, err := n.QueryTip() + require.NoError(n.t, err) + n.t.Logf("Retrieved current tip of btc headerchain. Height: %d", tip.Height) + child := datagen.GenRandomValidBTCHeaderInfoWithParent(*tip) + n.SendHeaderHex(child.Header.MarshalHex()) + n.WaitUntilBtcHeight(tip.Height + 1) + return child +} + +func (n *NodeConfig) InsertHeader(h *bbn.BTCHeaderBytes) { + tip, err := n.QueryTip() + require.NoError(n.t, err) + n.t.Logf("Retrieved current tip of btc headerchain. 
Height: %d", tip.Height) + n.SendHeaderHex(h.MarshalHex()) + n.WaitUntilBtcHeight(tip.Height + 1) +} + +func (n *NodeConfig) InsertProofs(p1 *btccheckpointtypes.BTCSpvProof, p2 *btccheckpointtypes.BTCSpvProof) { + n.LogActionF("btccheckpoint sending proofs") + + p1bytes, err := util.Cdc.Marshal(p1) + require.NoError(n.t, err) + p2bytes, err := util.Cdc.Marshal(p2) + require.NoError(n.t, err) + + p1HexBytes := hex.EncodeToString(p1bytes) + p2HexBytes := hex.EncodeToString(p2bytes) + + cmd := []string{"./babylond", "tx", "btccheckpoint", "insert-proofs", p1HexBytes, p2HexBytes, "--from=val"} + _, _, err = n.containerManager.ExecTxCmd(n.t, n.chainId, n.Name, cmd) + require.NoError(n.t, err) + n.LogActionF("successfully inserted btc spv proofs") +} + +func (n *NodeConfig) FinalizeSealedEpochs(startingEpoch uint64, lastEpoch uint64) { + n.LogActionF("start finalizing epoch starting from %d", startingEpoch) + + madeProgress := false + currEpoch := startingEpoch + for { + if currEpoch > lastEpoch { + break + } + + checkpoint, err := n.QueryCheckpointForEpoch(currEpoch) + + require.NoError(n.t, err) + + // can only finalize sealed checkpoints + if checkpoint.Status != cttypes.Sealed { + return + } + + currentBtcTip, err := n.QueryTip() + + require.NoError(n.t, err) + + _, c, err := bech32.DecodeAndConvert(n.PublicAddress) + + require.NoError(n.t, err) + + btcCheckpoint, err := cttypes.FromRawCkptToBTCCkpt(checkpoint.Ckpt, c) + + require.NoError(n.t, err) + + p1, p2, err := txformat.EncodeCheckpointData( + txformat.BabylonTag(initialization.BabylonOpReturnTag), + txformat.CurrentVersion, + btcCheckpoint, + ) + + require.NoError(n.t, err) + + opReturn1 := datagen.CreateBlockWithTransaction(currentBtcTip.Header.ToBlockHeader(), p1) + + opReturn2 := datagen.CreateBlockWithTransaction(opReturn1.HeaderBytes.ToBlockHeader(), p2) + + n.InsertHeader(&opReturn1.HeaderBytes) + n.InsertHeader(&opReturn2.HeaderBytes) + n.InsertProofs(opReturn1.SpvProof, opReturn2.SpvProof) + + n.WaitForCondition(func() bool { + ckpt, err := n.QueryCheckpointForEpoch(currEpoch) + require.NoError(n.t, err) + return ckpt.Status == cttypes.Submitted + }, "Checkpoint should be submitted ") + + madeProgress = true + currEpoch++ + } + + if madeProgress { + // we made progress in above loop, which means the last header of btc chain is + // valid op return header, by finalizing it, we will also finalize all older + // checkpoints + + for i := 0; i < initialization.BabylonBtcFinalizationPeriod; i++ { + n.InsertNewEmptyBtcHeader() + } + } +} diff --git a/test/e2e/configurer/chain/node.go b/test/e2e/configurer/chain/node.go index a98e3989c..9f133091a 100644 --- a/test/e2e/configurer/chain/node.go +++ b/test/e2e/configurer/chain/node.go @@ -111,6 +111,34 @@ func (n *NodeConfig) WaitUntil(doneCondition func(syncInfo coretypes.SyncInfo) b n.t.Errorf("node %s timed out waiting for condition, latest block height was %d", n.Name, latestBlockHeight) } +func (n *NodeConfig) LatestBlockNumber() uint64 { + status, err := n.rpcClient.Status(context.Background()) + require.NoError(n.t, err) + return uint64(status.SyncInfo.LatestBlockHeight) +} + +func (n *NodeConfig) WaitForCondition(doneCondition func() bool, errormsg string) { + for i := 0; i < waitUntilrepeatMax; i++ { + if !doneCondition() { + time.Sleep(waitUntilRepeatPauseTime) + continue + } + return + } + n.t.Errorf("node %s timed out waiting for condition. 
Msg: %s", n.Name, errormsg) +} + +func (n *NodeConfig) WaitUntilBtcHeight(height uint64) { + var latestBlockHeight uint64 + n.WaitForCondition(func() bool { + btcTip, err := n.QueryTip() + require.NoError(n.t, err) + latestBlockHeight = btcTip.Height + + return latestBlockHeight >= height + }, fmt.Sprintf("Timed out waiting for btc height %d", height)) +} + func (n *NodeConfig) extractOperatorAddressIfValidator() error { if !n.IsValidator { n.t.Logf("node (%s) is not a validator, skipping", n.Name) diff --git a/test/e2e/configurer/chain/queries.go b/test/e2e/configurer/chain/queries.go index ab869a686..a30be66ef 100644 --- a/test/e2e/configurer/chain/queries.go +++ b/test/e2e/configurer/chain/queries.go @@ -16,6 +16,9 @@ import ( tmabcitypes "github.com/tendermint/tendermint/abci/types" "github.com/babylonchain/babylon/test/e2e/util" + blc "github.com/babylonchain/babylon/x/btclightclient/types" + ct "github.com/babylonchain/babylon/x/checkpointing/types" + zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types" ) func (n *NodeConfig) QueryGRPCGateway(path string, parameters ...string) ([]byte, error) { @@ -129,3 +132,88 @@ func (n *NodeConfig) QueryListSnapshots() ([]*tmabcitypes.Snapshot, error) { return listSnapshots.Snapshots, nil } + +// func (n *NodeConfig) QueryContractsFromId(codeId int) ([]string, error) { +// path := fmt.Sprintf("/cosmwasm/wasm/v1/code/%d/contracts", codeId) +// bz, err := n.QueryGRPCGateway(path) + +// require.NoError(n.t, err) + +// var contractsResponse wasmtypes.QueryContractsByCodeResponse +// if err := util.Cdc.UnmarshalJSON(bz, &contractsResponse); err != nil { +// return nil, err +// } + +// return contractsResponse.Contracts, nil +// } + +func (n *NodeConfig) QueryCheckpointForEpoch(epoch uint64) (*ct.RawCheckpointWithMeta, error) { + path := fmt.Sprintf("babylon/checkpointing/v1/raw_checkpoint/%d", epoch) + bz, err := n.QueryGRPCGateway(path) + require.NoError(n.t, err) + + var checkpointingResponse ct.QueryRawCheckpointResponse + if err := util.Cdc.UnmarshalJSON(bz, &checkpointingResponse); err != nil { + return nil, err + } + + return checkpointingResponse.RawCheckpoint, nil +} + +func (n *NodeConfig) QueryBtcBaseHeader() (*blc.BTCHeaderInfo, error) { + bz, err := n.QueryGRPCGateway("babylon/btclightclient/v1/baseheader") + require.NoError(n.t, err) + + var blcResponse blc.QueryBaseHeaderResponse + if err := util.Cdc.UnmarshalJSON(bz, &blcResponse); err != nil { + return nil, err + } + + return blcResponse.Header, nil +} + +func (n *NodeConfig) QueryTip() (*blc.BTCHeaderInfo, error) { + bz, err := n.QueryGRPCGateway("babylon/btclightclient/v1/tip") + require.NoError(n.t, err) + + var blcResponse blc.QueryTipResponse + if err := util.Cdc.UnmarshalJSON(bz, &blcResponse); err != nil { + return nil, err + } + + return blcResponse.Header, nil +} + +func (n *NodeConfig) QueryFinalizedChainInfo(chainId string) (*zctypes.QueryFinalizedChainInfoResponse, error) { + finalizedPath := fmt.Sprintf("babylon/zoneconcierge/v1/finalized_chain_info/%s", chainId) + bz, err := n.QueryGRPCGateway(finalizedPath) + require.NoError(n.t, err) + + var finalizedResponse zctypes.QueryFinalizedChainInfoResponse + if err := util.Cdc.UnmarshalJSON(bz, &finalizedResponse); err != nil { + return nil, err + } + + return &finalizedResponse, nil +} + +func (n *NodeConfig) QueryCheckpointChains() (*[]string, error) { + bz, err := n.QueryGRPCGateway("babylon/zoneconcierge/v1/chains") + require.NoError(n.t, err) + var chainsResponse zctypes.QueryChainListResponse + if err := 
util.Cdc.UnmarshalJSON(bz, &chainsResponse); err != nil { + return nil, err + } + return &chainsResponse.ChainIds, nil +} + +func (n *NodeConfig) QueryCheckpointChainInfo(chainId string) (*zctypes.ChainInfo, error) { + infoPath := fmt.Sprintf("/babylon/zoneconcierge/v1/chain_info/%s", chainId) + bz, err := n.QueryGRPCGateway(infoPath) + require.NoError(n.t, err) + var infoResponse zctypes.QueryChainInfoResponse + if err := util.Cdc.UnmarshalJSON(bz, &infoResponse); err != nil { + return nil, err + } + return infoResponse.ChainInfo, nil +} diff --git a/test/e2e/containers/containers.go b/test/e2e/containers/containers.go index 3878d3ddd..e97fba0db 100644 --- a/test/e2e/containers/containers.go +++ b/test/e2e/containers/containers.go @@ -61,7 +61,7 @@ func (m *Manager) ExecTxCmd(t *testing.T, chainId string, containerName string, // namely adding flags `--chain-id={chain-id} -b=block --yes --keyring-backend=test "--log_format=json"`, // and searching for `successStr` func (m *Manager) ExecTxCmdWithSuccessString(t *testing.T, chainId string, containerName string, command []string, successStr string) (bytes.Buffer, bytes.Buffer, error) { - allTxArgs := []string{fmt.Sprintf("--chain-id=%s", chainId), "-b=block", "--yes", "--keyring-backend=test", "--log_format=json"} + allTxArgs := []string{fmt.Sprintf("--chain-id=%s", chainId), "-b=block", "--yes", "--keyring-backend=test", "--log_format=json", "--home=/babylondata"} txCommand := append(command, allTxArgs...) return m.ExecCmd(t, containerName, txCommand, successStr) } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index d9473d884..7f9adb385 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -3,6 +3,11 @@ package e2e +import ( + "github.com/babylonchain/babylon/test/e2e/initialization" + ct "github.com/babylonchain/babylon/x/checkpointing/types" +) + // Most simple test, just checking that two chains are up and connected through // ibc func (s *IntegrationTestSuite) TestConnectIbc() { @@ -13,3 +18,33 @@ func (s *IntegrationTestSuite) TestConnectIbc() { _, err = chainB.GetDefaultNode() s.NoError(err) } + +func (s *IntegrationTestSuite) TestIbcCheckpointing() { + chainA := s.configurer.GetChainConfig(0) + + chainA.WaitUntilHeight(25) + + nonValidatorNode, err := chainA.GetNodeAtIndex(2) + s.NoError(err) + + // Finalize epoch 1 and 2, as first headers of opposing chain are in epoch 2 + nonValidatorNode.FinalizeSealedEpochs(1, 2) + + epoch2, err := nonValidatorNode.QueryCheckpointForEpoch(2) + s.NoError(err) + + if epoch2.Status != ct.Finalized { + s.FailNow("Epoch 2 should be finalized") + } + + // Check we have finalized epoch info for opposing chain and some basic assertions + fininfo, err := nonValidatorNode.QueryFinalizedChainInfo(initialization.ChainBID) + s.NoError(err) + // TODO Add more assertion here. Maybe check proofs ? 
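	// One way to start on the TODO above, shown only as an illustrative sketch (not part of
	// this patch) and using types that already appear in this series, namely ChainInfo and
	// its ValidateBasic helper from x/zoneconcierge/types:
	//
	//	s.NoError(fininfo.FinalizedChainInfo.ValidateBasic())
	//	s.Equal(initialization.ChainBID, fininfo.FinalizedChainInfo.LatestHeader.ChainId)
	//
	// If the finalized-chain-info response also exposes the ProofFinalizedChainInfo message
	// added earlier in this series, its sub-proofs (proof_tx_in_block, proof_epoch_sealed,
	// proof_epoch_submitted) could be asserted non-nil as well; that is left as an assumption
	// here because the response message itself is not shown in this patch.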
+ s.Equal(fininfo.FinalizedChainInfo.ChainId, initialization.ChainBID) + s.Equal(fininfo.EpochInfo.EpochNumber, uint64(2)) + + chainB := s.configurer.GetChainConfig(1) + _, err = chainB.GetDefaultNode() + s.NoError(err) +} diff --git a/test/e2e/initialization/config.go b/test/e2e/initialization/config.go index 7ed392896..77b69f11d 100644 --- a/test/e2e/initialization/config.go +++ b/test/e2e/initialization/config.go @@ -7,6 +7,9 @@ import ( "time" "github.com/babylonchain/babylon/privval" + bbn "github.com/babylonchain/babylon/types" + btccheckpointtypes "github.com/babylonchain/babylon/x/btccheckpoint/types" + blctypes "github.com/babylonchain/babylon/x/btclightclient/types" checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" ed25519 "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" @@ -40,9 +43,12 @@ type NodeConfig struct { const ( // common - BabylonDenom = "ubbn" - MinGasPrice = "0.000" - ValidatorWalletName = "val" + BabylonDenom = "ubbn" + MinGasPrice = "0.000" + ValidatorWalletName = "val" + BabylonOpReturnTag = "bbni" + BabylonBtcConfirmationPeriod = 2 + BabylonBtcFinalizationPeriod = 4 // chainA ChainAID = "bbn-test-a" BabylonBalanceA = 200000000000 @@ -226,6 +232,16 @@ func initGenesis(chain *internalChain, votingPeriod, expeditedVotingPeriod time. return err } + err = updateModuleGenesis(appGenState, blctypes.ModuleName, blctypes.DefaultGenesis(), updateBtcLightClientGenesis) + if err != nil { + return err + } + + err = updateModuleGenesis(appGenState, btccheckpointtypes.ModuleName, btccheckpointtypes.DefaultGenesis(), updateBtccheckpointGenesis) + if err != nil { + return err + } + bz, err := json.MarshalIndent(appGenState, "", " ") if err != nil { return err @@ -278,6 +294,23 @@ func updateCrisisGenesis(crisisGenState *crisistypes.GenesisState) { crisisGenState.ConstantFee.Denom = BabylonDenom } +func updateBtcLightClientGenesis(blcGenState *blctypes.GenesisState) { + blcGenState.Params = blctypes.DefaultParams() + btcSimnetGenesisHex := "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a45068653ffff7f2002000000" + baseBtcHeader, err := bbn.NewBTCHeaderBytesFromHex(btcSimnetGenesisHex) + if err != nil { + panic(err) + } + work := blctypes.CalcWork(&baseBtcHeader) + blcGenState.BaseBtcHeader = *blctypes.NewBTCHeaderInfo(&baseBtcHeader, baseBtcHeader.Hash(), 0, &work) +} + +func updateBtccheckpointGenesis(btccheckpointGenState *btccheckpointtypes.GenesisState) { + btccheckpointGenState.Params = btccheckpointtypes.DefaultParams() + btccheckpointGenState.Params.BtcConfirmationDepth = BabylonBtcConfirmationPeriod + btccheckpointGenState.Params.CheckpointFinalizationTimeout = BabylonBtcFinalizationPeriod +} + func updateGenUtilGenesis(c *internalChain) func(*genutiltypes.GenesisState) { return func(genUtilGenState *genutiltypes.GenesisState) { // generate genesis txs diff --git a/test/e2e/initialization/node.go b/test/e2e/initialization/node.go index 617f81a71..2d990ed9c 100644 --- a/test/e2e/initialization/node.go +++ b/test/e2e/initialization/node.go @@ -9,9 +9,8 @@ import ( "strings" "github.com/babylonchain/babylon/crypto/bls12381" - tmed25519 "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/babylonchain/babylon/privval" + bbn "github.com/babylonchain/babylon/types" sdkcrypto "github.com/cosmos/cosmos-sdk/crypto" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" 
"github.com/cosmos/cosmos-sdk/crypto/hd" @@ -28,6 +27,7 @@ import ( "github.com/cosmos/go-bip39" "github.com/spf13/viper" tmconfig "github.com/tendermint/tendermint/config" + tmed25519 "github.com/tendermint/tendermint/crypto/ed25519" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/p2p" tmtypes "github.com/tendermint/tendermint/types" @@ -129,6 +129,8 @@ func (n *internalNode) createAppConfig(nodeConfig *NodeConfig) { appConfig.StateSync.SnapshotInterval = nodeConfig.SnapshotInterval appConfig.StateSync.SnapshotKeepRecent = nodeConfig.SnapshotKeepRecent appConfig.SignerConfig.KeyName = ValidatorWalletName + appConfig.BtcConfig.Network = string(bbn.BtcSimnet) + appConfig.BtcConfig.CheckpointTag = BabylonOpReturnTag customTemplate := cmd.DefaultBabylonTemplate() diff --git a/testutil/datagen/btc_header_info.go b/testutil/datagen/btc_header_info.go index 137c25c37..2a705ca98 100644 --- a/testutil/datagen/btc_header_info.go +++ b/testutil/datagen/btc_header_info.go @@ -140,6 +140,33 @@ func GenRandomBTCHeaderInfoWithParent(parent *btclightclienttypes.BTCHeaderInfo) return GenRandomBTCHeaderInfoWithParentAndBits(parent, nil) } +// GenRandomValidBTCHeaderInfoWithParent generates random BTCHeaderInfo object +// with valid proof of work. +// WARNING: if parent is from network with a lot of work (mainnet) it may never finish +// use only with simnet headers +func GenRandomValidBTCHeaderInfoWithParent(parent btclightclienttypes.BTCHeaderInfo) *btclightclienttypes.BTCHeaderInfo { + randHeader := GenRandomBtcdHeader() + parentHeader := parent.Header.ToBlockHeader() + + randHeader.Version = parentHeader.Version + randHeader.PrevBlock = parentHeader.BlockHash() + randHeader.Bits = parentHeader.Bits + randHeader.Timestamp = parentHeader.Timestamp.Add(50 * time.Second) + SolveBlock(randHeader) + + headerBytes := bbn.NewBTCHeaderBytesFromBlockHeader(randHeader) + + accumulatedWork := btclightclienttypes.CalcWork(&headerBytes) + accumulatedWork = btclightclienttypes.CumulativeWork(accumulatedWork, *parent.Work) + + return &btclightclienttypes.BTCHeaderInfo{ + Header: &headerBytes, + Hash: headerBytes.Hash(), + Height: parent.Height + 1, + Work: &accumulatedWork, + } +} + func GenRandomBTCHeaderInfoWithBits(bits *sdk.Uint) *btclightclienttypes.BTCHeaderInfo { return GenRandomBTCHeaderInfoWithParentAndBits(nil, bits) } diff --git a/x/btccheckpoint/client/cli/tx.go b/x/btccheckpoint/client/cli/tx.go index c6358bc6b..752ae5679 100644 --- a/x/btccheckpoint/client/cli/tx.go +++ b/x/btccheckpoint/client/cli/tx.go @@ -7,6 +7,9 @@ import ( "github.com/spf13/cobra" "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + // "github.com/cosmos/cosmos-sdk/client/flags" "github.com/babylonchain/babylon/x/btccheckpoint/types" ) @@ -31,5 +34,47 @@ func GetTxCmd() *cobra.Command { RunE: client.ValidateCmd, } + cmd.AddCommand(CmdTxInsertSpvProofs()) + + return cmd +} + +// TODO this api is not super friendly i.e it is not easy to provide hex encoded +// proto serialized blobs. 
It would be good to have version which takes some +// other format like json or maybe path to file +func CmdTxInsertSpvProofs() *cobra.Command { + cmd := &cobra.Command{ + Use: "insert-proofs [proof-hex-string] [proof-hex-string]", + Short: "submit proof bytes", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + proof1, err := types.NewSpvProofFromHexBytes(clientCtx.Codec, args[0]) + + if err != nil { + return err + } + + proof2, err := types.NewSpvProofFromHexBytes(clientCtx.Codec, args[1]) + + if err != nil { + return err + } + + msg := &types.MsgInsertBTCSpvProof{ + Submitter: clientCtx.GetFromAddress().String(), + Proofs: []*types.BTCSpvProof{proof1, proof2}, + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + return cmd } diff --git a/x/btccheckpoint/types/types.go b/x/btccheckpoint/types/types.go index 75ce93c56..8c9137141 100644 --- a/x/btccheckpoint/types/types.go +++ b/x/btccheckpoint/types/types.go @@ -1,9 +1,11 @@ package types import ( + "encoding/hex" "fmt" "github.com/babylonchain/babylon/types" + "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -172,3 +174,20 @@ func (ti *TransactionInfo) ValidateBasic() error { } return nil } + +func NewSpvProofFromHexBytes(c codec.Codec, proof string) (*BTCSpvProof, error) { + bytes, err := hex.DecodeString(proof) + + if err != nil { + return nil, err + } + + var p BTCSpvProof + err = c.Unmarshal(bytes, &p) + + if err != nil { + return nil, err + } + + return &p, nil +} diff --git a/x/btclightclient/client/cli/tx.go b/x/btclightclient/client/cli/tx.go index 2cc8d0561..043ef24ba 100644 --- a/x/btclightclient/client/cli/tx.go +++ b/x/btclightclient/client/cli/tx.go @@ -2,6 +2,7 @@ package cli import ( "fmt" + "github.com/babylonchain/babylon/x/btclightclient/types" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" From 01d77ec9c91d583b4de0d882a9024830da9b147e Mon Sep 17 00:00:00 2001 From: KonradStaniec Date: Thu, 12 Jan 2023 09:59:03 +0100 Subject: [PATCH 15/37] Fix: Increase gas in e2e test (#270) Increase gas --- test/e2e/configurer/chain/commands.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/configurer/chain/commands.go b/test/e2e/configurer/chain/commands.go index ce6a385ac..c55d6f843 100644 --- a/test/e2e/configurer/chain/commands.go +++ b/test/e2e/configurer/chain/commands.go @@ -54,7 +54,7 @@ func (n *NodeConfig) BankSend(amount string, sendAddress string, receiveAddress func (n *NodeConfig) SendHeaderHex(headerHex string) { n.LogActionF("btclightclient sending header %s", headerHex) - cmd := []string{"./babylond", "tx", "btclightclient", "insert-header", headerHex, "--from=val"} + cmd := []string{"./babylond", "tx", "btclightclient", "insert-header", headerHex, "--from=val", "--gas=500000"} _, _, err := n.containerManager.ExecTxCmd(n.t, n.chainId, n.Name, cmd) require.NoError(n.t, err) n.LogActionF("successfully inserted header %s", headerHex) From 82d5277a452693b1728b4985b624f52dcfce5421 Mon Sep 17 00:00:00 2001 From: Vitalis Salis Date: Thu, 12 Jan 2023 15:57:26 +0300 Subject: [PATCH 16/37] fix: checkpointing: Do not make the `home` flag a required one and unmarshall PubKey (#271) --- app/app.go | 1 + x/checkpointing/client/cli/tx.go | 14 ++++++++++++-- x/checkpointing/client/cli/tx_test.go | 11 +++++------ x/checkpointing/types/msgs.go | 7 ++++++- 4 
files changed, 24 insertions(+), 9 deletions(-) diff --git a/app/app.go b/app/app.go index 1a7a80f4b..6336f3e19 100644 --- a/app/app.go +++ b/app/app.go @@ -247,6 +247,7 @@ type BabylonApp struct { } func init() { + // Note: If this changes, the home directory under x/checkpointing/client/cli/tx.go needs to change as well userHomeDir, err := os.UserHomeDir() if err != nil { panic(err) diff --git a/x/checkpointing/client/cli/tx.go b/x/checkpointing/client/cli/tx.go index d22211c75..d4986d8cb 100644 --- a/x/checkpointing/client/cli/tx.go +++ b/x/checkpointing/client/cli/tx.go @@ -2,6 +2,8 @@ package cli import ( "fmt" + "os" + "path/filepath" "strconv" "strings" @@ -105,9 +107,17 @@ before running the command (e.g., via babylond create-bls-key).`)) return tx.GenerateOrBroadcastTxWithFactory(clientCtx, txf, msg) } + // HACK: test cases need to setup the path where the priv validator BLS key is going to be set + // so we redefine the FlagHome here. Since we can't import `app` due to a cyclic dependency, + // we have to duplicate the definition here. + // If this changes, the `DefaultHomeDir` flag at `app/app.go` needs to change as well. + userHomeDir, err := os.UserHomeDir() + if err != nil { + panic(err) + } - cmd.Flags().String(flags.FlagHome, "", "The node home directory") - _ = cmd.MarkFlagRequired(flags.FlagHome) + defaultNodeHome := filepath.Join(userHomeDir, ".babylond") + cmd.Flags().String(flags.FlagHome, defaultNodeHome, "The node home directory") return cmd } diff --git a/x/checkpointing/client/cli/tx_test.go b/x/checkpointing/client/cli/tx_test.go index 236634ff6..c7150d3cc 100644 --- a/x/checkpointing/client/cli/tx_test.go +++ b/x/checkpointing/client/cli/tx_test.go @@ -22,6 +22,11 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/staking/client/cli" + "github.com/babylonchain/babylon/app" + "github.com/babylonchain/babylon/app/params" + "github.com/babylonchain/babylon/privval" + testutilcli "github.com/babylonchain/babylon/testutil/cli" + checkpointcli "github.com/babylonchain/babylon/x/checkpointing/client/cli" abci "github.com/tendermint/tendermint/abci/types" tmconfig "github.com/tendermint/tendermint/config" tmbytes "github.com/tendermint/tendermint/libs/bytes" @@ -30,12 +35,6 @@ import ( rpcclientmock "github.com/tendermint/tendermint/rpc/client/mock" coretypes "github.com/tendermint/tendermint/rpc/core/types" tmtypes "github.com/tendermint/tendermint/types" - - "github.com/babylonchain/babylon/app" - "github.com/babylonchain/babylon/app/params" - "github.com/babylonchain/babylon/privval" - testutilcli "github.com/babylonchain/babylon/testutil/cli" - checkpointcli "github.com/babylonchain/babylon/x/checkpointing/client/cli" ) type mockTendermintRPC struct { diff --git a/x/checkpointing/types/msgs.go b/x/checkpointing/types/msgs.go index e5f551de0..aee1604ab 100644 --- a/x/checkpointing/types/msgs.go +++ b/x/checkpointing/types/msgs.go @@ -77,7 +77,12 @@ func (m *MsgWrappedCreateValidator) ValidateBasic() error { if err != nil { return err } - ok := m.VerifyPoP(m.MsgCreateValidator.Pubkey.GetCachedValue().(*ed255192.PubKey)) + var pubKey ed255192.PubKey + err = pubKey.Unmarshal(m.MsgCreateValidator.Pubkey.GetValue()) + if err != nil { + return err + } + ok := m.VerifyPoP(&pubKey) if !ok { return errors.New("the proof-of-possession is not valid") } From 889f70a07bfff33dfbe414407e1849df825ce717 Mon Sep 17 00:00:00 2001 From: Cirrus Gai Date: Mon, 16 Jan 2023 17:23:41 +0800 Subject: [PATCH 17/37] fix: Add HTTP URL for 
LastCheckpointWithStatusRequest (#277) --- proto/babylon/checkpointing/query.proto | 4 +- x/checkpointing/types/query.pb.go | 123 ++++++++++++------------ x/checkpointing/types/query.pb.gw.go | 107 +++++++++++++++++++++ 3 files changed, 172 insertions(+), 62 deletions(-) diff --git a/proto/babylon/checkpointing/query.proto b/proto/babylon/checkpointing/query.proto index c33dbd0f7..3accee792 100644 --- a/proto/babylon/checkpointing/query.proto +++ b/proto/babylon/checkpointing/query.proto @@ -38,7 +38,9 @@ service Query { } // LastCheckpointWithStatus queries the last checkpoint with a given status or a more matured status - rpc LastCheckpointWithStatus(QueryLastCheckpointWithStatusRequest) returns (QueryLastCheckpointWithStatusResponse); + rpc LastCheckpointWithStatus(QueryLastCheckpointWithStatusRequest) returns (QueryLastCheckpointWithStatusResponse) { + option (google.api.http).get = "/babylon/checkpointing/v1/last_raw_checkpoint/{status}"; + } // Parameters queries the parameters of the module. rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { diff --git a/x/checkpointing/types/query.pb.go b/x/checkpointing/types/query.pb.go index 06abf8cf7..5450cf6a9 100644 --- a/x/checkpointing/types/query.pb.go +++ b/x/checkpointing/types/query.pb.go @@ -739,67 +739,68 @@ func init() { func init() { proto.RegisterFile("babylon/checkpointing/query.proto", fileDescriptor_a0fdb8f0f85bb51e) } var fileDescriptor_a0fdb8f0f85bb51e = []byte{ - // 959 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xce, 0x38, 0x6d, 0x44, 0x9e, 0x69, 0x08, 0x43, 0x44, 0xcd, 0xb6, 0xb8, 0x61, 0x0b, 0x6d, - 0x54, 0x91, 0x5d, 0xd9, 0xf9, 0xa9, 0xd0, 0x06, 0xc9, 0x55, 0xe0, 0xd0, 0x12, 0xc2, 0x22, 0x0a, - 0xe2, 0x80, 0x35, 0xde, 0x8e, 0xec, 0x25, 0xf6, 0xce, 0xc6, 0x33, 0xeb, 0x60, 0x55, 0xbd, 0xc0, - 0x1f, 0x00, 0x12, 0x12, 0x17, 0xfe, 0x04, 0x4e, 0xdc, 0x38, 0xc3, 0xa5, 0x07, 0x84, 0x2a, 0x71, - 0xe1, 0x84, 0x50, 0xc2, 0xff, 0x01, 0xda, 0x99, 0xd9, 0xd8, 0x6b, 0x7b, 0xbb, 0x76, 0xea, 0xdb, - 0x7a, 0xf6, 0x7d, 0xef, 0x7d, 0xdf, 0x9b, 0xb7, 0xdf, 0x4b, 0xe0, 0x8d, 0x1a, 0xa9, 0x75, 0x9b, - 0xcc, 0xb7, 0xdd, 0x06, 0x75, 0x0f, 0x03, 0xe6, 0xf9, 0xc2, 0xf3, 0xeb, 0xf6, 0x51, 0x48, 0xdb, - 0x5d, 0x2b, 0x68, 0x33, 0xc1, 0x70, 0x41, 0x87, 0x58, 0x89, 0x10, 0xab, 0x53, 0x32, 0xae, 0x8f, - 0x06, 0xd7, 0x9a, 0xbc, 0x7a, 0x48, 0x35, 0xdc, 0xb8, 0xe5, 0x32, 0xde, 0x62, 0xdc, 0xae, 0x11, - 0x4e, 0x55, 0x5e, 0xbb, 0x53, 0xaa, 0x51, 0x41, 0x4a, 0x76, 0x40, 0xea, 0x9e, 0x4f, 0x84, 0xc7, - 0x7c, 0x1d, 0xbb, 0x54, 0x67, 0x75, 0x26, 0x1f, 0xed, 0xe8, 0x49, 0x9f, 0x5e, 0xad, 0x33, 0x56, - 0x6f, 0x52, 0x9b, 0x04, 0x9e, 0x4d, 0x7c, 0x9f, 0x09, 0x09, 0xe1, 0xfa, 0xad, 0x39, 0x9a, 0x44, - 0x40, 0xda, 0xa4, 0x15, 0xc7, 0xdc, 0x18, 0x1d, 0xd3, 0xfb, 0xa5, 0xe2, 0xcc, 0x9f, 0x10, 0xbc, - 0xfe, 0x51, 0x44, 0xd1, 0x21, 0xc7, 0x77, 0xcf, 0x5e, 0xde, 0xf7, 0xb8, 0x70, 0xe8, 0x51, 0x48, - 0xb9, 0xc0, 0x15, 0x98, 0xe3, 0x82, 0x88, 0x90, 0x17, 0xd0, 0x32, 0x5a, 0x59, 0x28, 0xdf, 0xb2, - 0xd2, 0xba, 0x63, 0xf5, 0x12, 0x7c, 0x2c, 0x11, 0x8e, 0x46, 0xe2, 0xf7, 0x00, 0x7a, 0xca, 0x0b, - 0xb9, 0x65, 0xb4, 0x92, 0x2f, 0xdf, 0xb0, 0x54, 0x9b, 0xac, 0xa8, 0x4d, 0x96, 0x6a, 0xbf, 0x6e, - 0x93, 0x75, 0x40, 0xea, 0x54, 0xd7, 0x77, 0xfa, 0x90, 0xe6, 0x6f, 0x08, 0x8a, 0x69, 0x6c, 0x79, - 0xc0, 0x7c, 0x4e, 0xf1, 0x67, 0xf0, 0x52, 0x9b, 0x1c, 0x57, 0x7b, 0xdc, 0x22, 0xde, 0xb3, 0x2b, - 0xf9, 0xb2, 0x9d, 0xce, 0x3b, 0x91, 0xed, 0x53, 0x4f, 0x34, 0x3e, 0xa0, 0x82, 0x38, 
0x0b, 0xed, - 0xfe, 0x63, 0x8e, 0xdf, 0x1f, 0x21, 0xe2, 0x66, 0xa6, 0x08, 0x45, 0x2b, 0xa1, 0x62, 0x1b, 0x5e, - 0x1b, 0x16, 0x11, 0xb7, 0xfb, 0x0a, 0xcc, 0xd3, 0x80, 0xb9, 0x8d, 0xaa, 0x1f, 0xb6, 0x64, 0xc7, - 0x2f, 0x38, 0x2f, 0xc8, 0x83, 0xfd, 0xb0, 0x65, 0x0a, 0x30, 0x46, 0x21, 0xb5, 0xf4, 0x07, 0xb0, - 0x90, 0x94, 0x2e, 0xf1, 0xe7, 0x50, 0x7e, 0x29, 0xa1, 0xdc, 0xfc, 0x06, 0xc1, 0x55, 0x59, 0xb6, - 0xd2, 0xe4, 0x07, 0x61, 0xad, 0xe9, 0xb9, 0xf7, 0x68, 0xb7, 0x7f, 0x44, 0x9e, 0xc5, 0x79, 0x6a, - 0x77, 0xff, 0x47, 0x3c, 0xa9, 0xc3, 0x2c, 0xb4, 0xfe, 0x87, 0x70, 0xb9, 0x43, 0x9a, 0xde, 0x43, - 0x22, 0x58, 0xbb, 0x7a, 0xec, 0x89, 0x46, 0x55, 0x7f, 0x97, 0xf1, 0x08, 0xac, 0xa6, 0x37, 0xe2, - 0x41, 0x0c, 0x8c, 0x9a, 0x50, 0x69, 0xf2, 0x7b, 0xb4, 0xeb, 0x2c, 0x75, 0x86, 0x0f, 0xa7, 0x38, - 0x06, 0x9b, 0x70, 0x59, 0xea, 0xd9, 0x8b, 0x3a, 0xa5, 0x3f, 0x98, 0x71, 0x86, 0xe0, 0x0b, 0x28, - 0x0c, 0xe3, 0x74, 0x0b, 0xa6, 0xf0, 0xb1, 0x9a, 0x7b, 0x60, 0xaa, 0x21, 0xa3, 0x2e, 0xf5, 0x45, - 0x5f, 0x95, 0xbb, 0x2c, 0xec, 0xcd, 0xe9, 0x35, 0xc8, 0x2b, 0x8a, 0x6e, 0x74, 0xaa, 0x49, 0x82, - 0x3c, 0x92, 0x71, 0xe6, 0x0f, 0x39, 0xb8, 0xfe, 0xcc, 0x3c, 0x9a, 0xf2, 0x15, 0x98, 0x17, 0x5e, - 0x50, 0x95, 0xc8, 0x58, 0xab, 0xf0, 0x02, 0x19, 0x3f, 0x58, 0x25, 0x37, 0x58, 0x05, 0x1f, 0xc1, - 0x8b, 0x8a, 0xb6, 0x8e, 0x98, 0x95, 0x17, 0xbd, 0x9f, 0x2e, 0x7b, 0x0c, 0x4a, 0x56, 0xdf, 0xd9, - 0x9e, 0x2f, 0xda, 0x5d, 0x27, 0xcf, 0x7b, 0x27, 0xc6, 0x2e, 0x2c, 0x0e, 0x06, 0xe0, 0x45, 0x98, - 0x3d, 0xa4, 0x5d, 0x49, 0x7f, 0xde, 0x89, 0x1e, 0xf1, 0x12, 0x5c, 0xec, 0x90, 0x66, 0x48, 0x35, - 0x67, 0xf5, 0x63, 0x27, 0xb7, 0x8d, 0xcc, 0x2f, 0xe1, 0x4d, 0x49, 0xe2, 0x3e, 0xe1, 0x22, 0xf9, - 0xf1, 0x25, 0x87, 0x60, 0x1a, 0x77, 0x79, 0x0c, 0x6f, 0x65, 0xd4, 0xd2, 0xb7, 0xb0, 0x9f, 0xe2, - 0x1d, 0x37, 0xc7, 0xf4, 0x8e, 0x41, 0xcf, 0x58, 0x02, 0x2c, 0x0b, 0x1f, 0xc8, 0xa5, 0xa4, 0x25, - 0x99, 0x9f, 0xc0, 0x2b, 0x89, 0x53, 0x5d, 0x7c, 0x17, 0xe6, 0xd4, 0xf2, 0xd2, 0x45, 0x97, 0xd3, - 0x8b, 0x2a, 0x64, 0xe5, 0xc2, 0x93, 0xbf, 0xaf, 0xcd, 0x38, 0x1a, 0x55, 0xfe, 0x6f, 0x1e, 0x2e, - 0xca, 0xbc, 0xf8, 0x57, 0x04, 0x2f, 0x0f, 0xed, 0x06, 0xbc, 0x95, 0x35, 0x0e, 0x29, 0xbb, 0xcf, - 0xd8, 0x9e, 0x1c, 0xa8, 0x24, 0x99, 0x3b, 0x5f, 0xff, 0xf9, 0xef, 0xf7, 0xb9, 0x75, 0x5c, 0xb6, - 0x47, 0x2f, 0xe2, 0x4e, 0xc9, 0x1e, 0x58, 0x53, 0xf6, 0x23, 0x75, 0x67, 0x8f, 0xf1, 0x2f, 0x08, - 0x2e, 0x25, 0x32, 0xe3, 0xb5, 0x49, 0x78, 0xc4, 0xe4, 0xd7, 0x27, 0x03, 0x69, 0xe2, 0xb7, 0x25, - 0xf1, 0x4d, 0xbc, 0x3e, 0x2e, 0x71, 0xfb, 0xd1, 0x99, 0x55, 0x3d, 0x8e, 0xfa, 0xbf, 0x38, 0xe8, - 0xcf, 0x78, 0x33, 0x83, 0x48, 0xca, 0x5a, 0x31, 0xb6, 0x26, 0xc6, 0x69, 0x0d, 0x77, 0xa4, 0x86, - 0x2d, 0xbc, 0x91, 0xae, 0x21, 0xda, 0x0c, 0x81, 0x04, 0xcb, 0x05, 0x91, 0x10, 0xf1, 0x33, 0x82, - 0x7c, 0x9f, 0x37, 0xe0, 0x52, 0x06, 0x8f, 0x61, 0x03, 0x37, 0xca, 0x93, 0x40, 0x34, 0xeb, 0x77, - 0x24, 0xeb, 0x0d, 0xbc, 0x96, 0xce, 0x5a, 0x92, 0x4c, 0x90, 0xb5, 0xf5, 0x5f, 0x58, 0xbf, 0x23, - 0x78, 0x75, 0xb4, 0xab, 0xe1, 0xdb, 0xe7, 0x34, 0x43, 0xa5, 0xe4, 0xce, 0x73, 0x59, 0xa9, 0xb9, - 0x21, 0x45, 0xd9, 0x78, 0x35, 0x4b, 0xd4, 0x4e, 0xbf, 0x8d, 0xe3, 0x1f, 0x11, 0x14, 0xd2, 0x3c, - 0x0b, 0xef, 0x66, 0x50, 0xca, 0x30, 0x56, 0xe3, 0xdd, 0x73, 0xe3, 0xb5, 0x5f, 0x7d, 0x8b, 0x60, - 0x4e, 0x19, 0x11, 0x7e, 0x3b, 0x23, 0x57, 0xc2, 0xff, 0x8c, 0xd5, 0x31, 0xa3, 0x75, 0xf3, 0x56, - 0x64, 0xf3, 0x4c, 0xbc, 0x9c, 0xde, 0x3c, 0xe5, 0x80, 0x95, 0x0f, 0x9f, 0x9c, 0x14, 0xd1, 0xd3, - 0x93, 0x22, 0xfa, 0xe7, 0xa4, 0x88, 0xbe, 0x3b, 0x2d, 0xce, 0x3c, 0x3d, 0x2d, 0xce, 0xfc, 0x75, - 0x5a, 0x9c, 
0xf9, 0x7c, 0xa3, 0xee, 0x89, 0x46, 0x58, 0xb3, 0x5c, 0xd6, 0x8a, 0xb3, 0xb8, 0x0d, - 0xe2, 0xf9, 0x67, 0x29, 0xbf, 0x1a, 0x48, 0x2a, 0xba, 0x01, 0xe5, 0xb5, 0x39, 0xf9, 0xef, 0xc1, - 0xda, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x92, 0xec, 0x6d, 0x2e, 0x0d, 0x00, 0x00, + // 974 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x5f, 0x6f, 0xdb, 0x54, + 0x14, 0xaf, 0xd3, 0xad, 0xa2, 0x27, 0xac, 0x94, 0x4b, 0xc5, 0x82, 0x37, 0xb2, 0xe2, 0xc1, 0x56, + 0x4d, 0xd4, 0x56, 0xd2, 0xbf, 0x2a, 0x5b, 0x91, 0x32, 0x15, 0x1e, 0x36, 0x4a, 0x31, 0x62, 0x20, + 0x1e, 0x88, 0x6e, 0xbc, 0xab, 0xc4, 0xd4, 0xf1, 0x75, 0x73, 0xaf, 0x53, 0xa2, 0x69, 0x2f, 0xf0, + 0x01, 0x40, 0x42, 0xe2, 0x4b, 0xf0, 0xc4, 0x1b, 0x6f, 0x48, 0xf0, 0xb2, 0x07, 0x84, 0x26, 0xf1, + 0x82, 0x84, 0x84, 0x50, 0xcb, 0x07, 0x41, 0xbe, 0xf7, 0xba, 0x89, 0x9d, 0x78, 0x4e, 0xba, 0xbc, + 0xd9, 0xc7, 0xe7, 0x77, 0xce, 0xef, 0x77, 0xee, 0xb9, 0xe7, 0x24, 0xf0, 0x46, 0x03, 0x37, 0x7a, + 0x1e, 0xf5, 0x2d, 0xa7, 0x45, 0x9c, 0xc3, 0x80, 0xba, 0x3e, 0x77, 0xfd, 0xa6, 0x75, 0x14, 0x92, + 0x4e, 0xcf, 0x0c, 0x3a, 0x94, 0x53, 0x54, 0x52, 0x2e, 0x66, 0xc2, 0xc5, 0xec, 0x56, 0xf4, 0xeb, + 0xa3, 0xc1, 0x0d, 0x8f, 0xd5, 0x0f, 0x89, 0x82, 0xeb, 0xb7, 0x1c, 0xca, 0xda, 0x94, 0x59, 0x0d, + 0xcc, 0x88, 0x8c, 0x6b, 0x75, 0x2b, 0x0d, 0xc2, 0x71, 0xc5, 0x0a, 0x70, 0xd3, 0xf5, 0x31, 0x77, + 0xa9, 0xaf, 0x7c, 0x97, 0x9a, 0xb4, 0x49, 0xc5, 0xa3, 0x15, 0x3d, 0x29, 0xeb, 0xd5, 0x26, 0xa5, + 0x4d, 0x8f, 0x58, 0x38, 0x70, 0x2d, 0xec, 0xfb, 0x94, 0x0b, 0x08, 0x53, 0x5f, 0x8d, 0xd1, 0x24, + 0x02, 0xdc, 0xc1, 0xed, 0xd8, 0xe7, 0xc6, 0x68, 0x9f, 0xfe, 0x9b, 0xf4, 0x33, 0x7e, 0xd4, 0xe0, + 0xf5, 0x8f, 0x22, 0x8a, 0x36, 0x3e, 0xbe, 0x7b, 0xf6, 0xf1, 0xbe, 0xcb, 0xb8, 0x4d, 0x8e, 0x42, + 0xc2, 0x38, 0xaa, 0xc1, 0x1c, 0xe3, 0x98, 0x87, 0xac, 0xa4, 0x2d, 0x6b, 0x2b, 0x0b, 0xd5, 0x5b, + 0x66, 0x56, 0x75, 0xcc, 0x7e, 0x80, 0x8f, 0x05, 0xc2, 0x56, 0x48, 0xf4, 0x1e, 0x40, 0x5f, 0x79, + 0xa9, 0xb0, 0xac, 0xad, 0x14, 0xab, 0x37, 0x4c, 0x59, 0x26, 0x33, 0x2a, 0x93, 0x29, 0xcb, 0xaf, + 0xca, 0x64, 0x1e, 0xe0, 0x26, 0x51, 0xf9, 0xed, 0x01, 0xa4, 0xf1, 0x9b, 0x06, 0xe5, 0x2c, 0xb6, + 0x2c, 0xa0, 0x3e, 0x23, 0xe8, 0x33, 0x78, 0xa9, 0x83, 0x8f, 0xeb, 0x7d, 0x6e, 0x11, 0xef, 0xd9, + 0x95, 0x62, 0xd5, 0xca, 0xe6, 0x9d, 0x88, 0xf6, 0xa9, 0xcb, 0x5b, 0x1f, 0x10, 0x8e, 0xed, 0x85, + 0xce, 0xa0, 0x99, 0xa1, 0xf7, 0x47, 0x88, 0xb8, 0x99, 0x2b, 0x42, 0xd2, 0x4a, 0xa8, 0xd8, 0x86, + 0xd7, 0x86, 0x45, 0xc4, 0xe5, 0xbe, 0x02, 0xf3, 0x24, 0xa0, 0x4e, 0xab, 0xee, 0x87, 0x6d, 0x51, + 0xf1, 0x0b, 0xf6, 0x0b, 0xc2, 0xb0, 0x1f, 0xb6, 0x0d, 0x0e, 0xfa, 0x28, 0xa4, 0x92, 0xfe, 0x00, + 0x16, 0x92, 0xd2, 0x05, 0xfe, 0x1c, 0xca, 0x2f, 0x25, 0x94, 0x1b, 0xdf, 0x68, 0x70, 0x55, 0xa4, + 0xad, 0x79, 0xec, 0x20, 0x6c, 0x78, 0xae, 0x73, 0x8f, 0xf4, 0x06, 0x5b, 0xe4, 0x59, 0x9c, 0xa7, + 0x76, 0xf6, 0x7f, 0xc4, 0x9d, 0x3a, 0xcc, 0x42, 0xe9, 0x7f, 0x08, 0x97, 0xbb, 0xd8, 0x73, 0x1f, + 0x62, 0x4e, 0x3b, 0xf5, 0x63, 0x97, 0xb7, 0xea, 0xea, 0x5e, 0xc6, 0x2d, 0xb0, 0x9a, 0x5d, 0x88, + 0x07, 0x31, 0x30, 0x2a, 0x42, 0xcd, 0x63, 0xf7, 0x48, 0xcf, 0x5e, 0xea, 0x0e, 0x1b, 0xa7, 0xd8, + 0x06, 0x9b, 0x70, 0x59, 0xe8, 0xd9, 0x8b, 0x2a, 0xa5, 0x2e, 0xcc, 0x38, 0x4d, 0xf0, 0x05, 0x94, + 0x86, 0x71, 0xaa, 0x04, 0x53, 0xb8, 0xac, 0xc6, 0x1e, 0x18, 0xb2, 0xc9, 0x88, 0x43, 0x7c, 0x3e, + 0x90, 0xe5, 0x2e, 0x0d, 0xfb, 0x7d, 0x7a, 0x0d, 0x8a, 0x92, 0xa2, 0x13, 0x59, 0x15, 0x49, 0x10, + 0x26, 0xe1, 0x67, 0xfc, 0x50, 0x80, 0xeb, 0xcf, 0x8c, 0xa3, 0x28, 0x5f, 0x81, 0x79, 0xee, 
0x06, + 0x75, 0x81, 0x8c, 0xb5, 0x72, 0x37, 0x10, 0xfe, 0xe9, 0x2c, 0x85, 0x74, 0x16, 0x74, 0x04, 0x2f, + 0x4a, 0xda, 0xca, 0x63, 0x56, 0x1c, 0xf4, 0x7e, 0xb6, 0xec, 0x31, 0x28, 0x99, 0x03, 0xb6, 0x3d, + 0x9f, 0x77, 0x7a, 0x76, 0x91, 0xf5, 0x2d, 0xfa, 0x2e, 0x2c, 0xa6, 0x1d, 0xd0, 0x22, 0xcc, 0x1e, + 0x92, 0x9e, 0xa0, 0x3f, 0x6f, 0x47, 0x8f, 0x68, 0x09, 0x2e, 0x76, 0xb1, 0x17, 0x12, 0xc5, 0x59, + 0xbe, 0xec, 0x14, 0xb6, 0x35, 0xe3, 0x4b, 0x78, 0x53, 0x90, 0xb8, 0x8f, 0x19, 0x4f, 0x5e, 0xbe, + 0x64, 0x13, 0x4c, 0xe3, 0x2c, 0x8f, 0xe1, 0xad, 0x9c, 0x5c, 0xea, 0x14, 0xf6, 0x33, 0x66, 0xc7, + 0xcd, 0x31, 0x67, 0x47, 0x7a, 0x66, 0x2c, 0x01, 0x12, 0x89, 0x0f, 0xc4, 0x52, 0x52, 0x92, 0x8c, + 0x4f, 0xe0, 0x95, 0x84, 0x55, 0x25, 0xdf, 0x85, 0x39, 0xb9, 0xbc, 0x54, 0xd2, 0xe5, 0xec, 0xa4, + 0x12, 0x59, 0xbb, 0xf0, 0xe4, 0x9f, 0x6b, 0x33, 0xb6, 0x42, 0x55, 0x7f, 0x01, 0xb8, 0x28, 0xe2, + 0xa2, 0x5f, 0x35, 0x78, 0x79, 0x68, 0x37, 0xa0, 0xad, 0xbc, 0x76, 0xc8, 0xd8, 0x7d, 0xfa, 0xf6, + 0xe4, 0x40, 0x29, 0xc9, 0xd8, 0xf9, 0xfa, 0xcf, 0xff, 0xbe, 0x2f, 0xac, 0xa3, 0xaa, 0x35, 0x7a, + 0x11, 0x77, 0x2b, 0x56, 0x6a, 0x4d, 0x59, 0x8f, 0xe4, 0x99, 0x3d, 0x46, 0x3f, 0x6b, 0x70, 0x29, + 0x11, 0x19, 0xad, 0x4d, 0xc2, 0x23, 0x26, 0xbf, 0x3e, 0x19, 0x48, 0x11, 0xbf, 0x2d, 0x88, 0x6f, + 0xa2, 0xf5, 0x71, 0x89, 0x5b, 0x8f, 0xce, 0x46, 0xd5, 0xe3, 0xa8, 0xfe, 0x8b, 0xe9, 0xf9, 0x8c, + 0x36, 0x73, 0x88, 0x64, 0xac, 0x15, 0x7d, 0x6b, 0x62, 0x9c, 0xd2, 0x70, 0x47, 0x68, 0xd8, 0x42, + 0x1b, 0xd9, 0x1a, 0xa2, 0xcd, 0x10, 0x08, 0xb0, 0x58, 0x10, 0x09, 0x11, 0x3f, 0x69, 0x50, 0x1c, + 0x98, 0x0d, 0xa8, 0x92, 0xc3, 0x63, 0x78, 0x80, 0xeb, 0xd5, 0x49, 0x20, 0x8a, 0xf5, 0x3b, 0x82, + 0xf5, 0x06, 0x5a, 0xcb, 0x66, 0x2d, 0x48, 0x26, 0xc8, 0x5a, 0xea, 0x17, 0xd6, 0xef, 0x1a, 0xbc, + 0x3a, 0x7a, 0xaa, 0xa1, 0xdb, 0xe7, 0x1c, 0x86, 0x52, 0xc9, 0x9d, 0xe7, 0x1a, 0xa5, 0xc6, 0x86, + 0x10, 0x65, 0xa1, 0xd5, 0x3c, 0x51, 0x3b, 0x83, 0x63, 0x1c, 0xfd, 0xad, 0x41, 0x29, 0x6b, 0x66, + 0xa1, 0xdd, 0x1c, 0x4a, 0x39, 0x83, 0x55, 0x7f, 0xf7, 0xdc, 0x78, 0x25, 0x6a, 0x57, 0x88, 0xda, + 0x46, 0x9b, 0xd9, 0xa2, 0x3c, 0xcc, 0x78, 0x3d, 0x7d, 0x51, 0xe2, 0x0b, 0xfe, 0xad, 0x06, 0x73, + 0x72, 0x90, 0xa1, 0xb7, 0x73, 0xb8, 0x24, 0xe6, 0xa7, 0xbe, 0x3a, 0xa6, 0xb7, 0xe2, 0xb9, 0x22, + 0x78, 0x1a, 0x68, 0x39, 0x9b, 0xa7, 0x9c, 0xa0, 0xb5, 0x0f, 0x9f, 0x9c, 0x94, 0xb5, 0xa7, 0x27, + 0x65, 0xed, 0xdf, 0x93, 0xb2, 0xf6, 0xdd, 0x69, 0x79, 0xe6, 0xe9, 0x69, 0x79, 0xe6, 0xaf, 0xd3, + 0xf2, 0xcc, 0xe7, 0x1b, 0x4d, 0x97, 0xb7, 0xc2, 0x86, 0xe9, 0xd0, 0x76, 0x1c, 0xc5, 0x69, 0x61, + 0xd7, 0x3f, 0x0b, 0xf9, 0x55, 0x2a, 0x28, 0xef, 0x05, 0x84, 0x35, 0xe6, 0xc4, 0xdf, 0x8b, 0xb5, + 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x28, 0x98, 0x2f, 0x2c, 0x6e, 0x0d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
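With the HTTP route added above in place, the new endpoint can be exercised from the e2e suite in the same style as the gateway helpers introduced earlier in this series (QueryCheckpointForEpoch, QueryTip, and so on). The sketch below is illustrative only and not part of the patch: the helper name is invented, it assumes the ct alias plus the QueryGRPCGateway and util.Cdc plumbing of test/e2e/configurer/chain/queries.go, and it assumes the gateway accepts the CheckpointStatus enum name produced by status.String() as the {status} path segment, which is how the generated runtime.Enum parsing reads it.

func (n *NodeConfig) QueryLastCheckpointWithStatus(status ct.CheckpointStatus) (*ct.QueryLastCheckpointWithStatusResponse, error) {
	// {status} is parsed with runtime.Enum on the gateway side, so the enum's string name is used here
	path := fmt.Sprintf("babylon/checkpointing/v1/last_raw_checkpoint/%s", status.String())
	bz, err := n.QueryGRPCGateway(path)
	require.NoError(n.t, err)

	var resp ct.QueryLastCheckpointWithStatusResponse
	if err := util.Cdc.UnmarshalJSON(bz, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}

A helper along these lines would let flows such as FinalizeSealedEpochs ask directly for the most recent checkpoint in a given (or more mature) status, rather than walking epochs one by one with QueryCheckpointForEpoch.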
diff --git a/x/checkpointing/types/query.pb.gw.go b/x/checkpointing/types/query.pb.gw.go index 8d6963380..833886136 100644 --- a/x/checkpointing/types/query.pb.gw.go +++ b/x/checkpointing/types/query.pb.gw.go @@ -327,6 +327,66 @@ func local_request_Query_RecentEpochStatusCount_0(ctx context.Context, marshaler } +func request_Query_LastCheckpointWithStatus_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLastCheckpointWithStatusRequest + var metadata runtime.ServerMetadata + + var ( + val string + e int32 + ok bool + err error + _ = err + ) + + val, ok = pathParams["status"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "status") + } + + e, err = runtime.Enum(val, CheckpointStatus_value) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "status", err) + } + + protoReq.Status = CheckpointStatus(e) + + msg, err := client.LastCheckpointWithStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_LastCheckpointWithStatus_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLastCheckpointWithStatusRequest + var metadata runtime.ServerMetadata + + var ( + val string + e int32 + ok bool + err error + _ = err + ) + + val, ok = pathParams["status"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "status") + } + + e, err = runtime.Enum(val, CheckpointStatus_value) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "status", err) + } + + protoReq.Status = CheckpointStatus(e) + + msg, err := server.LastCheckpointWithStatus(ctx, &protoReq) + return msg, metadata, err + +} + func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryParamsRequest var metadata runtime.ServerMetadata @@ -466,6 +526,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_LastCheckpointWithStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_LastCheckpointWithStatus_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_LastCheckpointWithStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -630,6 +713,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_LastCheckpointWithStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_LastCheckpointWithStatus_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_LastCheckpointWithStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -664,6 +767,8 @@ var ( pattern_Query_RecentEpochStatusCount_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "checkpointing", "v1", "epochs"}, "status_count", runtime.AssumeColonVerbOpt(false))) + pattern_Query_LastCheckpointWithStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "last_raw_checkpoint", "status"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "checkpointing", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) ) @@ -678,5 +783,7 @@ var ( forward_Query_RecentEpochStatusCount_0 = runtime.ForwardResponseMessage + forward_Query_LastCheckpointWithStatus_0 = runtime.ForwardResponseMessage + forward_Query_Params_0 = runtime.ForwardResponseMessage ) From 4be8fd2a10603740b86ad3eb39c854faccdb3e58 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Mon, 16 Jan 2023 22:03:27 +1100 Subject: [PATCH 18/37] epoching/checkpointing: fuzz test for validators with zero voting power (#276) --- x/checkpointing/keeper/msg_server_test.go | 104 ++++++++++++++++++++++ x/checkpointing/testckpt/helper.go | 62 ++++++++++++- x/epoching/keeper/epoch_msg_queue.go | 23 +++-- x/epoching/testepoching/helper.go | 33 ------- 4 files changed, 181 insertions(+), 41 deletions(-) diff --git a/x/checkpointing/keeper/msg_server_test.go b/x/checkpointing/keeper/msg_server_test.go index 09628e884..ac0ba6f95 100644 --- a/x/checkpointing/keeper/msg_server_test.go +++ b/x/checkpointing/keeper/msg_server_test.go @@ -4,6 +4,7 @@ import ( "math/rand" "testing" + "cosmossdk.io/math" "github.com/babylonchain/babylon/app" appparams "github.com/babylonchain/babylon/app/params" "github.com/babylonchain/babylon/crypto/bls12381" @@ -19,6 +20,82 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" ) +// FuzzWrappedCreateValidator_InsufficientTokens tests adding new validators with zero voting power +// It ensures that validators with zero voting power (i.e., with tokens fewer than sdk.DefaultPowerReduction) +// are unbonded, thus are not included in the validator set +func 
FuzzWrappedCreateValidator_InsufficientTokens(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 4) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + // a genesis validator is generate for setup + helper := testepoching.NewHelper(t) + ek := helper.EpochingKeeper + ck := helper.App.CheckpointingKeeper + msgServer := checkpointingkeeper.NewMsgServerImpl(ck) + + // BeginBlock of block 1, and thus entering epoch 1 + ctx := helper.BeginBlock() + epoch := ek.GetEpoch(ctx) + require.Equal(t, uint64(1), epoch.EpochNumber) + + n := rand.Intn(3) + 1 + addrs := app.AddTestAddrs(helper.App, helper.Ctx, n, sdk.NewInt(100000000)) + + // add n new validators with zero voting power via MsgWrappedCreateValidator + wcvMsgs := make([]*types.MsgWrappedCreateValidator, n) + for i := 0; i < n; i++ { + msg, err := buildMsgWrappedCreateValidatorWithAmount(addrs[i], sdk.DefaultPowerReduction.SubRaw(1)) + require.NoError(t, err) + wcvMsgs[i] = msg + _, err = msgServer.WrappedCreateValidator(ctx, msg) + require.NoError(t, err) + blsPK, err := ck.GetBlsPubKey(ctx, sdk.ValAddress(addrs[i])) + require.NoError(t, err) + require.True(t, msg.Key.Pubkey.Equal(blsPK)) + } + require.Len(t, ek.GetCurrentEpochMsgs(ctx), n) + + // EndBlock of block 1 + ctx = helper.EndBlock() + + // go to BeginBlock of block 11, and thus entering epoch 2 + for i := uint64(0); i < ek.GetParams(ctx).EpochInterval; i++ { + ctx = helper.GenAndApplyEmptyBlock() + } + epoch = ek.GetEpoch(ctx) + require.Equal(t, uint64(2), epoch.EpochNumber) + // ensure epoch 2 has initialised an empty msg queue + require.Empty(t, ek.GetCurrentEpochMsgs(ctx)) + + // ensure the length of current validator set equals to 1 + // since one genesis validator was added when setup + // the rest n validators have zero voting power and thus are ruled out + valSet = ck.GetValidatorSet(ctx, 2) + require.Equal(t, 1, len(valSet)) + + // ensure all validators (not just validators in the val set) have correct bond status + // - the 1st validator is bonded + // - all the rest are unbonded since they have zero voting power + iterator := helper.StakingKeeper.ValidatorsPowerStoreIterator(ctx) + defer iterator.Close() + count := 0 + for ; iterator.Valid(); iterator.Next() { + valAddr := sdk.ValAddress(iterator.Value()) + val, found := helper.StakingKeeper.GetValidator(ctx, valAddr) + require.True(t, found) + count++ + if count == 1 { + require.Equal(t, stakingtypes.Bonded, val.Status) + } else { + require.Equal(t, stakingtypes.Unbonded, val.Status) + } + } + require.Equal(t, len(wcvMsgs)+1, count) + }) +} + // FuzzWrappedCreateValidator tests adding new validators via // MsgWrappedCreateValidator, which first registers BLS pubkey // and then unwrapped into MsgCreateValidator and enqueued into @@ -113,3 +190,30 @@ func buildMsgWrappedCreateValidator(addr sdk.AccAddress) (*types.MsgWrappedCreat return types.NewMsgWrappedCreateValidator(createValidatorMsg, &blsPubKey, pop) } + +func buildMsgWrappedCreateValidatorWithAmount(addr sdk.AccAddress, bondTokens math.Int) (*types.MsgWrappedCreateValidator, error) { + tmValPrivkey := ed25519.GenPrivKey() + bondCoin := sdk.NewCoin(appparams.DefaultBondDenom, bondTokens) + description := stakingtypes.NewDescription("foo_moniker", "", "", "", "") + commission := stakingtypes.NewCommissionRates(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()) + + pk, err := codec.FromTmPubKeyInterface(tmValPrivkey.PubKey()) + if err != nil { + return nil, err + } + + createValidatorMsg, err := stakingtypes.NewMsgCreateValidator( + sdk.ValAddress(addr), pk, 
bondCoin, description, commission, sdk.OneInt(), + ) + if err != nil { + return nil, err + } + blsPrivKey := bls12381.GenPrivKey() + pop, err := privval.BuildPoP(tmValPrivkey, blsPrivKey) + if err != nil { + return nil, err + } + blsPubKey := blsPrivKey.PubKey() + + return types.NewMsgWrappedCreateValidator(createValidatorMsg, &blsPubKey, pop) +} diff --git a/x/checkpointing/testckpt/helper.go b/x/checkpointing/testckpt/helper.go index 0efb6281a..b30e68b24 100644 --- a/x/checkpointing/testckpt/helper.go +++ b/x/checkpointing/testckpt/helper.go @@ -1,16 +1,25 @@ package testckpt import ( + "testing" + + "cosmossdk.io/math" "github.com/babylonchain/babylon/app" + appparams "github.com/babylonchain/babylon/app/params" + "github.com/babylonchain/babylon/crypto/bls12381" "github.com/babylonchain/babylon/testutil/datagen" "github.com/babylonchain/babylon/x/checkpointing/keeper" "github.com/babylonchain/babylon/x/checkpointing/types" + "github.com/babylonchain/babylon/x/epoching" epochingkeeper "github.com/babylonchain/babylon/x/epoching/keeper" "github.com/cosmos/cosmos-sdk/baseapp" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/stretchr/testify/require" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "testing" ) // Helper is a structure which wraps the entire app and exposes functionalities for testing the epoching module @@ -22,6 +31,7 @@ type Helper struct { CheckpointingKeeper *keeper.Keeper MsgSrvr types.MsgServer QueryClient types.QueryClient + StakingKeeper *stakingkeeper.Keeper EpochingKeeper *epochingkeeper.Keeper GenAccs []authtypes.GenesisAccount @@ -35,6 +45,7 @@ func NewHelper(t *testing.T, n int) *Helper { checkpointingKeeper := app.CheckpointingKeeper epochingKeeper := app.EpochingKeeper + stakingKeeper := app.StakingKeeper querier := keeper.Querier{Keeper: checkpointingKeeper} queryHelper := baseapp.NewQueryServerTestHelper(ctx, app.InterfaceRegistry()) types.RegisterQueryServer(queryHelper, querier) @@ -48,7 +59,56 @@ func NewHelper(t *testing.T, n int) *Helper { CheckpointingKeeper: &checkpointingKeeper, MsgSrvr: msgSrvr, QueryClient: queryClient, + StakingKeeper: &stakingKeeper, EpochingKeeper: &epochingKeeper, GenAccs: accs, } } + +// CreateValidator calls handler to create a new staking validator +func (h *Helper) CreateValidator(addr sdk.ValAddress, pk cryptotypes.PubKey, blsPK *bls12381.PublicKey, pop *types.ProofOfPossession, stakeAmount math.Int, ok bool) { + coin := sdk.NewCoin(appparams.DefaultBondDenom, stakeAmount) + h.createValidator(addr, pk, blsPK, pop, coin, ok) +} + +// CreateValidatorWithValPower calls handler to create a new staking validator with zero commission +func (h *Helper) CreateValidatorWithValPower(addr sdk.ValAddress, pk cryptotypes.PubKey, blsPK *bls12381.PublicKey, pop *types.ProofOfPossession, valPower int64, ok bool) math.Int { + amount := h.StakingKeeper.TokensFromConsensusPower(h.Ctx, valPower) + coin := sdk.NewCoin(appparams.DefaultBondDenom, amount) + h.createValidator(addr, pk, blsPK, pop, coin, ok) + return amount +} + +// CreateValidatorMsg returns a message used to create validator in this service. 
+func (h *Helper) CreateValidatorMsg(addr sdk.ValAddress, pk cryptotypes.PubKey, blsPK *bls12381.PublicKey, pop *types.ProofOfPossession, stakeAmount math.Int) *types.MsgWrappedCreateValidator { + coin := sdk.NewCoin(appparams.DefaultBondDenom, stakeAmount) + msg, err := stakingtypes.NewMsgCreateValidator(addr, pk, coin, stakingtypes.Description{}, ZeroCommission(), sdk.OneInt()) + require.NoError(h.t, err) + wmsg, err := types.NewMsgWrappedCreateValidator(msg, blsPK, pop) + require.NoError(h.t, err) + return wmsg +} + +func (h *Helper) createValidator(addr sdk.ValAddress, pk cryptotypes.PubKey, blsPK *bls12381.PublicKey, pop *types.ProofOfPossession, coin sdk.Coin, ok bool) { + msg := h.CreateValidatorMsg(addr, pk, blsPK, pop, coin.Amount) + h.Handle(msg, ok) +} + +// Handle calls epoching handler on a given message +func (h *Helper) Handle(msg sdk.Msg, ok bool) *sdk.Result { + handler := epoching.NewHandler(*h.EpochingKeeper) + res, err := handler(h.Ctx, msg) + if ok { + require.NoError(h.t, err) + require.NotNil(h.t, res) + } else { + require.Error(h.t, err) + require.Nil(h.t, res) + } + return res +} + +// ZeroCommission constructs a commission rates with all zeros. +func ZeroCommission() stakingtypes.CommissionRates { + return stakingtypes.NewCommissionRates(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()) +} diff --git a/x/epoching/keeper/epoch_msg_queue.go b/x/epoching/keeper/epoch_msg_queue.go index 6e70cee6f..b370ba500 100644 --- a/x/epoching/keeper/epoch_msg_queue.go +++ b/x/epoching/keeper/epoch_msg_queue.go @@ -144,11 +144,12 @@ func (k Keeper) HandleQueuedMsg(ctx sdk.Context, msg *types.QueuedMessage) (*sdk return nil, err } // self-bonded to the created validator - k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_CREATED) //nolint:errcheck // either we ignore the error here, or propoagate up the stack - if err != nil { + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_CREATED); err != nil { + return nil, err + } + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_BONDED); err != nil { return nil, err } - k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_BONDED) //nolint:errcheck // either we ignore the error here, or propoagate up the stack case *types.QueuedMessage_MsgDelegate: delAddr, err := sdk.AccAddressFromBech32(unwrappedMsg.MsgDelegate.DelegatorAddress) if err != nil { @@ -159,8 +160,12 @@ func (k Keeper) HandleQueuedMsg(ctx sdk.Context, msg *types.QueuedMessage) (*sdk return nil, err } // created and bonded to the validator - k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_CREATED) //nolint:errcheck // either we ignore the error here, or propoagate up the stack - k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_BONDED) //nolint:errcheck // either we ignore the error here, or propoagate up the stack + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_CREATED); err != nil { + return nil, err + } + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_BONDED); err != nil { + return nil, err + } case *types.QueuedMessage_MsgUndelegate: delAddr, err := sdk.AccAddressFromBech32(unwrappedMsg.MsgUndelegate.DelegatorAddress) if err != nil { @@ -172,7 +177,9 @@ func (k Keeper) HandleQueuedMsg(ctx sdk.Context, msg *types.QueuedMessage) (*sdk } // unbonding from the validator // (in `ApplyMatureUnbonding`) AFTER mature, unbonded from the validator - k.RecordNewDelegationState(ctx, delAddr, valAddr, 
types.BondState_UNBONDING) //nolint:errcheck // either we ignore the error here, or propoagate up the stack + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_UNBONDING); err != nil { + return nil, err + } case *types.QueuedMessage_MsgBeginRedelegate: delAddr, err := sdk.AccAddressFromBech32(unwrappedMsg.MsgBeginRedelegate.DelegatorAddress) if err != nil { @@ -184,7 +191,9 @@ func (k Keeper) HandleQueuedMsg(ctx sdk.Context, msg *types.QueuedMessage) (*sdk } // unbonding from the source validator // (in `ApplyMatureUnbonding`) AFTER mature, unbonded from the source validator, created/bonded to the destination validator - k.RecordNewDelegationState(ctx, delAddr, srcValAddr, types.BondState_UNBONDING) //nolint:errcheck // either we ignore the error here, or propoagate up the stack + if err := k.RecordNewDelegationState(ctx, delAddr, srcValAddr, types.BondState_UNBONDING); err != nil { + return nil, err + } default: panic(sdkerrors.Wrap(types.ErrInvalidQueuedMessageType, msg.String())) } diff --git a/x/epoching/testepoching/helper.go b/x/epoching/testepoching/helper.go index 72500f787..887e71c64 100644 --- a/x/epoching/testepoching/helper.go +++ b/x/epoching/testepoching/helper.go @@ -17,7 +17,6 @@ import ( "github.com/babylonchain/babylon/x/epoching/types" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" - cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" @@ -147,38 +146,6 @@ func (h *Helper) EndBlock() sdk.Context { return h.Ctx } -// CreateValidator calls handler to create a new staking validator -// TODO: change to the wrapped version in the checkpointing module (require modifying checkpointing module) -func (h *Helper) CreateValidator(addr sdk.ValAddress, pk cryptotypes.PubKey, stakeAmount math.Int, ok bool) { - coin := sdk.NewCoin(appparams.DefaultBondDenom, stakeAmount) - h.createValidator(addr, pk, coin, ok) -} - -// CreateValidatorWithValPower calls handler to create a new staking validator with zero commission -// TODO: change to the wrapped version in the checkpointing module (require modifying checkpointing module) -func (h *Helper) CreateValidatorWithValPower(addr sdk.ValAddress, pk cryptotypes.PubKey, valPower int64, ok bool) math.Int { - amount := h.StakingKeeper.TokensFromConsensusPower(h.Ctx, valPower) - coin := sdk.NewCoin(appparams.DefaultBondDenom, amount) - h.createValidator(addr, pk, coin, ok) - return amount -} - -// CreateValidatorMsg returns a message used to create validator in this service. 
-// TODO: change to the wrapped version in the checkpointing module (require modifying checkpointing module) -func (h *Helper) CreateValidatorMsg(addr sdk.ValAddress, pk cryptotypes.PubKey, stakeAmount math.Int) *stakingtypes.MsgCreateValidator { - coin := sdk.NewCoin(appparams.DefaultBondDenom, stakeAmount) - msg, err := stakingtypes.NewMsgCreateValidator(addr, pk, coin, stakingtypes.Description{}, ZeroCommission(), sdk.OneInt()) - require.NoError(h.t, err) - return msg -} - -// TODO: change to the wrapped version in the checkpointing module (require modifying checkpointing module) -func (h *Helper) createValidator(addr sdk.ValAddress, pk cryptotypes.PubKey, coin sdk.Coin, ok bool) { - msg, err := stakingtypes.NewMsgCreateValidator(addr, pk, coin, stakingtypes.Description{}, ZeroCommission(), sdk.OneInt()) - require.NoError(h.t, err) - h.Handle(msg, ok) -} - // WrappedDelegate calls handler to delegate stake for a validator func (h *Helper) WrappedDelegate(delegator sdk.AccAddress, val sdk.ValAddress, amount math.Int) *sdk.Result { coin := sdk.NewCoin(appparams.DefaultBondDenom, amount) From 5584675aab5340d38a519099603a831c7f70083e Mon Sep 17 00:00:00 2001 From: KonradStaniec Date: Mon, 16 Jan 2023 15:01:17 +0100 Subject: [PATCH 19/37] Bump btcd versions to fix 2 consensus issues (#281) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 5adcbccec..2c7954cc8 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ go 1.19 module github.com/babylonchain/babylon require ( - github.com/btcsuite/btcd v0.22.1 + github.com/btcsuite/btcd v0.22.3 github.com/cosmos/cosmos-sdk v0.46.6 github.com/gogo/protobuf v1.3.3 github.com/golang/protobuf v1.5.2 diff --git a/go.sum b/go.sum index 4b479a7ca..5aec91ca3 100644 --- a/go.sum +++ b/go.sum @@ -158,8 +158,9 @@ github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BR github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401/go.mod h1:Sv4JPQ3/M+teHz9Bo5jBpkNcP0x6r7rdihlNL/7tTAs= -github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd v0.22.3 h1:kYNaWFvOw6xvqP0vR20RP1Zq1DVMBxEO8QN5d1/EfNg= +github.com/btcsuite/btcd v0.22.3/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= From a2b5d0e5a2954134b44d7bf53ea6e4e364ab0990 Mon Sep 17 00:00:00 2001 From: Cirrus Gai Date: Mon, 16 Jan 2023 23:44:04 +0800 Subject: [PATCH 20/37] fix: Fix pagination error of RawCheckpointList (#282) --- testutil/datagen/raw_checkpoint.go | 4 +- .../keeper/grpc_query_checkpoint.go | 2 + .../keeper/grpc_query_checkpoint_test.go | 59 +++++++++++++++++++ 3 files changed, 63 insertions(+), 2 deletions(-) diff --git a/testutil/datagen/raw_checkpoint.go b/testutil/datagen/raw_checkpoint.go index 12db94241..a798171c5 100644 --- a/testutil/datagen/raw_checkpoint.go +++ b/testutil/datagen/raw_checkpoint.go @@ -83,8 +83,8 @@ func 
GenRandomSequenceRawCheckpointsWithMeta() []*types.RawCheckpointWithMeta { } func GenSequenceRawCheckpointsWithMeta(tipEpoch uint64) []*types.RawCheckpointWithMeta { - ckpts := make([]*types.RawCheckpointWithMeta, int(tipEpoch)) - for e := uint64(0); e < tipEpoch; e++ { + ckpts := make([]*types.RawCheckpointWithMeta, int(tipEpoch)+1) + for e := uint64(0); e <= tipEpoch; e++ { ckpt := GenRandomRawCheckpointWithMeta() ckpt.Ckpt.EpochNum = e ckpts[int(e)] = ckpt diff --git a/x/checkpointing/keeper/grpc_query_checkpoint.go b/x/checkpointing/keeper/grpc_query_checkpoint.go index 685c63bb0..c2f6d44df 100644 --- a/x/checkpointing/keeper/grpc_query_checkpoint.go +++ b/x/checkpointing/keeper/grpc_query_checkpoint.go @@ -31,7 +31,9 @@ func (k Keeper) RawCheckpointList(ctx context.Context, req *types.QueryRawCheckp } if ckptWithMeta.Status == req.Status { checkpointList = append(checkpointList, ckptWithMeta) + return true, nil } + return false, nil } return true, nil }) diff --git a/x/checkpointing/keeper/grpc_query_checkpoint_test.go b/x/checkpointing/keeper/grpc_query_checkpoint_test.go index fb0e98267..2549144f7 100644 --- a/x/checkpointing/keeper/grpc_query_checkpoint_test.go +++ b/x/checkpointing/keeper/grpc_query_checkpoint_test.go @@ -1,6 +1,9 @@ package keeper_test import ( + "context" + "github.com/babylonchain/babylon/x/checkpointing/keeper" + "github.com/cosmos/cosmos-sdk/types/query" "math/rand" "testing" @@ -125,3 +128,59 @@ func FuzzQueryLastCheckpointWithStatus(f *testing.F) { require.Equal(t, expectedResp, resp) }) } + +func FuzzQueryRawCheckpointList(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + tipEpoch := datagen.RandomInt(100) + 10 + ctrl := gomock.NewController(t) + defer ctrl.Finish() + ek := mocks.NewMockEpochingKeeper(ctrl) + ek.EXPECT().GetEpoch(gomock.Any()).Return(&epochingtypes.Epoch{EpochNumber: tipEpoch}).AnyTimes() + ckptKeeper, ctx, _ := testkeeper.CheckpointingKeeper(t, ek, nil, client.Context{}) + checkpoints := datagen.GenSequenceRawCheckpointsWithMeta(tipEpoch) + finalizedEpoch := datagen.RandomInt(int(tipEpoch)) + + // add Sealed and Finalized checkpoints + for e := uint64(0); e <= tipEpoch; e++ { + if e <= finalizedEpoch { + checkpoints[int(e)].Status = types.Finalized + } else { + checkpoints[int(e)].Status = types.Sealed + } + err := ckptKeeper.AddRawCheckpoint(ctx, checkpoints[int(e)]) + require.NoError(t, err) + } + + finalizedCheckpoints := checkpoints[:finalizedEpoch] + testRawCheckpointListWithType(t, ckptKeeper, ctx, finalizedCheckpoints, 0, types.Finalized) + sealedCheckpoints := checkpoints[finalizedEpoch+1:] + testRawCheckpointListWithType(t, ckptKeeper, ctx, sealedCheckpoints, finalizedEpoch+1, types.Sealed) + }) +} + +func testRawCheckpointListWithType( + t *testing.T, + ckptKeeper *keeper.Keeper, + ctx context.Context, + checkpointList []*types.RawCheckpointWithMeta, + baseEpoch uint64, + status types.CheckpointStatus, +) { + limit := datagen.RandomInt(len(checkpointList)+1) + 1 + pagination := &query.PageRequest{Limit: limit} + req := types.NewQueryRawCheckpointListRequest(pagination, status) + + for ckptsRetrieved := uint64(0); ckptsRetrieved < uint64(len(checkpointList)); ckptsRetrieved += limit { + resp, err := ckptKeeper.RawCheckpointList(ctx, req) + require.NoError(t, err) + for i, ckpt := range resp.RawCheckpoints { + require.Equal(t, baseEpoch+ckptsRetrieved+uint64(i), ckpt.Ckpt.EpochNum) + require.Equal(t, status, ckpt.Status) + } + pagination = 
&query.PageRequest{Key: resp.Pagination.NextKey, Limit: limit} + req = types.NewQueryRawCheckpointListRequest(pagination, status) + } +} From 2598409df9c7df16c6b773b493c861f329bbfbac Mon Sep 17 00:00:00 2001 From: KonradStaniec Date: Tue, 17 Jan 2023 10:20:48 +0100 Subject: [PATCH 21/37] Add simple monitor module (#274) * Add simple monitor module --- app/app.go | 36 +- proto/babylon/monitor/genesis.proto | 10 + proto/babylon/monitor/params.proto | 11 + proto/babylon/monitor/query.proto | 40 ++ test/e2e/configurer/chain/queries.go | 23 + test/e2e/e2e_test.go | 13 + x/btclightclient/keeper/keeper.go | 4 + x/monitor/client/cli/query.go | 25 + x/monitor/client/cli/query_params.go | 34 + x/monitor/client/cli/tx.go | 24 + x/monitor/genesis.go | 21 + x/monitor/genesis_test.go | 33 + x/monitor/handler.go | 23 + x/monitor/keeper/grpc_query.go | 28 + x/monitor/keeper/grpc_query_params.go | 19 + x/monitor/keeper/hooks.go | 27 + x/monitor/keeper/keeper.go | 81 +++ x/monitor/keeper/params.go | 17 + x/monitor/module.go | 177 +++++ x/monitor/module_simulation.go | 55 ++ x/monitor/simulation/simap.go | 15 + x/monitor/types/errors.go | 12 + x/monitor/types/expected_keepers.go | 23 + x/monitor/types/genesis.go | 18 + x/monitor/types/genesis.pb.go | 321 +++++++++ x/monitor/types/genesis_test.go | 31 + x/monitor/types/keys.go | 34 + x/monitor/types/params.go | 32 + x/monitor/types/params.pb.go | 286 +++++++++ x/monitor/types/params_test.go | 16 + x/monitor/types/query.pb.go | 892 ++++++++++++++++++++++++++ x/monitor/types/query.pb.gw.go | 254 ++++++++ 32 files changed, 2627 insertions(+), 8 deletions(-) create mode 100644 proto/babylon/monitor/genesis.proto create mode 100644 proto/babylon/monitor/params.proto create mode 100644 proto/babylon/monitor/query.proto create mode 100644 x/monitor/client/cli/query.go create mode 100644 x/monitor/client/cli/query_params.go create mode 100644 x/monitor/client/cli/tx.go create mode 100644 x/monitor/genesis.go create mode 100644 x/monitor/genesis_test.go create mode 100644 x/monitor/handler.go create mode 100644 x/monitor/keeper/grpc_query.go create mode 100644 x/monitor/keeper/grpc_query_params.go create mode 100644 x/monitor/keeper/hooks.go create mode 100644 x/monitor/keeper/keeper.go create mode 100644 x/monitor/keeper/params.go create mode 100644 x/monitor/module.go create mode 100644 x/monitor/module_simulation.go create mode 100644 x/monitor/simulation/simap.go create mode 100644 x/monitor/types/errors.go create mode 100644 x/monitor/types/expected_keepers.go create mode 100644 x/monitor/types/genesis.go create mode 100644 x/monitor/types/genesis.pb.go create mode 100644 x/monitor/types/genesis_test.go create mode 100644 x/monitor/types/keys.go create mode 100644 x/monitor/types/params.go create mode 100644 x/monitor/types/params.pb.go create mode 100644 x/monitor/types/params_test.go create mode 100644 x/monitor/types/query.pb.go create mode 100644 x/monitor/types/query.pb.gw.go diff --git a/app/app.go b/app/app.go index 6336f3e19..c964260b7 100644 --- a/app/app.go +++ b/app/app.go @@ -102,6 +102,9 @@ import ( "github.com/babylonchain/babylon/x/epoching" epochingkeeper "github.com/babylonchain/babylon/x/epoching/keeper" epochingtypes "github.com/babylonchain/babylon/x/epoching/types" + "github.com/babylonchain/babylon/x/monitor" + monitorkeeper "github.com/babylonchain/babylon/x/monitor/keeper" + monitortypes "github.com/babylonchain/babylon/x/monitor/types" storetypes "github.com/cosmos/cosmos-sdk/store/types" govclient 
"github.com/cosmos/cosmos-sdk/x/gov/client" govv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" @@ -162,6 +165,7 @@ var ( btclightclient.AppModuleBasic{}, btccheckpoint.AppModuleBasic{}, checkpointing.AppModuleBasic{}, + monitor.AppModuleBasic{}, // IBC-related ibc.AppModuleBasic{}, @@ -225,6 +229,7 @@ type BabylonApp struct { BTCLightClientKeeper btclightclientkeeper.Keeper BtcCheckpointKeeper btccheckpointkeeper.Keeper CheckpointingKeeper checkpointingkeeper.Keeper + MonitorKeeper monitorkeeper.Keeper // IBC-related modules IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly @@ -287,6 +292,7 @@ func NewBabylonApp( btclightclienttypes.StoreKey, btccheckpointtypes.StoreKey, checkpointingtypes.StoreKey, + monitortypes.StoreKey, // IBC-related modules ibchost.StoreKey, ibctransfertypes.StoreKey, @@ -467,14 +473,6 @@ func NewBabylonApp( // No more routes can be added app.IBCKeeper.SetRouter(ibcRouter) - // add msgServiceRouter so that the epoching module can forward unwrapped messages to the staking module - epochingKeeper.SetMsgServiceRouter(app.BaseApp.MsgServiceRouter()) - // make ZoneConcierge to subscribe to the epoching's hooks - epochingKeeper.SetHooks( - epochingtypes.NewMultiEpochingHooks(app.ZoneConciergeKeeper.Hooks()), - ) - app.EpochingKeeper = epochingKeeper - btclightclientKeeper := *btclightclientkeeper.NewKeeper( appCodec, keys[btclightclienttypes.StoreKey], @@ -483,6 +481,22 @@ func NewBabylonApp( btcConfig, ) + app.MonitorKeeper = monitorkeeper.NewKeeper( + appCodec, + keys[monitortypes.StoreKey], + keys[monitortypes.StoreKey], + app.GetSubspace(monitortypes.ModuleName), + &btclightclientKeeper, + ) + + // add msgServiceRouter so that the epoching module can forward unwrapped messages to the staking module + epochingKeeper.SetMsgServiceRouter(app.BaseApp.MsgServiceRouter()) + // make ZoneConcierge to subscribe to the epoching's hooks + epochingKeeper.SetHooks( + epochingtypes.NewMultiEpochingHooks(app.ZoneConciergeKeeper.Hooks(), app.MonitorKeeper.Hooks()), + ) + app.EpochingKeeper = epochingKeeper + checkpointingKeeper := checkpointingkeeper.NewKeeper( appCodec, @@ -559,6 +573,7 @@ func NewBabylonApp( btclightclient.NewAppModule(appCodec, app.BTCLightClientKeeper, app.AccountKeeper, app.BankKeeper), btccheckpoint.NewAppModule(appCodec, app.BtcCheckpointKeeper, app.AccountKeeper, app.BankKeeper), checkpointing.NewAppModule(appCodec, app.CheckpointingKeeper, app.AccountKeeper, app.BankKeeper), + monitor.NewAppModule(appCodec, app.MonitorKeeper, app.AccountKeeper, app.BankKeeper), // IBC-related modules ibc.NewAppModule(app.IBCKeeper), transferModule, @@ -581,6 +596,7 @@ func NewBabylonApp( btclightclienttypes.ModuleName, btccheckpointtypes.ModuleName, checkpointingtypes.ModuleName, + monitortypes.ModuleName, // IBC-related modules ibchost.ModuleName, ibctransfertypes.ModuleName, @@ -603,6 +619,7 @@ func NewBabylonApp( btclightclienttypes.ModuleName, btccheckpointtypes.ModuleName, checkpointingtypes.ModuleName, + monitortypes.ModuleName, // IBC-related modules ibchost.ModuleName, ibctransfertypes.ModuleName, @@ -627,6 +644,7 @@ func NewBabylonApp( btclightclienttypes.ModuleName, btccheckpointtypes.ModuleName, checkpointingtypes.ModuleName, + monitortypes.ModuleName, // IBC-related modules ibchost.ModuleName, ibctransfertypes.ModuleName, @@ -666,6 +684,7 @@ func NewBabylonApp( btclightclient.NewAppModule(appCodec, app.BTCLightClientKeeper, app.AccountKeeper, app.BankKeeper), btccheckpoint.NewAppModule(appCodec, 
app.BtcCheckpointKeeper, app.AccountKeeper, app.BankKeeper), checkpointing.NewAppModule(appCodec, app.CheckpointingKeeper, app.AccountKeeper, app.BankKeeper), + monitor.NewAppModule(appCodec, app.MonitorKeeper, app.AccountKeeper, app.BankKeeper), // IBC-related modules ibc.NewAppModule(app.IBCKeeper), transferModule, @@ -898,6 +917,7 @@ func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino paramsKeeper.Subspace(btclightclienttypes.ModuleName) paramsKeeper.Subspace(btccheckpointtypes.ModuleName) paramsKeeper.Subspace(checkpointingtypes.ModuleName) + paramsKeeper.Subspace(monitortypes.ModuleName) // IBC-related modules paramsKeeper.Subspace(ibchost.ModuleName) paramsKeeper.Subspace(ibctransfertypes.ModuleName) diff --git a/proto/babylon/monitor/genesis.proto b/proto/babylon/monitor/genesis.proto new file mode 100644 index 000000000..fb79098d0 --- /dev/null +++ b/proto/babylon/monitor/genesis.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package babylon.monitor.v1; + +import "gogoproto/gogo.proto"; +import "babylon/monitor/params.proto"; + +option go_package = "github.com/babylonchain/babylon/x/monitor/types"; + +// GenesisState defines the monitor module's genesis state. +message GenesisState { Params params = 1 [ (gogoproto.nullable) = false ]; } diff --git a/proto/babylon/monitor/params.proto b/proto/babylon/monitor/params.proto new file mode 100644 index 000000000..9ffe19c63 --- /dev/null +++ b/proto/babylon/monitor/params.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package babylon.monitor.v1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/babylonchain/babylon/x/monitor/types"; + +// Params defines the parameters for the module. +message Params { + option (gogoproto.equal) = true; +} diff --git a/proto/babylon/monitor/query.proto b/proto/babylon/monitor/query.proto new file mode 100644 index 000000000..43e322cec --- /dev/null +++ b/proto/babylon/monitor/query.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; +package babylon.monitor.v1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "babylon/monitor/params.proto"; + +option go_package = "github.com/babylonchain/babylon/x/monitor/types"; + +// Query defines the gRPC querier service. +service Query { + // Parameters queries the parameters of the module. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/babylon/monitor/v1/params"; + } + + // FinishedEpochBtcHeight btc light client height at provided epoch finish + rpc FinishedEpochBtcHeight(QueryFinishedEpochBtcHeightRequest) returns (QueryFinishedEpochBtcHeightResponse) { + option (google.api.http).get = "/babylon/monitor/v1/{epoch_num}"; + } +} + +// QueryParamsRequest is request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is response type for the Query/Params RPC method. +message QueryParamsResponse { + // params holds all the parameters of this module. 
+ Params params = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryFinishedEpochBtcHeightRequest { + uint64 epoch_num = 1; +} + +message QueryFinishedEpochBtcHeightResponse { + // height of btc ligh client when epoch ended + uint64 btc_light_client_height = 1; +} diff --git a/test/e2e/configurer/chain/queries.go b/test/e2e/configurer/chain/queries.go index a30be66ef..2b90047d2 100644 --- a/test/e2e/configurer/chain/queries.go +++ b/test/e2e/configurer/chain/queries.go @@ -18,6 +18,8 @@ import ( "github.com/babylonchain/babylon/test/e2e/util" blc "github.com/babylonchain/babylon/x/btclightclient/types" ct "github.com/babylonchain/babylon/x/checkpointing/types" + etypes "github.com/babylonchain/babylon/x/epoching/types" + mtypes "github.com/babylonchain/babylon/x/monitor/types" zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types" ) @@ -217,3 +219,24 @@ func (n *NodeConfig) QueryCheckpointChainInfo(chainId string) (*zctypes.ChainInf } return infoResponse.ChainInfo, nil } + +func (n *NodeConfig) QueryCurrentEpoch() (uint64, error) { + bz, err := n.QueryGRPCGateway("/babylon/epoching/v1/current_epoch") + require.NoError(n.t, err) + var epochResponse etypes.QueryCurrentEpochResponse + if err := util.Cdc.UnmarshalJSON(bz, &epochResponse); err != nil { + return 0, err + } + return epochResponse.CurrentEpoch, nil +} + +func (n *NodeConfig) QueryLightClientHeighEpochEnd(epoch uint64) (uint64, error) { + monitorPath := fmt.Sprintf("/babylon/monitor/v1/%d", epoch) + bz, err := n.QueryGRPCGateway(monitorPath) + require.NoError(n.t, err) + var mResponse mtypes.QueryFinishedEpochBtcHeightResponse + if err := util.Cdc.UnmarshalJSON(bz, &mResponse); err != nil { + return 0, err + } + return mResponse.BtcLightClientHeight, nil +} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 7f9adb385..3bb5720d5 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -4,6 +4,8 @@ package e2e import ( + "fmt" + "github.com/babylonchain/babylon/test/e2e/initialization" ct "github.com/babylonchain/babylon/x/checkpointing/types" ) @@ -44,6 +46,17 @@ func (s *IntegrationTestSuite) TestIbcCheckpointing() { s.Equal(fininfo.FinalizedChainInfo.ChainId, initialization.ChainBID) s.Equal(fininfo.EpochInfo.EpochNumber, uint64(2)) + currEpoch, err := nonValidatorNode.QueryCurrentEpoch() + s.NoError(err) + + heightAtFinishedEpoch, err := nonValidatorNode.QueryLightClientHeighEpochEnd(currEpoch - 1) + s.NoError(err) + + if heightAtFinishedEpoch == 0 { + // we can only assert, that btc lc height is larger than 0. 
+ s.FailNow(fmt.Sprintf("Light client height should be > 0 on epoch %d", currEpoch-1)) + } + chainB := s.configurer.GetChainConfig(1) _, err = chainB.GetDefaultNode() s.NoError(err) diff --git a/x/btclightclient/keeper/keeper.go b/x/btclightclient/keeper/keeper.go index deba138d6..59792d21d 100644 --- a/x/btclightclient/keeper/keeper.go +++ b/x/btclightclient/keeper/keeper.go @@ -236,3 +236,7 @@ func (k Keeper) IsAncestor(ctx sdk.Context, parentHashBytes *bbn.BTCHeaderHashBy // Return whether the last element of the ancestry is equal to the parent return ancestry[len(ancestry)-1].Eq(parentHeader), nil } + +func (k Keeper) GetTipInfo(ctx sdk.Context) *types.BTCHeaderInfo { + return k.headersState(ctx).GetTip() +} diff --git a/x/monitor/client/cli/query.go b/x/monitor/client/cli/query.go new file mode 100644 index 000000000..50e9b2a2e --- /dev/null +++ b/x/monitor/client/cli/query.go @@ -0,0 +1,25 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + + "github.com/babylonchain/babylon/x/monitor/types" +) + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd(queryRoute string) *cobra.Command { + // Group monitor queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + return cmd +} diff --git a/x/monitor/client/cli/query_params.go b/x/monitor/client/cli/query_params.go new file mode 100644 index 000000000..9578c4226 --- /dev/null +++ b/x/monitor/client/cli/query_params.go @@ -0,0 +1,34 @@ +package cli + +import ( + "context" + + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdQueryParams() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "shows the parameters of the module", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + res, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/monitor/client/cli/tx.go b/x/monitor/client/cli/tx.go new file mode 100644 index 000000000..c1836f6e7 --- /dev/null +++ b/x/monitor/client/cli/tx.go @@ -0,0 +1,24 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + + "github.com/babylonchain/babylon/x/monitor/types" +) + +// GetTxCmd returns the transaction commands for this module +func GetTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + return cmd +} diff --git a/x/monitor/genesis.go b/x/monitor/genesis.go new file mode 100644 index 000000000..9a2cea635 --- /dev/null +++ b/x/monitor/genesis.go @@ -0,0 +1,21 @@ +package monitor + +import ( + "github.com/babylonchain/babylon/x/monitor/keeper" + "github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// InitGenesis initializes the capability module's state from a provided genesis +// state. 
+func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + k.SetParams(ctx, genState.Params) +} + +// ExportGenesis returns the capability module's exported genesis. +func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + genesis := types.DefaultGenesis() + genesis.Params = k.GetParams(ctx) + + return genesis +} diff --git a/x/monitor/genesis_test.go b/x/monitor/genesis_test.go new file mode 100644 index 000000000..a5d8dd915 --- /dev/null +++ b/x/monitor/genesis_test.go @@ -0,0 +1,33 @@ +package monitor_test + +import ( + "testing" + + "github.com/babylonchain/babylon/x/monitor" + "github.com/stretchr/testify/require" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + simapp "github.com/babylonchain/babylon/app" + "github.com/babylonchain/babylon/x/monitor/types" +) + +func TestExportGenesis(t *testing.T) { + app := simapp.Setup(t, false) + ctx := app.BaseApp.NewContext(false, tmproto.Header{}) + + app.MonitorKeeper.SetParams(ctx, types.DefaultParams()) + genesisState := monitor.ExportGenesis(ctx, app.MonitorKeeper) + require.Equal(t, genesisState.Params, types.DefaultParams()) +} + +func TestInitGenesis(t *testing.T) { + app := simapp.Setup(t, false) + ctx := app.BaseApp.NewContext(false, tmproto.Header{}) + + genesisState := types.GenesisState{ + Params: types.Params{}, + } + + monitor.InitGenesis(ctx, app.MonitorKeeper, genesisState) + require.Equal(t, app.MonitorKeeper.GetParams(ctx), genesisState.Params) +} diff --git a/x/monitor/handler.go b/x/monitor/handler.go new file mode 100644 index 000000000..f9d925572 --- /dev/null +++ b/x/monitor/handler.go @@ -0,0 +1,23 @@ +package monitor + +import ( + "fmt" + + "github.com/babylonchain/babylon/x/monitor/keeper" + "github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// NewHandler ... 
+func NewHandler(k keeper.Keeper) sdk.Handler { + + return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + + switch msg := msg.(type) { + default: + errMsg := fmt.Sprintf("unrecognized %s message type: %T", types.ModuleName, msg) + return nil, sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, errMsg) + } + } +} diff --git a/x/monitor/keeper/grpc_query.go b/x/monitor/keeper/grpc_query.go new file mode 100644 index 000000000..5f564cfb1 --- /dev/null +++ b/x/monitor/keeper/grpc_query.go @@ -0,0 +1,28 @@ +package keeper + +import ( + "context" + + "github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ types.QueryServer = Keeper{} + +func (k Keeper) FinishedEpochBtcHeight(c context.Context, req *types.QueryFinishedEpochBtcHeightRequest) (*types.QueryFinishedEpochBtcHeightResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + btcHeight, err := k.LightclientHeightAtEpochEnd(ctx, req.EpochNum) + + if err != nil { + return nil, err + } + + return &types.QueryFinishedEpochBtcHeightResponse{BtcLightClientHeight: btcHeight}, nil +} diff --git a/x/monitor/keeper/grpc_query_params.go b/x/monitor/keeper/grpc_query_params.go new file mode 100644 index 000000000..4cf229c7a --- /dev/null +++ b/x/monitor/keeper/grpc_query_params.go @@ -0,0 +1,19 @@ +package keeper + +import ( + "context" + + "github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + return &types.QueryParamsResponse{Params: k.GetParams(ctx)}, nil +} diff --git a/x/monitor/keeper/hooks.go b/x/monitor/keeper/hooks.go new file mode 100644 index 000000000..c7f91bf7f --- /dev/null +++ b/x/monitor/keeper/hooks.go @@ -0,0 +1,27 @@ +package keeper + +import ( + etypes "github.com/babylonchain/babylon/x/epoching/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Helper interface to be sure Hooks implement both epoching and light client hooks +type HandledHooks interface { + etypes.EpochingHooks +} + +type Hooks struct { + k Keeper +} + +var _ HandledHooks = Hooks{} + +func (k Keeper) Hooks() Hooks { return Hooks{k} } + +func (h Hooks) AfterEpochBegins(ctx sdk.Context, epoch uint64) {} + +func (h Hooks) AfterEpochEnds(ctx sdk.Context, epoch uint64) { + h.k.updateBtcLightClientHeightForEpoch(ctx, epoch) +} + +func (h Hooks) BeforeSlashThreshold(ctx sdk.Context, valSet etypes.ValidatorSet) {} diff --git a/x/monitor/keeper/keeper.go b/x/monitor/keeper/keeper.go new file mode 100644 index 000000000..e711f7fb6 --- /dev/null +++ b/x/monitor/keeper/keeper.go @@ -0,0 +1,81 @@ +package keeper + +import ( + "fmt" + + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/tendermint/tendermint/libs/log" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + memKey storetypes.StoreKey + paramstore paramtypes.Subspace + btcLightClientKeeper types.BTCLightClientKeeper + 
} +) + +func NewKeeper( + cdc codec.BinaryCodec, + storeKey, + memKey storetypes.StoreKey, + ps paramtypes.Subspace, + bk types.BTCLightClientKeeper, +) Keeper { + // set KeyTable if it has not already been set + if !ps.HasKeyTable() { + ps = ps.WithKeyTable(types.ParamKeyTable()) + } + + return Keeper{ + cdc: cdc, + storeKey: storeKey, + memKey: memKey, + paramstore: ps, + btcLightClientKeeper: bk, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +func bytesToUint64(bytes []byte) (uint64, error) { + if len(bytes) != 8 { + return 0, fmt.Errorf("epoch bytes must have exactly 8 bytes") + } + + return sdk.BigEndianToUint64(bytes), nil +} + +func (k Keeper) updateBtcLightClientHeightForEpoch(ctx sdk.Context, epoch uint64) { + store := ctx.KVStore(k.storeKey) + currentTipHeight := k.btcLightClientKeeper.GetTipInfo(ctx).Height + store.Set(types.GetEpochEndLightClientHeightKey(epoch), sdk.Uint64ToBigEndian(currentTipHeight)) +} + +func (k Keeper) LightclientHeightAtEpochEnd(ctx sdk.Context, epoch uint64) (uint64, error) { + store := ctx.KVStore(k.storeKey) + + btcHeightBytes := store.Get(types.GetEpochEndLightClientHeightKey(epoch)) + + if len(btcHeightBytes) == 0 { + // we do not have any key under given epoch, most probably epoch did not finish + // yet + return 0, types.ErrEpochNotFinishedYet + } + + btcHeight, err := bytesToUint64(btcHeightBytes) + + if err != nil { + panic("Invalid data in database") + } + + return btcHeight, nil +} diff --git a/x/monitor/keeper/params.go b/x/monitor/keeper/params.go new file mode 100644 index 000000000..c904062ad --- /dev/null +++ b/x/monitor/keeper/params.go @@ -0,0 +1,17 @@ +package keeper + +import ( + "github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetParams get all parameters as types.Params +func (k Keeper) GetParams(ctx sdk.Context) (params types.Params) { + k.paramstore.GetParamSet(ctx, ¶ms) + return params +} + +// SetParams set the params +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + k.paramstore.SetParamSet(ctx, ¶ms) +} diff --git a/x/monitor/module.go b/x/monitor/module.go new file mode 100644 index 000000000..337d27286 --- /dev/null +++ b/x/monitor/module.go @@ -0,0 +1,177 @@ +package monitor + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/gorilla/mux" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/babylonchain/babylon/x/monitor/client/cli" + "github.com/babylonchain/babylon/x/monitor/keeper" + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface for the capability module. +type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the capability module's name. 
+func (AppModuleBasic) Name() string { + return types.ModuleName +} + +func (AppModuleBasic) RegisterCodec(cdc *codec.LegacyAmino) { + + // types.RegisterCodec(cdc) +} + +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + // types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers the module's interface types +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + // types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns the capability module's default genesis state. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis performs genesis state validation for the capability module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return genState.Validate() +} + +// RegisterRESTRoutes registers the capability module's REST service handlers. +func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) { +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) //nolint:errcheck // generally we don't handle errors in these registration functions +} + +// GetTxCmd returns the capability module's root tx command. +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.GetTxCmd() +} + +// GetQueryCmd returns the capability module's root query command. +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd(types.StoreKey) +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface for the capability module. +type AppModule struct { + AppModuleBasic + + keeper keeper.Keeper + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper + // TODO: add dependencies to staking, slashing and evidence +} + +func NewAppModule( + cdc codec.Codec, + keeper keeper.Keeper, + accountKeeper types.AccountKeeper, + bankKeeper types.BankKeeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + } +} + +// Name returns the capability module's name. +func (am AppModule) Name() string { + return am.AppModuleBasic.Name() +} + +// Route returns the capability module's message routing key. +func (am AppModule) Route() sdk.Route { + return sdk.NewRoute(types.RouterKey, NewHandler(am.keeper)) +} + +// QuerierRoute returns the capability module's query routing key. +func (AppModule) QuerierRoute() string { return types.QuerierRoute } + +// LegacyQuerierHandler returns the capability module's Querier. +func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a GRPC query service to respond to the +// module-specific GRPC queries. 
+func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// RegisterInvariants registers the capability module's invariants. +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the capability module's genesis initialization It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the capability module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// ConsensusVersion implements ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 2 } + +// BeginBlock executes all ABCI BeginBlock logic respective to the capability module. +func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} + +// EndBlock executes all ABCI EndBlock logic respective to the capability module. It +// returns no validator updates. +func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} diff --git a/x/monitor/module_simulation.go b/x/monitor/module_simulation.go new file mode 100644 index 000000000..bf642f766 --- /dev/null +++ b/x/monitor/module_simulation.go @@ -0,0 +1,55 @@ +package monitor + +import ( + "math/rand" + + monitorsimulation "github.com/babylonchain/babylon/x/monitor/simulation" + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/baseapp" + simappparams "github.com/cosmos/cosmos-sdk/simapp/params" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/simulation" +) + +// avoid unused import issue +var ( + _ = monitorsimulation.FindAccount + _ = simappparams.StakePerAccount + _ = simulation.MsgEntryKind + _ = baseapp.Paramspace +) + +// GenerateGenesisState creates a randomized GenState of the module +func (AppModule) GenerateGenesisState(simState *module.SimulationState) { + accs := make([]string, len(simState.Accounts)) + for i, acc := range simState.Accounts { + accs[i] = acc.Address.String() + } + monitorgenesis := types.GenesisState{ + Params: types.DefaultParams(), + } + simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&monitorgenesis) +} + +// ProposalContents doesn't return any content functions for governance proposals +func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent { + return nil +} + +// RandomizedParams creates randomized param changes for the simulator +func (am AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange { + + return []simtypes.ParamChange{} +} + +// RegisterStoreDecoder registers a decoder +func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} + +// WeightedOperations returns the all the gov module operations with their respective weights. 
+func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { + operations := make([]simtypes.WeightedOperation, 0) + + return operations +} diff --git a/x/monitor/simulation/simap.go b/x/monitor/simulation/simap.go new file mode 100644 index 000000000..92c437c0d --- /dev/null +++ b/x/monitor/simulation/simap.go @@ -0,0 +1,15 @@ +package simulation + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" +) + +// FindAccount find a specific address from an account list +func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { + creator, err := sdk.AccAddressFromBech32(address) + if err != nil { + panic(err) + } + return simtypes.FindAccount(accs, creator) +} diff --git a/x/monitor/types/errors.go b/x/monitor/types/errors.go new file mode 100644 index 000000000..56c20fa0b --- /dev/null +++ b/x/monitor/types/errors.go @@ -0,0 +1,12 @@ +package types + +// DONTCOVER + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// x/monitor module sentinel errors +var ( + ErrEpochNotFinishedYet = sdkerrors.Register(ModuleName, 1100, "Epoch not finished yet") +) diff --git a/x/monitor/types/expected_keepers.go b/x/monitor/types/expected_keepers.go new file mode 100644 index 000000000..6777fbff5 --- /dev/null +++ b/x/monitor/types/expected_keepers.go @@ -0,0 +1,23 @@ +package types + +import ( + lc "github.com/babylonchain/babylon/x/btclightclient/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/auth/types" +) + +// AccountKeeper defines the expected account keeper used for simulations (noalias) +type AccountKeeper interface { + GetAccount(ctx sdk.Context, addr sdk.AccAddress) types.AccountI + // Methods imported from account should be defined here +} + +// BankKeeper defines the expected interface needed to retrieve account balances. +type BankKeeper interface { + SpendableCoins(ctx sdk.Context, addr sdk.AccAddress) sdk.Coins + // Methods imported from bank should be defined here +} + +type BTCLightClientKeeper interface { + GetTipInfo(ctx sdk.Context) *lc.BTCHeaderInfo +} diff --git a/x/monitor/types/genesis.go b/x/monitor/types/genesis.go new file mode 100644 index 000000000..a6cdfe807 --- /dev/null +++ b/x/monitor/types/genesis.go @@ -0,0 +1,18 @@ +package types + +// DefaultIndex is the default capability global index +const DefaultIndex uint64 = 1 + +// DefaultGenesis returns the default Capability genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + + return gs.Params.Validate() +} diff --git a/x/monitor/types/genesis.pb.go b/x/monitor/types/genesis.pb.go new file mode 100644 index 000000000..2bfeb15e0 --- /dev/null +++ b/x/monitor/types/genesis.pb.go @@ -0,0 +1,321 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: babylon/monitor/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the monitor module's genesis state. +type GenesisState struct { + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_98b2aa1b23cbbe77, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "babylon.monitor.v1.GenesisState") +} + +func init() { proto.RegisterFile("babylon/monitor/genesis.proto", fileDescriptor_98b2aa1b23cbbe77) } + +var fileDescriptor_98b2aa1b23cbbe77 = []byte{ + // 195 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4d, 0x4a, 0x4c, 0xaa, + 0xcc, 0xc9, 0xcf, 0xd3, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xd2, 0x4f, 0x4f, 0xcd, 0x4b, + 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0x4a, 0xeb, 0x41, 0xa5, + 0xf5, 0xca, 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xd2, 0xfa, 0x20, 0x16, 0x44, 0xa5, + 0x94, 0x0c, 0xba, 0x41, 0x05, 0x89, 0x45, 0x89, 0xb9, 0x50, 0x73, 0x94, 0x3c, 0xb8, 0x78, 0xdc, + 0x21, 0x06, 0x07, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x59, 0x70, 0xb1, 0x41, 0xe4, 0x25, 0x18, 0x15, + 0x18, 0x35, 0xb8, 0x8d, 0xa4, 0xf4, 0x30, 0x2d, 0xd2, 0x0b, 0x00, 0xab, 0x70, 0x62, 0x39, 0x71, + 0x4f, 0x9e, 0x21, 0x08, 0xaa, 0xde, 0xc9, 0xf3, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, + 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, + 0x18, 0xa2, 0xf4, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xa1, 0xa6, + 0x25, 0x67, 0x24, 0x66, 0xe6, 0xc1, 0x38, 0xfa, 0x15, 0x70, 0xb7, 0x95, 0x54, 0x16, 0xa4, 0x16, + 0x27, 0xb1, 0x81, 0xdd, 0x66, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x87, 0x54, 0x4d, 0xde, 0x04, + 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/monitor/types/genesis_test.go b/x/monitor/types/genesis_test.go new file mode 100644 index 000000000..5c5092312 --- /dev/null +++ b/x/monitor/types/genesis_test.go @@ -0,0 +1,31 @@ +package types_test + +import ( + "testing" + + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/stretchr/testify/require" +) + +func TestGenesisState_Validate(t *testing.T) { + for _, tc := range []struct { + desc string + genState *types.GenesisState + valid bool + }{ + { + desc: "default is valid", + genState: types.DefaultGenesis(), + valid: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + err := tc.genState.Validate() + if tc.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} diff --git a/x/monitor/types/keys.go b/x/monitor/types/keys.go new file mode 100644 index 000000000..6e8f79c6a --- /dev/null +++ b/x/monitor/types/keys.go @@ -0,0 +1,34 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +const ( + // ModuleName defines the module name + ModuleName = "monitor" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey is the message route for slashing + RouterKey = ModuleName + + // QuerierRoute defines the module's query routing key + QuerierRoute = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_monitor" +) + +var ( + EpochEndLightClientHeightPrefix = []byte{1} +) + +func KeyPrefix(p string) []byte { + return []byte(p) +} + +func GetEpochEndLightClientHeightKey(e uint64) []byte { + return append(EpochEndLightClientHeightPrefix, sdk.Uint64ToBigEndian(e)...) +} diff --git a/x/monitor/types/params.go b/x/monitor/types/params.go new file mode 100644 index 000000000..4f3215e35 --- /dev/null +++ b/x/monitor/types/params.go @@ -0,0 +1,32 @@ +package types + +import ( + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +// ParamKeyTable the param key table for launch module +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +// NewParams creates a new Params instance +func NewParams() Params { + return Params{} +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams() +} + +// ParamSetPairs get the params.ParamSet +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{} +} + +// Validate validates the set of params +func (p Params) Validate() error { + return nil +} diff --git a/x/monitor/types/params.pb.go b/x/monitor/types/params.pb.go new file mode 100644 index 000000000..70566842b --- /dev/null +++ b/x/monitor/types/params.pb.go @@ -0,0 +1,286 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: babylon/monitor/params.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the parameters for the module. +type Params struct { +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_122b02f3f9b23cbe, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Params)(nil), "babylon.monitor.v1.Params") +} + +func init() { proto.RegisterFile("babylon/monitor/params.proto", fileDescriptor_122b02f3f9b23cbe) } + +var fileDescriptor_122b02f3f9b23cbe = []byte{ + // 153 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x4a, 0x4c, 0xaa, + 0xcc, 0xc9, 0xcf, 0xd3, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xd2, 0x2f, 0x48, 0x2c, 0x4a, + 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0xca, 0xea, 0x41, 0x65, 0xf5, + 0xca, 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xd2, 0xfa, 0x20, 0x16, 0x44, 0xa5, 0x12, + 0x1f, 0x17, 0x5b, 0x00, 0x58, 0xa7, 0x15, 0xcb, 0x8b, 0x05, 0xf2, 0x8c, 0x4e, 0x9e, 0x27, 0x1e, + 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, + 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0xa5, 0x9f, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, + 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0x35, 0x3e, 0x39, 0x23, 0x31, 0x33, 0x0f, 0xc6, 0xd1, 0xaf, 0x80, + 0xbb, 0xa5, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0x6c, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, + 0xff, 0x3f, 0x70, 0x61, 0xbe, 0xab, 0x00, 0x00, 0x00, +} + +func (this *Params) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Params) + if !ok { + that2, ok := that.(Params) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 
{ + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/monitor/types/params_test.go b/x/monitor/types/params_test.go new file mode 100644 index 000000000..53e3c1f6a --- /dev/null +++ b/x/monitor/types/params_test.go @@ -0,0 +1,16 @@ +package types_test + +import ( + "testing" + + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/stretchr/testify/require" +) + +func TestParamsEqual(t *testing.T) { + p1 := types.DefaultParams() + p2 := types.DefaultParams() + + ok := p1.Equal(p2) + require.True(t, ok) +} diff --git a/x/monitor/types/query.pb.go b/x/monitor/types/query.pb.go new file 
mode 100644 index 000000000..2fd362550 --- /dev/null +++ b/x/monitor/types/query.pb.go @@ -0,0 +1,892 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: babylon/monitor/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params holds all the parameters of this module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +type QueryFinishedEpochBtcHeightRequest struct { + EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` +} + +func (m *QueryFinishedEpochBtcHeightRequest) Reset() { *m = QueryFinishedEpochBtcHeightRequest{} } +func (m *QueryFinishedEpochBtcHeightRequest) String() string { return proto.CompactTextString(m) } +func (*QueryFinishedEpochBtcHeightRequest) ProtoMessage() {} +func (*QueryFinishedEpochBtcHeightRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{2} +} +func (m *QueryFinishedEpochBtcHeightRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinishedEpochBtcHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinishedEpochBtcHeightRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinishedEpochBtcHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinishedEpochBtcHeightRequest.Merge(m, src) +} +func (m *QueryFinishedEpochBtcHeightRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryFinishedEpochBtcHeightRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinishedEpochBtcHeightRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinishedEpochBtcHeightRequest proto.InternalMessageInfo + +func (m *QueryFinishedEpochBtcHeightRequest) GetEpochNum() uint64 { + if m != nil { + return m.EpochNum + } + return 0 +} + +type QueryFinishedEpochBtcHeightResponse struct { + // height of btc ligh client when epoch ended + BtcLightClientHeight uint64 `protobuf:"varint,1,opt,name=btc_light_client_height,json=btcLightClientHeight,proto3" json:"btc_light_client_height,omitempty"` +} + +func (m *QueryFinishedEpochBtcHeightResponse) Reset() { *m = QueryFinishedEpochBtcHeightResponse{} } +func (m *QueryFinishedEpochBtcHeightResponse) String() string { return proto.CompactTextString(m) } +func (*QueryFinishedEpochBtcHeightResponse) ProtoMessage() {} +func (*QueryFinishedEpochBtcHeightResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{3} +} +func (m 
*QueryFinishedEpochBtcHeightResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinishedEpochBtcHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinishedEpochBtcHeightResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinishedEpochBtcHeightResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinishedEpochBtcHeightResponse.Merge(m, src) +} +func (m *QueryFinishedEpochBtcHeightResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryFinishedEpochBtcHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinishedEpochBtcHeightResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinishedEpochBtcHeightResponse proto.InternalMessageInfo + +func (m *QueryFinishedEpochBtcHeightResponse) GetBtcLightClientHeight() uint64 { + if m != nil { + return m.BtcLightClientHeight + } + return 0 +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "babylon.monitor.v1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "babylon.monitor.v1.QueryParamsResponse") + proto.RegisterType((*QueryFinishedEpochBtcHeightRequest)(nil), "babylon.monitor.v1.QueryFinishedEpochBtcHeightRequest") + proto.RegisterType((*QueryFinishedEpochBtcHeightResponse)(nil), "babylon.monitor.v1.QueryFinishedEpochBtcHeightResponse") +} + +func init() { proto.RegisterFile("babylon/monitor/query.proto", fileDescriptor_3b70877a7534d1c4) } + +var fileDescriptor_3b70877a7534d1c4 = []byte{ + // 426 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0xcd, 0xaa, 0xd3, 0x40, + 0x14, 0x4e, 0x2e, 0xd7, 0xa2, 0xe3, 0x6e, 0x2c, 0x2a, 0xb9, 0x97, 0x5c, 0x8d, 0xe0, 0x15, 0x17, + 0x19, 0x72, 0xc5, 0x9f, 0xad, 0x15, 0x45, 0x41, 0xfc, 0xe9, 0x52, 0x84, 0x30, 0x33, 0x0e, 0xc9, + 0x40, 0x32, 0x27, 0xcd, 0x4c, 0x8a, 0x45, 0xba, 0xf1, 0x09, 0x04, 0xdf, 0xc4, 0xad, 0x2f, 0xd0, + 0x65, 0xc1, 0x8d, 0x2b, 0x91, 0xd6, 0x07, 0x91, 0x4c, 0xa6, 0x05, 0xdb, 0x5a, 0x71, 0x97, 0x9c, + 0xef, 0xe7, 0x7c, 0xe7, 0x9c, 0x41, 0x47, 0x8c, 0xb2, 0x49, 0x01, 0x8a, 0x94, 0xa0, 0xa4, 0x81, + 0x9a, 0x8c, 0x1a, 0x51, 0x4f, 0xe2, 0xaa, 0x06, 0x03, 0x18, 0x3b, 0x30, 0x76, 0x60, 0x3c, 0x4e, + 0x82, 0x7e, 0x06, 0x19, 0x58, 0x98, 0xb4, 0x5f, 0x1d, 0x33, 0x38, 0xce, 0x00, 0xb2, 0x42, 0x10, + 0x5a, 0x49, 0x42, 0x95, 0x02, 0x43, 0x8d, 0x04, 0xa5, 0x1d, 0x7a, 0x9b, 0x83, 0x2e, 0x41, 0x13, + 0x46, 0xb5, 0xe8, 0x1a, 0x90, 0x71, 0xc2, 0x84, 0xa1, 0x09, 0xa9, 0x68, 0x26, 0x95, 0x25, 0xaf, + 0x9c, 0x36, 0x03, 0x55, 0xb4, 0xa6, 0xa5, 0x73, 0x8a, 0xfa, 0x08, 0xbf, 0x6e, 0xf5, 0xaf, 0x6c, + 0x71, 0x28, 0x46, 0x8d, 0xd0, 0x26, 0x7a, 0x89, 0x2e, 0xfd, 0x51, 0xd5, 0x15, 0x28, 0x2d, 0xf0, + 0x03, 0xd4, 0xeb, 0xc4, 0x57, 0xfd, 0x6b, 0xfe, 0xad, 0x8b, 0x67, 0x41, 0xbc, 0x3d, 0x4f, 0xdc, + 0x69, 0x06, 0x87, 0xb3, 0x1f, 0x27, 0xde, 0xd0, 0xf1, 0xa3, 0x87, 0x28, 0xb2, 0x86, 0x4f, 0xa4, + 0x92, 0x3a, 0x17, 0xef, 0x1e, 0x57, 0xc0, 0xf3, 0x81, 0xe1, 0x4f, 0x85, 0xcc, 0x72, 0xe3, 0xda, + 0xe2, 0x23, 0x74, 0x41, 0xb4, 0x40, 0xaa, 0x9a, 0xd2, 0xb6, 0x38, 0x1c, 0x9e, 0xb7, 0x85, 0x17, + 0x4d, 0x19, 0xbd, 0x45, 0x37, 0xf6, 0x5a, 0xb8, 0x8c, 0x77, 0xd1, 0x15, 0x66, 0x78, 0x5a, 0xb4, + 0xc5, 0x94, 0x17, 0x52, 0x28, 0x93, 0xe6, 0x96, 0xe2, 0x1c, 0xfb, 0xcc, 0xf0, 0xe7, 0xed, 0xff, + 0x23, 0x0b, 0x76, 0xf2, 0xb3, 0xaf, 0x07, 0xe8, 0x9c, 0xb5, 0xc7, 0x53, 0xd4, 
0xeb, 0x46, 0xc0, + 0x37, 0x77, 0x8d, 0xb7, 0xbd, 0xad, 0xe0, 0xf4, 0x9f, 0xbc, 0x2e, 0x5b, 0x14, 0x7d, 0xfc, 0xf6, + 0xeb, 0xf3, 0xc1, 0x31, 0x0e, 0xc8, 0xe6, 0x4d, 0xc6, 0x89, 0x3b, 0x0b, 0xfe, 0xe2, 0xa3, 0xcb, + 0xbb, 0x47, 0xc4, 0xf7, 0xfe, 0xda, 0x67, 0xef, 0x5a, 0x83, 0xfb, 0xff, 0xad, 0x73, 0x79, 0x4f, + 0x6d, 0xde, 0xeb, 0xf8, 0x64, 0x57, 0xde, 0x0f, 0xeb, 0x53, 0x4d, 0x07, 0xcf, 0x66, 0x8b, 0xd0, + 0x9f, 0x2f, 0x42, 0xff, 0xe7, 0x22, 0xf4, 0x3f, 0x2d, 0x43, 0x6f, 0xbe, 0x0c, 0xbd, 0xef, 0xcb, + 0xd0, 0x7b, 0x43, 0x32, 0x69, 0xf2, 0x86, 0xc5, 0x1c, 0xca, 0x95, 0x09, 0xcf, 0xa9, 0x54, 0x6b, + 0xc7, 0xf7, 0x6b, 0x4f, 0x33, 0xa9, 0x84, 0x66, 0x3d, 0xfb, 0x2e, 0xef, 0xfc, 0x0e, 0x00, 0x00, + 0xff, 0xff, 0x3f, 0x2c, 0xa9, 0x89, 0x48, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Parameters queries the parameters of the module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) + // FinishedEpochBtcHeight btc light client height at provided epoch finish + FinishedEpochBtcHeight(ctx context.Context, in *QueryFinishedEpochBtcHeightRequest, opts ...grpc.CallOption) (*QueryFinishedEpochBtcHeightResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/babylon.monitor.v1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) FinishedEpochBtcHeight(ctx context.Context, in *QueryFinishedEpochBtcHeightRequest, opts ...grpc.CallOption) (*QueryFinishedEpochBtcHeightResponse, error) { + out := new(QueryFinishedEpochBtcHeightResponse) + err := c.cc.Invoke(ctx, "/babylon.monitor.v1.Query/FinishedEpochBtcHeight", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Parameters queries the parameters of the module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) + // FinishedEpochBtcHeight btc light client height at provided epoch finish + FinishedEpochBtcHeight(context.Context, *QueryFinishedEpochBtcHeightRequest) (*QueryFinishedEpochBtcHeightResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
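For context, a minimal client-side sketch of calling these generated query RPCs against a node's gRPC endpoint; the address and epoch number are placeholder assumptions, not values from this patch:

package main

import (
	"context"
	"fmt"
	"log"

	monitortypes "github.com/babylonchain/babylon/x/monitor/types"
	"google.golang.org/grpc"
)

func main() {
	// "localhost:9090" is an assumed gRPC address of a running Babylon node.
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := monitortypes.NewQueryClient(conn)
	resp, err := client.FinishedEpochBtcHeight(context.Background(),
		&monitortypes.QueryFinishedEpochBtcHeightRequest{EpochNum: 1})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("BTC light client height when epoch 1 finished:", resp.BtcLightClientHeight)
}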
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} +func (*UnimplementedQueryServer) FinishedEpochBtcHeight(ctx context.Context, req *QueryFinishedEpochBtcHeightRequest) (*QueryFinishedEpochBtcHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinishedEpochBtcHeight not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.monitor.v1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_FinishedEpochBtcHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryFinishedEpochBtcHeightRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).FinishedEpochBtcHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.monitor.v1.Query/FinishedEpochBtcHeight", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).FinishedEpochBtcHeight(ctx, req.(*QueryFinishedEpochBtcHeightRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "babylon.monitor.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + { + MethodName: "FinishedEpochBtcHeight", + Handler: _Query_FinishedEpochBtcHeight_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "babylon/monitor/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryFinishedEpochBtcHeightRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinishedEpochBtcHeightRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinishedEpochBtcHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EpochNum != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EpochNum)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryFinishedEpochBtcHeightResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinishedEpochBtcHeightResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinishedEpochBtcHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BtcLightClientHeight != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.BtcLightClientHeight)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryFinishedEpochBtcHeightRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) + } + return n +} + +func (m *QueryFinishedEpochBtcHeightResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BtcLightClientHeight != 0 { + n += 1 + sovQuery(uint64(m.BtcLightClientHeight)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF 
+ } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinishedEpochBtcHeightRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinishedEpochBtcHeightRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinishedEpochBtcHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + } + m.EpochNum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinishedEpochBtcHeightResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinishedEpochBtcHeightResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinishedEpochBtcHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BtcLightClientHeight", wireType) + } + m.BtcLightClientHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BtcLightClientHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/monitor/types/query.pb.gw.go b/x/monitor/types/query.pb.gw.go new file mode 100644 index 000000000..de833cc5c --- /dev/null +++ b/x/monitor/types/query.pb.gw.go @@ -0,0 +1,254 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: babylon/monitor/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_FinishedEpochBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinishedEpochBtcHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := client.FinishedEpochBtcHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_FinishedEpochBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinishedEpochBtcHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := server.FinishedEpochBtcHeight(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
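As a usage sketch, not part of the patch, these gateway handlers are typically wired through RegisterQueryHandlerFromEndpoint so the gRPC queries become REST routes; the gRPC endpoint and REST listen address below are assumptions:

package main

import (
	"context"
	"log"
	"net/http"

	monitortypes "github.com/babylonchain/babylon/x/monitor/types"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	// "localhost:9090" is an assumed node gRPC endpoint; ":8080" an assumed REST port.
	err := monitortypes.RegisterQueryHandlerFromEndpoint(
		ctx, mux, "localhost:9090", []grpc.DialOption{grpc.WithInsecure()},
	)
	if err != nil {
		log.Fatal(err)
	}
	// The mux now serves the GET routes registered by this file, e.g. the
	// /babylon/monitor/v1/params pattern defined further below.
	log.Fatal(http.ListenAndServe(":8080", mux))
}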
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_FinishedEpochBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_FinishedEpochBtcHeight_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_FinishedEpochBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_FinishedEpochBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_FinishedEpochBtcHeight_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_FinishedEpochBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "monitor", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_FinishedEpochBtcHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"babylon", "monitor", "v1", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage + + forward_Query_FinishedEpochBtcHeight_0 = runtime.ForwardResponseMessage +) From df7d98d85bfe2aaf4d80b35a0e50131918361b77 Mon Sep 17 00:00:00 2001 From: Cirrus Gai Date: Tue, 17 Jan 2023 23:16:27 +0800 Subject: [PATCH 22/37] fix: API/Fix checkpoint list total error (#283) --- x/checkpointing/keeper/grpc_query_checkpoint.go | 17 ++++++++--------- .../keeper/grpc_query_checkpoint_test.go | 10 +++++++--- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/x/checkpointing/keeper/grpc_query_checkpoint.go b/x/checkpointing/keeper/grpc_query_checkpoint.go index c2f6d44df..34350ce83 100644 --- a/x/checkpointing/keeper/grpc_query_checkpoint.go +++ b/x/checkpointing/keeper/grpc_query_checkpoint.go @@ -24,18 +24,17 @@ func (k Keeper) RawCheckpointList(ctx context.Context, req *types.QueryRawCheckp store := k.CheckpointsState(sdkCtx).checkpoints pageRes, err := query.FilteredPaginate(store, req.Pagination, func(_ []byte, value []byte, accumulate bool) (bool, error) { - if accumulate { - ckptWithMeta, err := types.BytesToCkptWithMeta(k.cdc, value) - if err != nil { - return false, err - } - if ckptWithMeta.Status == req.Status { + ckptWithMeta, err := types.BytesToCkptWithMeta(k.cdc, value) + if err != nil { + return false, err + } + if ckptWithMeta.Status == req.Status { + if accumulate { checkpointList = append(checkpointList, ckptWithMeta) - return true, nil } - return false, nil + return true, nil } - return true, nil + return false, nil }) if err != nil { diff --git a/x/checkpointing/keeper/grpc_query_checkpoint_test.go b/x/checkpointing/keeper/grpc_query_checkpoint_test.go index 2549144f7..6f6f08e54 100644 --- a/x/checkpointing/keeper/grpc_query_checkpoint_test.go +++ b/x/checkpointing/keeper/grpc_query_checkpoint_test.go @@ -129,12 +129,13 @@ func FuzzQueryLastCheckpointWithStatus(f *testing.F) { }) } +//func TestQueryRawCheckpointList(t *testing.T) { func FuzzQueryRawCheckpointList(f *testing.F) { datagen.AddRandomSeedsToFuzzer(f, 10) f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - tipEpoch := datagen.RandomInt(100) + 10 + tipEpoch := datagen.RandomInt(10) + 10 ctrl := gomock.NewController(t) defer ctrl.Finish() ek := mocks.NewMockEpochingKeeper(ctrl) @@ -154,7 +155,7 @@ func FuzzQueryRawCheckpointList(f *testing.F) { require.NoError(t, err) } - finalizedCheckpoints := checkpoints[:finalizedEpoch] + finalizedCheckpoints := checkpoints[:finalizedEpoch+1] testRawCheckpointListWithType(t, ckptKeeper, ctx, finalizedCheckpoints, 0, types.Finalized) sealedCheckpoints := checkpoints[finalizedEpoch+1:] testRawCheckpointListWithType(t, ckptKeeper, ctx, sealedCheckpoints, finalizedEpoch+1, types.Sealed) @@ -170,9 +171,12 @@ func testRawCheckpointListWithType( status types.CheckpointStatus, ) { limit := datagen.RandomInt(len(checkpointList)+1) + 1 - pagination := &query.PageRequest{Limit: limit} + pagination := &query.PageRequest{Limit: limit, CountTotal: true} req := types.NewQueryRawCheckpointListRequest(pagination, status) + resp, err := ckptKeeper.RawCheckpointList(ctx, 
req) + require.NoError(t, err) + require.Equal(t, uint64(len(checkpointList)), resp.Pagination.Total) for ckptsRetrieved := uint64(0); ckptsRetrieved < uint64(len(checkpointList)); ckptsRetrieved += limit { resp, err := ckptKeeper.RawCheckpointList(ctx, req) require.NoError(t, err) From f1a60fe22bbe04acc6342c7b285c820373e816e3 Mon Sep 17 00:00:00 2001 From: KonradStaniec Date: Wed, 18 Jan 2023 09:18:19 +0100 Subject: [PATCH 23/37] Improve btc checkpoint data model (#284) * Improve btc checkpoint data model --- .../babylon/btccheckpoint/btccheckpoint.proto | 22 +- testutil/datagen/btc_transaction.go | 6 +- x/btccheckpoint/keeper/keeper.go | 31 +-- x/btccheckpoint/keeper/msg_server.go | 10 +- x/btccheckpoint/keeper/msg_server_test.go | 67 +++-- x/btccheckpoint/types/btccheckpoint.pb.go | 228 +++++++++--------- x/btccheckpoint/types/expected_keepers.go | 7 +- x/btccheckpoint/types/mock_keepers.go | 15 +- x/btccheckpoint/types/msgs.go | 8 +- x/btccheckpoint/types/types.go | 28 +-- x/checkpointing/keeper/keeper.go | 17 +- x/checkpointing/keeper/keeper_test.go | 21 +- x/zoneconcierge/keeper/grpc_query.go | 35 ++- x/zoneconcierge/keeper/grpc_query_test.go | 8 +- .../keeper/proof_finalized_chain_info.go | 2 - x/zoneconcierge/types/expected_keepers.go | 3 +- x/zoneconcierge/types/mocked_keepers.go | 34 ++- 17 files changed, 281 insertions(+), 261 deletions(-) diff --git a/proto/babylon/btccheckpoint/btccheckpoint.proto b/proto/babylon/btccheckpoint/btccheckpoint.proto index bdb44326f..27b60a588 100644 --- a/proto/babylon/btccheckpoint/btccheckpoint.proto +++ b/proto/babylon/btccheckpoint/btccheckpoint.proto @@ -81,14 +81,14 @@ message TransactionInfo { // key is the position (txIdx, blockHash) of this tx on BTC blockchain // Although it is already a part of SubmissionKey, we store it here again // to make TransactionInfo self-contained. - // For example, storing the key allows TransactionInfo to not relay on - // the fact that TransactionInfo will be ordered in the same order as + // For example, storing the key allows TransactionInfo to not relay on + // the fact that TransactionInfo will be ordered in the same order as // TransactionKeys in SubmissionKey. TransactionKey key = 1; // transaction is the full transaction in bytes bytes transaction = 2; // proof is the Merkle proof that this tx is included in the position in `key` - // TODO: maybe it could use here better format as we already processed and + // TODO: maybe it could use here better format as we already processed and // valideated the proof? bytes proof = 3; } @@ -99,15 +99,18 @@ message TransactionInfo { // and blockshash in enough to retrieve is from lightclient message SubmissionData { // TODO: this could probably be better typed - // Address of submitter of given checkpoint. Required to payup the reward to - // submitter of given checkpoint - bytes submitter = 1; + // Address of the vigiliatne which submitted the submissions, calculated from + // submission message itself + bytes vigilante_address = 1; + + // Address of the checkpoint submitter, extracted from the checkpoint itself. + bytes submitter_address = 2; // txs_info is the two `TransactionInfo`s corresponding to the submission // It is used for // - recovering address of sender of btc transction to payup the reward. 
// - allowing the ZoneConcierge module to prove the checkpoint is submitted to BTC - repeated TransactionInfo txs_info = 2; - uint64 epoch = 3; + repeated TransactionInfo txs_info = 3; + uint64 epoch = 4; } // Data stored in db and indexed by epoch number @@ -119,8 +122,5 @@ message EpochData { // Current btc status of the epoch BtcStatus status = 2; - - // Required to comunicate with checkpoint module about checkpoint status - bytes raw_checkpoint = 3; } diff --git a/testutil/datagen/btc_transaction.go b/testutil/datagen/btc_transaction.go index b3ea03b1f..19496ba06 100644 --- a/testutil/datagen/btc_transaction.go +++ b/testutil/datagen/btc_transaction.go @@ -501,7 +501,7 @@ func getExpectedOpReturn(tag txformat.BabylonTag, f []byte, s []byte) []byte { return connected } -func RandomRawCheckpointDataForEpoch(e uint64) *TestRawCheckpointData { +func RandomRawCheckpointDataForEpoch(e uint64) (*TestRawCheckpointData, *txformat.RawBtcCheckpoint) { checkpointData := getRandomCheckpointDataForEpoch(e) rawBTCCkpt := &txformat.RawBtcCheckpoint{ Epoch: checkpointData.epoch, @@ -510,7 +510,7 @@ func RandomRawCheckpointDataForEpoch(e uint64) *TestRawCheckpointData { SubmitterAddress: checkpointData.submitterAddress, BlsSig: checkpointData.blsSig, } - return EncodeRawCkptToTestData(rawBTCCkpt) + return EncodeRawCkptToTestData(rawBTCCkpt), rawBTCCkpt } func EncodeRawCkptToTestData(rawBTCCkpt *txformat.RawBtcCheckpoint) *TestRawCheckpointData { @@ -545,7 +545,7 @@ func GenerateMessageWithRandomSubmitterForEpoch(epoch uint64) *btcctypes.MsgInse tx2 := numInRange(1, 99) // in those tests epoch is not important - raw := RandomRawCheckpointDataForEpoch(epoch) + raw, _ := RandomRawCheckpointDataForEpoch(epoch) blck1 := CreateBlock(0, uint32(numTransactions), uint32(tx1), raw.FirstPart) diff --git a/x/btccheckpoint/keeper/keeper.go b/x/btccheckpoint/keeper/keeper.go index b0bdd6e0f..d49b87b1c 100644 --- a/x/btccheckpoint/keeper/keeper.go +++ b/x/btccheckpoint/keeper/keeper.go @@ -8,7 +8,6 @@ import ( txformat "github.com/babylonchain/babylon/btctxformatter" bbn "github.com/babylonchain/babylon/types" "github.com/babylonchain/babylon/x/btccheckpoint/types" - checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/store/prefix" storetypes "github.com/cosmos/cosmos-sdk/store/types" @@ -166,10 +165,6 @@ func (k Keeper) GetSubmissionBtcInfo(ctx sdk.Context, sk types.SubmissionKey) (* }, nil } -func (k Keeper) GetCheckpointEpoch(ctx sdk.Context, c []byte) (uint64, error) { - return k.checkpointingKeeper.CheckpointEpoch(ctx, c) -} - func (k Keeper) SubmissionExists(ctx sdk.Context, sk types.SubmissionKey) bool { return k.GetSubmissionData(ctx, sk) != nil } @@ -179,7 +174,9 @@ func (k Keeper) GetEpochData(ctx sdk.Context, e uint64) *types.EpochData { store := ctx.KVStore(k.storeKey) bytes := store.Get(types.GetEpochIndexKey(e)) - if len(bytes) == 0 { + // note: Cannot check len(bytes) == 0, as empty bytes encoding of types.EpochData + // is epoch data with Status == Submitted and no valid submissions + if bytes == nil { return nil } @@ -188,28 +185,22 @@ func (k Keeper) GetEpochData(ctx sdk.Context, e uint64) *types.EpochData { return ed } -// GetFinalizedEpochDataWithBestSubmission gets the status, raw checkpoint bytes, -// and the best submission of a given epoch -func (k Keeper) GetFinalizedEpochDataWithBestSubmission(ctx sdk.Context, epochNumber uint64) (types.BtcStatus, *checkpointingtypes.RawCheckpoint, *types.SubmissionKey, 
error) { +// GetBestSubmission gets the status and the best submission of a given finalized epoch +func (k Keeper) GetBestSubmission(ctx sdk.Context, epochNumber uint64) (types.BtcStatus, *types.SubmissionKey, error) { // find the btc checkpoint tx index of this epoch ed := k.GetEpochData(ctx, epochNumber) if ed == nil { - return 0, nil, nil, types.ErrNoCheckpointsForPreviousEpoch + return 0, nil, types.ErrNoCheckpointsForPreviousEpoch } if ed.Status != types.Finalized { - return 0, nil, nil, fmt.Errorf("epoch %d has not been finalized yet", epochNumber) + return 0, nil, fmt.Errorf("epoch %d has not been finalized yet", epochNumber) } if len(ed.Key) == 0 { - return 0, nil, nil, types.ErrNoCheckpointsForPreviousEpoch + return 0, nil, types.ErrNoCheckpointsForPreviousEpoch } bestSubmissionKey := ed.Key[0] // index of checkpoint tx on BTC - // get raw checkpoint of this epoch - rawCheckpoint, err := checkpointingtypes.FromBTCCkptBytesToRawCkpt(ed.RawCheckpoint) - if err != nil { - return 0, nil, nil, err - } - return ed.Status, rawCheckpoint, bestSubmissionKey, nil + return ed.Status, bestSubmissionKey, nil } // checkAncestors checks if there is at least one ancestor in previous epoch submissions @@ -287,7 +278,6 @@ func (k Keeper) addEpochSubmission( epochNum uint64, sk types.SubmissionKey, sd types.SubmissionData, - epochRawCheckpoint []byte, ) error { ed := k.GetEpochData(ctx, epochNum) @@ -300,7 +290,7 @@ func (k Keeper) addEpochSubmission( // if ed is nil, it means it is our first submission for this epoch if ed == nil { // we do not have any data saved yet - newEd := types.NewEmptyEpochData(epochRawCheckpoint) + newEd := types.NewEmptyEpochData() ed = &newEd } @@ -436,7 +426,6 @@ func (k Keeper) clearEpochData( epoch []byte, epochDataStore prefix.Store, currentEpoch *types.EpochData) { - for _, sk := range currentEpoch.Key { k.deleteSubmission(ctx, *sk) } diff --git a/x/btccheckpoint/keeper/msg_server.go b/x/btccheckpoint/keeper/msg_server.go index 79c37bbe6..69ae6e0d3 100644 --- a/x/btccheckpoint/keeper/msg_server.go +++ b/x/btccheckpoint/keeper/msg_server.go @@ -44,19 +44,22 @@ func (m msgServer) InsertBTCSpvProof(ctx context.Context, req *types.MsgInsertBT return nil, types.ErrInvalidHeader.Wrap(err.Error()) } - rawCheckpointBytes := rawSubmission.GetRawCheckPointBytes() // At this point: // - every proof of inclusion is valid i.e every transaction is proved to be // part of provided block and contains some OP_RETURN data // - header is proved to be part of the chain we know about through BTCLightClient // - this is new checkpoint submission - // Get info about this checkpoints epoch - epochNum, err := m.k.GetCheckpointEpoch(sdkCtx, rawCheckpointBytes) + // Verify if this is expected checkpoint + err = m.k.checkpointingKeeper.VerifyCheckpoint(sdkCtx, rawSubmission.CheckpointData) if err != nil { return nil, err } + // At this point we know this is a valid checkpoint for this epoch as this was validated + // by checkpointing module + epochNum := rawSubmission.CheckpointData.Epoch + err = m.k.checkAncestors(sdkCtx, epochNum, newSubmissionOldestHeaderDepth) if err != nil { @@ -80,7 +83,6 @@ func (m msgServer) InsertBTCSpvProof(ctx context.Context, req *types.MsgInsertBT epochNum, submissionKey, submissionData, - rawCheckpointBytes, ) if err != nil { diff --git a/x/btccheckpoint/keeper/msg_server_test.go b/x/btccheckpoint/keeper/msg_server_test.go index 7de9ffe84..e6983452e 100644 --- a/x/btccheckpoint/keeper/msg_server_test.go +++ b/x/btccheckpoint/keeper/msg_server_test.go @@ -45,11 
+45,10 @@ func b2TxIdx(m *btcctypes.MsgInsertBTCSpvProof) uint32 { func InitTestKeepers( t *testing.T, - epoch uint64, ) *TestKeepers { lc := btcctypes.NewMockBTCLightClientKeeper() - cc := btcctypes.NewMockCheckpointingKeeper(epoch) + cc := btcctypes.NewMockCheckpointingKeeper() k, ctx := keepertest.NewBTCCheckpointKeeper(t, lc, cc, chaincfg.SimNetParams.PowLimit) @@ -69,10 +68,6 @@ func (k *TestKeepers) insertProofMsg(msg *btcctypes.MsgInsertBTCSpvProof) (*btcc return k.MsgSrv.InsertBTCSpvProof(k.Ctx, msg) } -func (k *TestKeepers) setEpoch(epoch uint64) { - k.Checkpointing.SetEpoch(epoch) -} - func (k *TestKeepers) GetEpochData(e uint64) *btcctypes.EpochData { return k.BTCCheckpoint.GetEpochData(k.SdkCtx, e) } @@ -88,13 +83,13 @@ func (k *TestKeepers) onTipChange() { func TestRejectDuplicatedSubmission(t *testing.T) { rand.Seed(time.Now().Unix()) epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) @@ -123,12 +118,12 @@ func TestRejectDuplicatedSubmission(t *testing.T) { func TestRejectUnknownToBtcLightClient(t *testing.T) { rand.Seed(time.Now().Unix()) epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) @@ -147,12 +142,12 @@ func TestRejectUnknownToBtcLightClient(t *testing.T) { func TestRejectSubmissionsNotOnMainchain(t *testing.T) { rand.Seed(time.Now().Unix()) epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) @@ -184,12 +179,12 @@ func TestRejectSubmissionsNotOnMainchain(t *testing.T) { func TestSubmitValidNewCheckpoint(t *testing.T) { rand.Seed(time.Now().Unix()) epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, rawBtcCheckpoint := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) // here we will only have valid unconfirmed submissions - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) @@ -211,10 +206,6 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { t.Errorf("Epoch should be in submitted state after processing message") } - if !bytes.Equal(raw.ExpectedOpReturn, ed.RawCheckpoint) { - t.Errorf("Epoch does not contain expected op return data") - } - submissionKey := ed.Key[0] submissionData := tk.getSubmissionData(*submissionKey) @@ -231,6 +222,10 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { t.Errorf("Submission data with invalid TransactionInfo") } + if !bytes.Equal(rawBtcCheckpoint.SubmitterAddress, submissionData.SubmitterAddress) { + t.Errorf("Submission data does not contain expected submitter address") + } + for i, txInfo 
:= range submissionData.TxsInfo { require.Equal(t, submissionKey.Key[i].Index, txInfo.Key.Index) require.True(t, submissionKey.Key[i].Hash.Eq(txInfo.Key.Hash)) @@ -249,20 +244,19 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { func TestRejectSubmissionWithoutSubmissionsForPreviousEpoch(t *testing.T) { rand.Seed(time.Now().Unix()) - epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + epoch := uint64(2) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) // here we will only have valid unconfirmed submissions - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) // Now we will return depth enough for moving submission to be submitted tk.BTCLightClient.SetDepth(blck1.HeaderBytes.Hash(), int64(0)) tk.BTCLightClient.SetDepth(blck2.HeaderBytes.Hash(), int64(1)) - tk.Checkpointing.SetEpoch(2) _, err := tk.insertProofMsg(msg) @@ -277,12 +271,12 @@ func TestRejectSubmissionWithoutSubmissionsForPreviousEpoch(t *testing.T) { func TestRejectSubmissionWithoutAncestorsOnMainchainInPreviousEpoch(t *testing.T) { rand.Seed(time.Now().Unix()) epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) epoch1Block1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) epoch1Block2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) // here we will only have valid unconfirmed submissions - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{epoch1Block1, epoch1Block2}) // Now we will return depth enough for moving submission to be submitted @@ -294,12 +288,9 @@ func TestRejectSubmissionWithoutAncestorsOnMainchainInPreviousEpoch(t *testing.T require.NoErrorf(t, err, "Unexpected message processing error: %v", err) epoch2 := uint64(2) - raw2 := dg.RandomRawCheckpointDataForEpoch(epoch2) + raw2, _ := dg.RandomRawCheckpointDataForEpoch(epoch2) epoch2Block1 := dg.CreateBlock(1, 19, 2, raw2.FirstPart) epoch2Block2 := dg.CreateBlock(2, 14, 7, raw2.SecondPart) - // Submitting checkpoints for epoch 2, there should be at least one submission - // for epoch 1, with headers deeper in chain that in this new submission - tk.Checkpointing.SetEpoch(epoch2) msg2 := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{epoch2Block1, epoch2Block2}) // Both headers are deeper than epoch 1 submission, fail @@ -353,7 +344,7 @@ func TestRejectSubmissionWithoutAncestorsOnMainchainInPreviousEpoch(t *testing.T func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { rand.Seed(time.Now().Unix()) - tk := InitTestKeepers(t, uint64(1)) + tk := InitTestKeepers(t) msg1 := dg.GenerateMessageWithRandomSubmitterForEpoch(1) tk.BTCLightClient.SetDepth(b1Hash(msg1), int64(5)) @@ -367,27 +358,25 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { _, err = tk.insertProofMsg(msg1a) require.NoError(t, err, "failed to insert submission for epoch 1") - tk.setEpoch(2) - msg2 := dg.GenerateMessageWithRandomSubmitterForEpoch(1) + msg2 := dg.GenerateMessageWithRandomSubmitterForEpoch(2) tk.BTCLightClient.SetDepth(b1Hash(msg2), int64(3)) tk.BTCLightClient.SetDepth(b2Hash(msg2), int64(2)) _, err = tk.insertProofMsg(msg2) require.NoError(t, err, "failed to insert submission for epoch 2") - msg2a := dg.GenerateMessageWithRandomSubmitterForEpoch(1) + msg2a := 
dg.GenerateMessageWithRandomSubmitterForEpoch(2) tk.BTCLightClient.SetDepth(b1Hash(msg2a), int64(3)) tk.BTCLightClient.SetDepth(b2Hash(msg2a), int64(2)) _, err = tk.insertProofMsg(msg2a) require.NoError(t, err, "failed to insert submission for epoch 2") - tk.setEpoch(3) - msg3 := dg.GenerateMessageWithRandomSubmitterForEpoch(1) + msg3 := dg.GenerateMessageWithRandomSubmitterForEpoch(3) tk.BTCLightClient.SetDepth(b1Hash(msg3), int64(1)) tk.BTCLightClient.SetDepth(b2Hash(msg3), int64(0)) _, err = tk.insertProofMsg(msg3) require.NoError(t, err, "failed to insert submission for epoch 3") - msg3a := dg.GenerateMessageWithRandomSubmitterForEpoch(1) + msg3a := dg.GenerateMessageWithRandomSubmitterForEpoch(3) tk.BTCLightClient.SetDepth(b1Hash(msg3a), int64(1)) tk.BTCLightClient.SetDepth(b2Hash(msg3a), int64(0)) _, err = tk.insertProofMsg(msg3a) @@ -439,7 +428,7 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { func TestLeaveOnlyBestSubmissionWhenEpochFinalized(t *testing.T) { rand.Seed(time.Now().Unix()) - tk := InitTestKeepers(t, uint64(1)) + tk := InitTestKeepers(t) defaultParams := btcctypes.DefaultParams() wDeep := defaultParams.CheckpointFinalizationTimeout @@ -488,7 +477,7 @@ func TestLeaveOnlyBestSubmissionWhenEpochFinalized(t *testing.T) { func TestTxIdxShouldBreakTies(t *testing.T) { rand.Seed(time.Now().Unix()) - tk := InitTestKeepers(t, uint64(1)) + tk := InitTestKeepers(t) defaultParams := btcctypes.DefaultParams() wDeep := defaultParams.CheckpointFinalizationTimeout @@ -541,12 +530,12 @@ func TestStateTransitionOfValidSubmission(t *testing.T) { defaultParams := btcctypes.DefaultParams() kDeep := defaultParams.BtcConfirmationDepth wDeep := defaultParams.CheckpointFinalizationTimeout - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) diff --git a/x/btccheckpoint/types/btccheckpoint.pb.go b/x/btccheckpoint/types/btccheckpoint.pb.go index 8e9bb809e..43054ceb5 100644 --- a/x/btccheckpoint/types/btccheckpoint.pb.go +++ b/x/btccheckpoint/types/btccheckpoint.pb.go @@ -323,15 +323,17 @@ func (m *TransactionInfo) GetProof() []byte { // and blockshash in enough to retrieve is from lightclient type SubmissionData struct { // TODO: this could probably be better typed - // Address of submitter of given checkpoint. Required to payup the reward to - // submitter of given checkpoint - Submitter []byte `protobuf:"bytes,1,opt,name=submitter,proto3" json:"submitter,omitempty"` + // Address of the vigiliatne which submitted the submissions, calculated from + // submission message itself + VigilanteAddress []byte `protobuf:"bytes,1,opt,name=vigilante_address,json=vigilanteAddress,proto3" json:"vigilante_address,omitempty"` + // Address of the checkpoint submitter, extracted from the checkpoint itself. + SubmitterAddress []byte `protobuf:"bytes,2,opt,name=submitter_address,json=submitterAddress,proto3" json:"submitter_address,omitempty"` // txs_info is the two `TransactionInfo`s corresponding to the submission // It is used for // - recovering address of sender of btc transction to payup the reward. 
// - allowing the ZoneConcierge module to prove the checkpoint is submitted to BTC - TxsInfo []*TransactionInfo `protobuf:"bytes,2,rep,name=txs_info,json=txsInfo,proto3" json:"txs_info,omitempty"` - Epoch uint64 `protobuf:"varint,3,opt,name=epoch,proto3" json:"epoch,omitempty"` + TxsInfo []*TransactionInfo `protobuf:"bytes,3,rep,name=txs_info,json=txsInfo,proto3" json:"txs_info,omitempty"` + Epoch uint64 `protobuf:"varint,4,opt,name=epoch,proto3" json:"epoch,omitempty"` } func (m *SubmissionData) Reset() { *m = SubmissionData{} } @@ -367,9 +369,16 @@ func (m *SubmissionData) XXX_DiscardUnknown() { var xxx_messageInfo_SubmissionData proto.InternalMessageInfo -func (m *SubmissionData) GetSubmitter() []byte { +func (m *SubmissionData) GetVigilanteAddress() []byte { if m != nil { - return m.Submitter + return m.VigilanteAddress + } + return nil +} + +func (m *SubmissionData) GetSubmitterAddress() []byte { + if m != nil { + return m.SubmitterAddress } return nil } @@ -396,8 +405,6 @@ type EpochData struct { Key []*SubmissionKey `protobuf:"bytes,1,rep,name=key,proto3" json:"key,omitempty"` // Current btc status of the epoch Status BtcStatus `protobuf:"varint,2,opt,name=status,proto3,enum=babylon.btccheckpoint.v1.BtcStatus" json:"status,omitempty"` - // Required to comunicate with checkpoint module about checkpoint status - RawCheckpoint []byte `protobuf:"bytes,3,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` } func (m *EpochData) Reset() { *m = EpochData{} } @@ -447,13 +454,6 @@ func (m *EpochData) GetStatus() BtcStatus { return Submitted } -func (m *EpochData) GetRawCheckpoint() []byte { - if m != nil { - return m.RawCheckpoint - } - return nil -} - func init() { proto.RegisterEnum("babylon.btccheckpoint.v1.BtcStatus", BtcStatus_name, BtcStatus_value) proto.RegisterType((*BTCSpvProof)(nil), "babylon.btccheckpoint.v1.BTCSpvProof") @@ -469,47 +469,47 @@ func init() { } var fileDescriptor_da8b9af3dbd18a36 = []byte{ - // 633 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, - 0x10, 0xcd, 0x86, 0xc0, 0xef, 0x97, 0x0d, 0x09, 0x68, 0x81, 0xca, 0x42, 0x95, 0x49, 0x53, 0x55, - 0x84, 0x1e, 0x12, 0x95, 0xb6, 0x12, 0xfd, 0x73, 0xc1, 0x49, 0x10, 0x11, 0xe5, 0x8f, 0x6c, 0x73, - 0xe1, 0x62, 0xad, 0x37, 0x9b, 0xd8, 0x22, 0xf1, 0x46, 0xde, 0x05, 0x92, 0xde, 0x2b, 0x55, 0x9c, - 0xaa, 0xde, 0x7b, 0xea, 0xad, 0x9f, 0xa4, 0x47, 0x8e, 0x15, 0x07, 0x54, 0xc1, 0xc7, 0xe8, 0xa5, - 0xf2, 0xae, 0x4b, 0x62, 0x5a, 0xd4, 0x72, 0xf3, 0xcc, 0xbe, 0x99, 0x7d, 0xef, 0xcd, 0x78, 0xe1, - 0x8a, 0x8b, 0xdd, 0x61, 0x97, 0x05, 0x55, 0x57, 0x10, 0xe2, 0x51, 0x72, 0xd8, 0x67, 0x7e, 0x20, - 0x92, 0x51, 0xa5, 0x1f, 0x32, 0xc1, 0x90, 0x16, 0x43, 0x2b, 0xc9, 0xc3, 0xe3, 0x27, 0x8b, 0xf3, - 0x1d, 0xd6, 0x61, 0x12, 0x54, 0x8d, 0xbe, 0x14, 0xbe, 0xf4, 0x03, 0xc0, 0x9c, 0x61, 0xd7, 0xac, - 0xfe, 0xf1, 0x5e, 0xc8, 0x58, 0x1b, 0x2d, 0xc3, 0x19, 0x57, 0x10, 0x47, 0x84, 0x38, 0xe0, 0x98, - 0x08, 0x9f, 0x05, 0x1a, 0x28, 0x82, 0xf2, 0xb4, 0x59, 0x70, 0x05, 0xb1, 0x47, 0x59, 0xb4, 0x0a, - 0x17, 0x6e, 0x00, 0x1d, 0x3f, 0x68, 0xd1, 0x81, 0x96, 0x2e, 0x82, 0x72, 0xde, 0x9c, 0x4b, 0xc2, - 0x9b, 0xd1, 0x11, 0x7a, 0x00, 0xa7, 0x7b, 0x34, 0x3c, 0xec, 0x52, 0x27, 0x60, 0x2d, 0xca, 0xb5, - 0x09, 0xd9, 0x39, 0xa7, 0x72, 0x3b, 0x51, 0x0a, 0x75, 0xe1, 0x02, 0x61, 0x41, 0xdb, 0x0f, 0x7b, - 0x7e, 0xd0, 0x71, 0xa2, 0x1b, 0x3c, 0x8a, 0x5b, 0x34, 0xd4, 0x32, 0x11, 0xd6, 0x58, 0x3b, 0xbf, - 0x58, 0x7a, 0xd6, 0xf1, 0x85, 0x77, 0xe4, 0x56, 0x08, 
0xeb, 0x55, 0x63, 0xb5, 0xc4, 0xc3, 0x7e, - 0xf0, 0x2b, 0xa8, 0x8a, 0x61, 0x9f, 0xf2, 0x8a, 0x61, 0xd7, 0x36, 0x65, 0xa9, 0x31, 0x14, 0x94, - 0x9b, 0x73, 0xa3, 0xb6, 0x86, 0x20, 0xea, 0xa4, 0x34, 0x80, 0x85, 0x31, 0x92, 0x5b, 0x74, 0x88, - 0xe6, 0xe1, 0xa4, 0x92, 0x01, 0xa4, 0x0c, 0x15, 0xa0, 0x3d, 0x98, 0xf1, 0x30, 0xf7, 0xa4, 0xb6, - 0x69, 0xe3, 0xf5, 0xf9, 0xc5, 0xd2, 0xda, 0x1d, 0x49, 0x6c, 0x62, 0xee, 0x29, 0x22, 0xb2, 0x53, - 0x69, 0x0b, 0xe6, 0xad, 0x23, 0xb7, 0xe7, 0x73, 0x1e, 0x5f, 0xfc, 0x12, 0x4e, 0x1c, 0xd2, 0xa1, - 0x06, 0x8a, 0x13, 0xe5, 0xdc, 0x6a, 0xb9, 0x72, 0xdb, 0x18, 0x2b, 0x49, 0xbe, 0x66, 0x54, 0x54, - 0x7a, 0x07, 0xe0, 0x4c, 0xc2, 0xec, 0x36, 0x1b, 0xf5, 0x03, 0x77, 0xee, 0x87, 0x8a, 0x30, 0x37, - 0xbe, 0x00, 0x69, 0x35, 0xa6, 0xb1, 0x54, 0x64, 0x53, 0x3f, 0xda, 0x97, 0x78, 0x84, 0x2a, 0x28, - 0x9d, 0x02, 0x58, 0x18, 0xa9, 0xaa, 0x63, 0x81, 0xd1, 0x7d, 0x98, 0xe5, 0x51, 0x46, 0x08, 0x1a, - 0xc6, 0x9b, 0x34, 0x4a, 0xa0, 0x3a, 0xfc, 0x5f, 0x0c, 0xb8, 0xe3, 0x07, 0x6d, 0xa6, 0xa5, 0xa5, - 0xf2, 0x95, 0x7f, 0x62, 0x1a, 0x29, 0x34, 0xff, 0x13, 0x03, 0x2e, 0xa5, 0xce, 0xc3, 0x49, 0xda, - 0x67, 0xc4, 0x93, 0x64, 0x32, 0xa6, 0x0a, 0x4a, 0x5f, 0x00, 0xcc, 0x36, 0xa2, 0x2f, 0xc9, 0xe3, - 0xc5, 0xb8, 0xbd, 0xcb, 0xb7, 0x5f, 0x92, 0x18, 0x8a, 0x72, 0xe3, 0x15, 0x9c, 0xe2, 0x02, 0x8b, - 0x23, 0x2e, 0x8d, 0x28, 0xac, 0x3e, 0xbc, 0xbd, 0xda, 0x10, 0xc4, 0x92, 0x50, 0x33, 0x2e, 0x41, - 0x8f, 0x60, 0x21, 0xc4, 0x27, 0xce, 0x08, 0x16, 0x3b, 0x96, 0x0f, 0xf1, 0x49, 0xed, 0x3a, 0xf9, - 0xf8, 0x23, 0x80, 0xd9, 0xeb, 0x62, 0xb4, 0x02, 0xef, 0x35, 0xf6, 0x76, 0x6b, 0x9b, 0x8e, 0x65, - 0xaf, 0xdb, 0xfb, 0x96, 0x63, 0xed, 0x1b, 0xdb, 0x4d, 0xdb, 0x6e, 0xd4, 0x67, 0x53, 0x8b, 0xf9, - 0xd3, 0x4f, 0xc5, 0xac, 0x15, 0x3b, 0xd8, 0xfa, 0x0d, 0x5a, 0xdb, 0xdd, 0xd9, 0x68, 0x9a, 0xdb, - 0x8d, 0xfa, 0x2c, 0x50, 0xd0, 0x9a, 0x5a, 0xfb, 0x3f, 0x40, 0x37, 0x9a, 0x3b, 0xeb, 0x6f, 0x9a, - 0x07, 0x8d, 0xfa, 0x6c, 0x5a, 0x41, 0x37, 0xfc, 0x00, 0x77, 0xfd, 0xb7, 0xb4, 0xb5, 0x98, 0x79, - 0xff, 0x59, 0x4f, 0x19, 0xbb, 0x5f, 0x2f, 0x75, 0x70, 0x76, 0xa9, 0x83, 0xef, 0x97, 0x3a, 0xf8, - 0x70, 0xa5, 0xa7, 0xce, 0xae, 0xf4, 0xd4, 0xb7, 0x2b, 0x3d, 0x75, 0xf0, 0xfc, 0x6f, 0xdb, 0x3f, - 0xb8, 0xf1, 0x54, 0xc9, 0xbf, 0xc1, 0x9d, 0x92, 0x6f, 0xce, 0xd3, 0x9f, 0x01, 0x00, 0x00, 0xff, - 0xff, 0xc7, 0x34, 0x1c, 0x27, 0xd0, 0x04, 0x00, 0x00, + // 640 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x4f, 0x13, 0x41, + 0x14, 0xef, 0xd0, 0x82, 0x32, 0x85, 0x52, 0x17, 0x30, 0x1b, 0x0e, 0x4b, 0xad, 0x07, 0x8a, 0x26, + 0x6d, 0x44, 0x4d, 0xf0, 0xe3, 0xc2, 0xb6, 0x25, 0x34, 0xc8, 0x47, 0x76, 0x97, 0x0b, 0x97, 0xcd, + 0xec, 0xee, 0xb4, 0x3b, 0xa1, 0xdd, 0x69, 0x76, 0x06, 0xd2, 0x7a, 0x35, 0x26, 0xc6, 0x93, 0xf1, + 0xee, 0xc9, 0xff, 0xc5, 0x78, 0xe4, 0x68, 0x38, 0x10, 0x03, 0x7f, 0x86, 0x17, 0x33, 0x33, 0x4b, + 0x3f, 0x50, 0xa2, 0xdc, 0xf6, 0xbd, 0xf7, 0x7b, 0x1f, 0xbf, 0xdf, 0x7b, 0xb3, 0x70, 0xd5, 0x43, + 0x5e, 0xbf, 0x4d, 0xa3, 0x8a, 0xc7, 0x7d, 0x3f, 0xc4, 0xfe, 0x51, 0x97, 0x92, 0x88, 0x8f, 0x5b, + 0xe5, 0x6e, 0x4c, 0x39, 0xd5, 0xf4, 0x04, 0x5a, 0x1e, 0x0f, 0x9e, 0x3c, 0x59, 0x5a, 0x68, 0xd1, + 0x16, 0x95, 0xa0, 0x8a, 0xf8, 0x52, 0xf8, 0xe2, 0x2f, 0x00, 0xb3, 0xa6, 0x53, 0xb5, 0xbb, 0x27, + 0xfb, 0x31, 0xa5, 0x4d, 0x6d, 0x05, 0xce, 0x79, 0xdc, 0x77, 0x79, 0x8c, 0x22, 0x86, 0x7c, 0x4e, + 0x68, 0xa4, 0x83, 0x02, 0x28, 0xcd, 0x58, 0x39, 0x8f, 0xfb, 0xce, 0xd0, 0xab, 0xad, 0xc1, 0xc5, + 0x6b, 0x40, 0x97, 0x44, 0x01, 0xee, 0xe9, 0x13, 0x05, 0x50, 0x9a, 0xb5, 
0xe6, 0xc7, 0xe1, 0x0d, + 0x11, 0xd2, 0x1e, 0xc0, 0x99, 0x0e, 0x8e, 0x8f, 0xda, 0xd8, 0x8d, 0x68, 0x80, 0x99, 0x9e, 0x96, + 0x95, 0xb3, 0xca, 0xb7, 0x2b, 0x5c, 0x5a, 0x1b, 0x2e, 0xfa, 0x34, 0x6a, 0x92, 0xb8, 0x43, 0xa2, + 0x96, 0x2b, 0x3a, 0x84, 0x18, 0x05, 0x38, 0xd6, 0x33, 0x02, 0x6b, 0xae, 0x9f, 0x9d, 0x2f, 0x3f, + 0x6b, 0x11, 0x1e, 0x1e, 0x7b, 0x65, 0x9f, 0x76, 0x2a, 0x09, 0x5b, 0x3f, 0x44, 0x24, 0xba, 0x32, + 0x2a, 0xbc, 0xdf, 0xc5, 0xac, 0x6c, 0x3a, 0xd5, 0x2d, 0x99, 0x6a, 0xf6, 0x39, 0x66, 0xd6, 0xfc, + 0xb0, 0xac, 0xc9, 0x7d, 0x15, 0x29, 0xf6, 0x60, 0x6e, 0x64, 0xc8, 0x6d, 0xdc, 0xd7, 0x16, 0xe0, + 0xa4, 0xa2, 0x01, 0x24, 0x0d, 0x65, 0x68, 0xfb, 0x30, 0x13, 0x22, 0x16, 0x4a, 0x6e, 0x33, 0xe6, + 0xeb, 0xb3, 0xf3, 0xe5, 0xf5, 0x5b, 0x0e, 0xb1, 0x85, 0x58, 0xa8, 0x06, 0x91, 0x95, 0x8a, 0xdb, + 0x70, 0xd6, 0x3e, 0xf6, 0x3a, 0x84, 0xb1, 0xa4, 0xf1, 0x4b, 0x98, 0x3e, 0xc2, 0x7d, 0x1d, 0x14, + 0xd2, 0xa5, 0xec, 0x5a, 0xa9, 0x7c, 0xd3, 0x1a, 0xcb, 0xe3, 0xf3, 0x5a, 0x22, 0xa9, 0xf8, 0x1e, + 0xc0, 0xb9, 0x31, 0xb1, 0x9b, 0x74, 0x58, 0x0f, 0xdc, 0xba, 0x9e, 0x56, 0x80, 0xd9, 0xd1, 0x03, + 0x98, 0x50, 0x6b, 0x1a, 0x71, 0x09, 0x99, 0xba, 0xe2, 0x5e, 0x92, 0x15, 0x2a, 0xa3, 0xf8, 0x0d, + 0xc0, 0xdc, 0x90, 0x55, 0x0d, 0x71, 0xa4, 0x3d, 0x86, 0xf7, 0x4e, 0x48, 0x8b, 0xb4, 0x51, 0xc4, + 0xb1, 0x8b, 0x82, 0x20, 0xc6, 0x8c, 0x25, 0x17, 0x95, 0x1f, 0x04, 0x36, 0x94, 0x5f, 0x80, 0x99, + 0x48, 0xe7, 0x1c, 0xc7, 0x03, 0xb0, 0xea, 0x9e, 0x1f, 0x04, 0xae, 0xc0, 0x35, 0x78, 0x97, 0xf7, + 0x98, 0x4b, 0xa2, 0x26, 0xd5, 0xd3, 0x52, 0xb5, 0xd5, 0xff, 0x62, 0x29, 0xd4, 0xb1, 0xee, 0xf0, + 0x1e, 0x93, 0x32, 0x2d, 0xc0, 0x49, 0xdc, 0xa5, 0x7e, 0x28, 0xef, 0x2b, 0x63, 0x29, 0xa3, 0xf8, + 0x0e, 0xc0, 0xe9, 0xba, 0xf8, 0x92, 0x1c, 0x5e, 0x8c, 0xae, 0x66, 0xe5, 0xe6, 0x26, 0x63, 0x0b, + 0x55, 0x4a, 0xbe, 0x82, 0x53, 0x8c, 0x23, 0x7e, 0xac, 0x68, 0xe4, 0xd6, 0x1e, 0xde, 0x9c, 0x6d, + 0x72, 0xdf, 0x96, 0x50, 0x2b, 0x49, 0x79, 0xf4, 0x19, 0xc0, 0xe9, 0x81, 0x57, 0x5b, 0x85, 0xf7, + 0xeb, 0xfb, 0x7b, 0xd5, 0x2d, 0xd7, 0x76, 0x36, 0x9c, 0x03, 0xdb, 0xb5, 0x0f, 0xcc, 0x9d, 0x86, + 0xe3, 0xd4, 0x6b, 0xf9, 0xd4, 0xd2, 0xec, 0xc7, 0x2f, 0x85, 0x69, 0x3b, 0x51, 0x28, 0xf8, 0x03, + 0x5a, 0xdd, 0xdb, 0xdd, 0x6c, 0x58, 0x3b, 0xf5, 0x5a, 0x1e, 0x28, 0x68, 0x55, 0xbd, 0x85, 0xbf, + 0x40, 0x37, 0x1b, 0xbb, 0x1b, 0x6f, 0x1a, 0x87, 0xf5, 0x5a, 0x7e, 0x42, 0x41, 0x37, 0x49, 0x84, + 0xda, 0xe4, 0x2d, 0x0e, 0x96, 0x32, 0x1f, 0xbe, 0x1a, 0x29, 0x73, 0xef, 0xfb, 0x85, 0x01, 0x4e, + 0x2f, 0x0c, 0xf0, 0xf3, 0xc2, 0x00, 0x9f, 0x2e, 0x8d, 0xd4, 0xe9, 0xa5, 0x91, 0xfa, 0x71, 0x69, + 0xa4, 0x0e, 0x9f, 0xff, 0xeb, 0x49, 0xf4, 0xae, 0xfd, 0xbf, 0xe4, 0x13, 0xf1, 0xa6, 0xe4, 0x8f, + 0xe8, 0xe9, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x52, 0x2f, 0x67, 0xe5, 0x04, 0x00, 0x00, } func (m *BTCSpvProof) Marshal() (dAtA []byte, err error) { @@ -715,7 +715,7 @@ func (m *SubmissionData) MarshalToSizedBuffer(dAtA []byte) (int, error) { if m.Epoch != 0 { i = encodeVarintBtccheckpoint(dAtA, i, uint64(m.Epoch)) i-- - dAtA[i] = 0x18 + dAtA[i] = 0x20 } if len(m.TxsInfo) > 0 { for iNdEx := len(m.TxsInfo) - 1; iNdEx >= 0; iNdEx-- { @@ -728,13 +728,20 @@ func (m *SubmissionData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintBtccheckpoint(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } } - if len(m.Submitter) > 0 { - i -= len(m.Submitter) - copy(dAtA[i:], m.Submitter) - i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.Submitter))) + if len(m.SubmitterAddress) > 0 { + i -= len(m.SubmitterAddress) + copy(dAtA[i:], 
m.SubmitterAddress) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.SubmitterAddress))) + i-- + dAtA[i] = 0x12 + } + if len(m.VigilanteAddress) > 0 { + i -= len(m.VigilanteAddress) + copy(dAtA[i:], m.VigilanteAddress) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.VigilanteAddress))) i-- dAtA[i] = 0xa } @@ -761,13 +768,6 @@ func (m *EpochData) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.RawCheckpoint) > 0 { - i -= len(m.RawCheckpoint) - copy(dAtA[i:], m.RawCheckpoint) - i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.RawCheckpoint))) - i-- - dAtA[i] = 0x1a - } if m.Status != 0 { i = encodeVarintBtccheckpoint(dAtA, i, uint64(m.Status)) i-- @@ -883,7 +883,11 @@ func (m *SubmissionData) Size() (n int) { } var l int _ = l - l = len(m.Submitter) + l = len(m.VigilanteAddress) + if l > 0 { + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + l = len(m.SubmitterAddress) if l > 0 { n += 1 + l + sovBtccheckpoint(uint64(l)) } @@ -914,10 +918,6 @@ func (m *EpochData) Size() (n int) { if m.Status != 0 { n += 1 + sovBtccheckpoint(uint64(m.Status)) } - l = len(m.RawCheckpoint) - if l > 0 { - n += 1 + l + sovBtccheckpoint(uint64(l)) - } return n } @@ -1472,7 +1472,7 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Submitter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VigilanteAddress", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -1499,12 +1499,46 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Submitter = append(m.Submitter[:0], dAtA[iNdEx:postIndex]...) - if m.Submitter == nil { - m.Submitter = []byte{} + m.VigilanteAddress = append(m.VigilanteAddress[:0], dAtA[iNdEx:postIndex]...) + if m.VigilanteAddress == nil { + m.VigilanteAddress = []byte{} } iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubmitterAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubmitterAddress = append(m.SubmitterAddress[:0], dAtA[iNdEx:postIndex]...) 
+ if m.SubmitterAddress == nil { + m.SubmitterAddress = []byte{} + } + iNdEx = postIndex + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TxsInfo", wireType) } @@ -1538,7 +1572,7 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) } @@ -1660,40 +1694,6 @@ func (m *EpochData) Unmarshal(dAtA []byte) error { break } } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBtccheckpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBtccheckpoint - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBtccheckpoint - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RawCheckpoint = append(m.RawCheckpoint[:0], dAtA[iNdEx:postIndex]...) - if m.RawCheckpoint == nil { - m.RawCheckpoint = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) diff --git a/x/btccheckpoint/types/expected_keepers.go b/x/btccheckpoint/types/expected_keepers.go index dedf9afaa..d351b6a5b 100644 --- a/x/btccheckpoint/types/expected_keepers.go +++ b/x/btccheckpoint/types/expected_keepers.go @@ -1,7 +1,9 @@ package types import ( + txformat "github.com/babylonchain/babylon/btctxformatter" bbn "github.com/babylonchain/babylon/types" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/auth/types" ) @@ -30,10 +32,7 @@ type BTCLightClientKeeper interface { } type CheckpointingKeeper interface { - // CheckpointEpoch should return epoch index if provided rawCheckpoint - // passes all checkpointing validations and error otherwise - CheckpointEpoch(ctx sdk.Context, rawCheckpoint []byte) (uint64, error) - + VerifyCheckpoint(ctx sdk.Context, checkpoint txformat.RawBtcCheckpoint) error // It quite mouthfull to have 4 different methods to operate on checkpoint state // but this approach decouples both modules a bit more than having some kind // of shared enum passed into the methods. 
Both modules are free to evolve their diff --git a/x/btccheckpoint/types/mock_keepers.go b/x/btccheckpoint/types/mock_keepers.go index bbd0243ff..c541246b7 100644 --- a/x/btccheckpoint/types/mock_keepers.go +++ b/x/btccheckpoint/types/mock_keepers.go @@ -3,6 +3,7 @@ package types import ( "errors" + txformat "github.com/babylonchain/babylon/btctxformatter" bbn "github.com/babylonchain/babylon/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -12,7 +13,6 @@ type MockBTCLightClientKeeper struct { } type MockCheckpointingKeeper struct { - epoch uint64 returnError bool } @@ -23,18 +23,13 @@ func NewMockBTCLightClientKeeper() *MockBTCLightClientKeeper { return &lc } -func NewMockCheckpointingKeeper(epoch uint64) *MockCheckpointingKeeper { +func NewMockCheckpointingKeeper() *MockCheckpointingKeeper { mc := MockCheckpointingKeeper{ - epoch: epoch, returnError: false, } return &mc } -func (mc *MockCheckpointingKeeper) SetEpoch(e uint64) { - mc.epoch = e -} - func (mc *MockCheckpointingKeeper) ReturnError() { mc.returnError = true } @@ -61,12 +56,12 @@ func (ck MockBTCLightClientKeeper) MainChainDepth(ctx sdk.Context, headerBytes * } } -func (ck MockCheckpointingKeeper) CheckpointEpoch(ctx sdk.Context, rawCheckpoint []byte) (uint64, error) { +func (ck MockCheckpointingKeeper) VerifyCheckpoint(ctx sdk.Context, checkpoint txformat.RawBtcCheckpoint) error { if ck.returnError { - return 0, errors.New("bad checkpoints") + return errors.New("bad checkpoints") } - return ck.epoch, nil + return nil } // SetCheckpointSubmitted Informs checkpointing module that checkpoint was diff --git a/x/btccheckpoint/types/msgs.go b/x/btccheckpoint/types/msgs.go index d1f18d04d..9c4fccf06 100644 --- a/x/btccheckpoint/types/msgs.go +++ b/x/btccheckpoint/types/msgs.go @@ -72,7 +72,13 @@ func ParseTwoProofs( return nil, err } - sub := NewRawCheckpointSubmission(submitter, *parsedProofs[0], *parsedProofs[1], rawCkptData) + rawCheckpoint, err := txformat.DecodeRawCheckpoint(txformat.CurrentVersion, rawCkptData) + + if err != nil { + return nil, err + } + + sub := NewRawCheckpointSubmission(submitter, *parsedProofs[0], *parsedProofs[1], *rawCheckpoint) return &sub, nil } diff --git a/x/btccheckpoint/types/types.go b/x/btccheckpoint/types/types.go index 8c9137141..1cd12952e 100644 --- a/x/btccheckpoint/types/types.go +++ b/x/btccheckpoint/types/types.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "fmt" + "github.com/babylonchain/babylon/btctxformatter" "github.com/babylonchain/babylon/types" "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" @@ -18,7 +19,7 @@ type RawCheckpointSubmission struct { Submitter sdk.AccAddress Proof1 ParsedProof Proof2 ParsedProof - checkpointData []byte + CheckpointData btctxformatter.RawBtcCheckpoint } // SubmissionBtcInfo encapsualte important information about submission posistion @@ -39,13 +40,13 @@ func NewRawCheckpointSubmission( a sdk.AccAddress, p1 ParsedProof, p2 ParsedProof, - checkpointData []byte, + checkpointData btctxformatter.RawBtcCheckpoint, ) RawCheckpointSubmission { r := RawCheckpointSubmission{ Submitter: a, Proof1: p1, Proof2: p2, - checkpointData: checkpointData, + CheckpointData: checkpointData, } return r @@ -55,13 +56,6 @@ func (s *RawCheckpointSubmission) GetProofs() []*ParsedProof { return []*ParsedProof{&s.Proof1, &s.Proof2} } -func (s *RawCheckpointSubmission) GetRawCheckPointBytes() []byte { - checkpointDataCopy := make([]byte, len(s.checkpointData)) - // return copy, to avoid someone modifing original - copy(checkpointDataCopy, 
s.checkpointData) - return checkpointDataCopy -} - func (s *RawCheckpointSubmission) GetFirstBlockHash() types.BTCHeaderHashBytes { return s.Proof1.BlockHash } @@ -97,9 +91,10 @@ func (rsc *RawCheckpointSubmission) GetSubmissionKey() SubmissionKey { func (rsc *RawCheckpointSubmission) GetSubmissionData(epochNum uint64, txsInfo []*TransactionInfo) SubmissionData { return SubmissionData{ - Submitter: rsc.Submitter.Bytes(), - TxsInfo: txsInfo, - Epoch: epochNum, + VigilanteAddress: rsc.Submitter.Bytes(), + SubmitterAddress: rsc.CheckpointData.SubmitterAddress, + TxsInfo: txsInfo, + Epoch: epochNum, } } @@ -114,11 +109,10 @@ func (sk *SubmissionKey) GetKeyBlockHashes() []*types.BTCHeaderHashBytes { return hashes } -func NewEmptyEpochData(rawCheckpointBytes []byte) EpochData { +func NewEmptyEpochData() EpochData { return EpochData{ - Key: []*SubmissionKey{}, - Status: Submitted, - RawCheckpoint: rawCheckpointBytes, + Key: []*SubmissionKey{}, + Status: Submitted, } } diff --git a/x/checkpointing/keeper/keeper.go b/x/checkpointing/keeper/keeper.go index ac74100da..92d711def 100644 --- a/x/checkpointing/keeper/keeper.go +++ b/x/checkpointing/keeper/keeper.go @@ -4,6 +4,8 @@ import ( "errors" "fmt" + txformat "github.com/babylonchain/babylon/btctxformatter" + "github.com/babylonchain/babylon/crypto/bls12381" "github.com/babylonchain/babylon/x/checkpointing/types" epochingtypes "github.com/babylonchain/babylon/x/epoching/types" @@ -166,20 +168,19 @@ func (k Keeper) BuildRawCheckpoint(ctx sdk.Context, epochNum uint64, lch types.L return ckptWithMeta, nil } -// CheckpointEpoch verifies checkpoint from BTC and returns epoch number if -// it equals to the existing raw checkpoint. Otherwise, it further verifies +// VerifyCheckpoint verifies checkpoint from BTC. It verifies // the raw checkpoint and decides whether it is an invalid checkpoint or a // conflicting checkpoint. A conflicting checkpoint indicates the existence // of a fork -func (k Keeper) CheckpointEpoch(ctx sdk.Context, btcCkptBytes []byte) (uint64, error) { - ckptWithMeta, err := k.verifyCkptBytes(ctx, btcCkptBytes) +func (k Keeper) VerifyCheckpoint(ctx sdk.Context, checkpoint txformat.RawBtcCheckpoint) error { + _, err := k.verifyCkptBytes(ctx, &checkpoint) if err != nil { if errors.Is(err, types.ErrConflictingCheckpoint) { panic(err) } - return 0, err + return err } - return ckptWithMeta.Ckpt.EpochNum, nil + return nil } // verifyCkptBytes verifies checkpoint from BTC. A checkpoint is valid if @@ -187,8 +188,8 @@ func (k Keeper) CheckpointEpoch(ctx sdk.Context, btcCkptBytes []byte) (uint64, e // the raw checkpoint and decides whether it is an invalid checkpoint or a // conflicting checkpoint. A conflicting checkpoint indicates the existence // of a fork -func (k Keeper) verifyCkptBytes(ctx sdk.Context, btcCkptBytes []byte) (*types.RawCheckpointWithMeta, error) { - ckpt, err := types.FromBTCCkptBytesToRawCkpt(btcCkptBytes) +func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcCheckpoint) (*types.RawCheckpointWithMeta, error) { + ckpt, err := types.FromBTCCkptToRawCkpt(rawCheckpoint) if err != nil { return nil, err } diff --git a/x/checkpointing/keeper/keeper_test.go b/x/checkpointing/keeper/keeper_test.go index ac49fc157..37eebec6d 100644 --- a/x/checkpointing/keeper/keeper_test.go +++ b/x/checkpointing/keeper/keeper_test.go @@ -188,32 +188,32 @@ func FuzzKeeperCheckpointEpoch(f *testing.F) { ) // 1. 
check valid checkpoint - btcCkptBytes := makeBtcCkptBytes( + rawBtcCheckpoint := makeBtcCkptBytes( localCkptWithMeta.Ckpt.EpochNum, localCkptWithMeta.Ckpt.LastCommitHash.MustMarshal(), localCkptWithMeta.Ckpt.Bitmap, localCkptWithMeta.Ckpt.BlsMultiSig.Bytes(), t, ) - epoch, err := ckptKeeper.CheckpointEpoch(ctx, btcCkptBytes) + + err := ckptKeeper.VerifyCheckpoint(ctx, *rawBtcCheckpoint) require.NoError(t, err) - require.Equal(t, localCkptWithMeta.Ckpt.EpochNum, epoch) // 2. check a checkpoint with invalid sig - btcCkptBytes = makeBtcCkptBytes( + rawBtcCheckpoint = makeBtcCkptBytes( localCkptWithMeta.Ckpt.EpochNum, localCkptWithMeta.Ckpt.LastCommitHash.MustMarshal(), localCkptWithMeta.Ckpt.Bitmap, datagen.GenRandomByteArray(btctxformatter.BlsSigLength), t, ) - _, err = ckptKeeper.CheckpointEpoch(ctx, btcCkptBytes) + err = ckptKeeper.VerifyCheckpoint(ctx, *rawBtcCheckpoint) require.ErrorIs(t, err, types.ErrInvalidRawCheckpoint) // 3. check a conflicting checkpoint; signed on a random lastcommithash conflictLastCommitHash := datagen.GenRandomByteArray(btctxformatter.LastCommitHashLength) msgBytes = append(sdk.Uint64ToBigEndian(localCkptWithMeta.Ckpt.EpochNum), conflictLastCommitHash...) - btcCkptBytes = makeBtcCkptBytes( + rawBtcCheckpoint = makeBtcCkptBytes( localCkptWithMeta.Ckpt.EpochNum, conflictLastCommitHash, localCkptWithMeta.Ckpt.Bitmap, @@ -221,12 +221,12 @@ func FuzzKeeperCheckpointEpoch(f *testing.F) { t, ) require.Panics(t, func() { - _, _ = ckptKeeper.CheckpointEpoch(ctx, btcCkptBytes) + _ = ckptKeeper.VerifyCheckpoint(ctx, *rawBtcCheckpoint) }) }) } -func makeBtcCkptBytes(epoch uint64, lch []byte, bitmap []byte, blsSig []byte, t *testing.T) []byte { +func makeBtcCkptBytes(epoch uint64, lch []byte, bitmap []byte, blsSig []byte, t *testing.T) *btctxformatter.RawBtcCheckpoint { tag := datagen.GenRandomByteArray(btctxformatter.TagLength) babylonTag := btctxformatter.BabylonTag(tag[:btctxformatter.TagLength]) address := datagen.GenRandomByteArray(btctxformatter.AddressLength) @@ -251,7 +251,10 @@ func makeBtcCkptBytes(epoch uint64, lch []byte, bitmap []byte, blsSig []byte, t ckptData, err := btctxformatter.ConnectParts(btctxformatter.CurrentVersion, decodedFirst.Data, decodedSecond.Data) require.NoError(t, err) - return ckptData + rawCheckpoint, err := btctxformatter.DecodeRawCheckpoint(btctxformatter.CurrentVersion, ckptData) + require.NoError(t, err) + + return rawCheckpoint } func curStateUpdate(ctx sdk.Context, status types.CheckpointStatus) *types.CheckpointStateUpdate { diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index cbf7f69f9..f69392475 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -168,8 +168,16 @@ func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedC return nil, err } + rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, finalizedEpoch) + + if err != nil { + return nil, err + } + + resp.RawCheckpoint = rawCheckpoint.Ckpt + // find the raw checkpoint and the best submission key for the finalised epoch - _, resp.RawCheckpoint, resp.BtcSubmissionKey, err = k.btccKeeper.GetFinalizedEpochDataWithBestSubmission(ctx, finalizedEpoch) + _, resp.BtcSubmissionKey, err = k.btccKeeper.GetBestSubmission(ctx, finalizedEpoch) if err != nil { return nil, err } @@ -180,7 +188,7 @@ func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedC } // generate all proofs - resp.Proof, err = k.proveFinalizedChainInfo(ctx, chainInfo, 
resp.EpochInfo, resp.RawCheckpoint, resp.BtcSubmissionKey) + resp.Proof, err = k.proveFinalizedChainInfo(ctx, chainInfo, resp.EpochInfo, resp.BtcSubmissionKey) if err != nil { return nil, err } @@ -214,8 +222,16 @@ func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.Quer return nil, err } + rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, finalizedEpoch) + + if err != nil { + return nil, err + } + + resp.RawCheckpoint = rawCheckpoint.Ckpt + // find and assign the raw checkpoint and the best submission key for the finalised epoch - _, resp.RawCheckpoint, resp.BtcSubmissionKey, err = k.btccKeeper.GetFinalizedEpochDataWithBestSubmission(ctx, finalizedEpoch) + _, resp.BtcSubmissionKey, err = k.btccKeeper.GetBestSubmission(ctx, finalizedEpoch) if err != nil { return nil, err } @@ -236,7 +252,16 @@ func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.Quer if err != nil { return nil, err } - _, resp.RawCheckpoint, resp.BtcSubmissionKey, err = k.btccKeeper.GetFinalizedEpochDataWithBestSubmission(ctx, finalizedEpoch) + + rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, finalizedEpoch) + + if err != nil { + return nil, err + } + + resp.RawCheckpoint = rawCheckpoint.Ckpt + + _, resp.BtcSubmissionKey, err = k.btccKeeper.GetBestSubmission(ctx, finalizedEpoch) if err != nil { return nil, err } @@ -248,7 +273,7 @@ func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.Quer } // generate all proofs - resp.Proof, err = k.proveFinalizedChainInfo(ctx, chainInfo, resp.EpochInfo, resp.RawCheckpoint, resp.BtcSubmissionKey) + resp.Proof, err = k.proveFinalizedChainInfo(ctx, chainInfo, resp.EpochInfo, resp.BtcSubmissionKey) if err != nil { return nil, err } diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go index e9c7ea08d..b787dfc32 100644 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ b/x/zoneconcierge/keeper/grpc_query_test.go @@ -300,9 +300,13 @@ func FuzzFinalizedChainInfo(f *testing.F) { randomRawCkpt := datagen.GenRandomRawCheckpoint() randomRawCkpt.EpochNum = epoch.EpochNumber btccKeeper := zctypes.NewMockBtcCheckpointKeeper(ctrl) - btccKeeper.EXPECT().GetFinalizedEpochDataWithBestSubmission(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return( + checkpointingKeeper.EXPECT().GetRawCheckpoint(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return( + &checkpointingtypes.RawCheckpointWithMeta{ + Ckpt: randomRawCkpt, + }, nil, + ).AnyTimes() + btccKeeper.EXPECT().GetBestSubmission(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return( btcctypes.Finalized, - randomRawCkpt, &btcctypes.SubmissionKey{ Key: []*btcctypes.TransactionKey{}, }, diff --git a/x/zoneconcierge/keeper/proof_finalized_chain_info.go b/x/zoneconcierge/keeper/proof_finalized_chain_info.go index d26c15c68..f8d99fc1c 100644 --- a/x/zoneconcierge/keeper/proof_finalized_chain_info.go +++ b/x/zoneconcierge/keeper/proof_finalized_chain_info.go @@ -2,7 +2,6 @@ package keeper import ( btcctypes "github.com/babylonchain/babylon/x/btccheckpoint/types" - checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" epochingtypes "github.com/babylonchain/babylon/x/epoching/types" "github.com/babylonchain/babylon/x/zoneconcierge/types" sdk "github.com/cosmos/cosmos-sdk/types" @@ -16,7 +15,6 @@ func (k Keeper) proveFinalizedChainInfo( ctx sdk.Context, chainInfo *types.ChainInfo, epochInfo *epochingtypes.Epoch, - rawCheckpoint *checkpointingtypes.RawCheckpoint, bestSubmissionKey 
*btcctypes.SubmissionKey, ) (*types.ProofFinalizedChainInfo, error) { var ( diff --git a/x/zoneconcierge/types/expected_keepers.go b/x/zoneconcierge/types/expected_keepers.go index 236d54430..190f1daaf 100644 --- a/x/zoneconcierge/types/expected_keepers.go +++ b/x/zoneconcierge/types/expected_keepers.go @@ -68,12 +68,13 @@ type ScopedKeeper interface { } type BtcCheckpointKeeper interface { - GetFinalizedEpochDataWithBestSubmission(ctx sdk.Context, e uint64) (btcctypes.BtcStatus, *checkpointingtypes.RawCheckpoint, *btcctypes.SubmissionKey, error) + GetBestSubmission(ctx sdk.Context, e uint64) (btcctypes.BtcStatus, *btcctypes.SubmissionKey, error) GetSubmissionData(ctx sdk.Context, sk btcctypes.SubmissionKey) *btcctypes.SubmissionData } type CheckpointingKeeper interface { GetBLSPubKeySet(ctx sdk.Context, epochNumber uint64) ([]*checkpointingtypes.ValidatorWithBlsKey, error) + GetRawCheckpoint(ctx sdk.Context, epochNumber uint64) (*checkpointingtypes.RawCheckpointWithMeta, error) } type EpochingKeeper interface { diff --git a/x/zoneconcierge/types/mocked_keepers.go b/x/zoneconcierge/types/mocked_keepers.go index 1e48d0971..d58c82a20 100644 --- a/x/zoneconcierge/types/mocked_keepers.go +++ b/x/zoneconcierge/types/mocked_keepers.go @@ -502,21 +502,20 @@ func (m *MockBtcCheckpointKeeper) EXPECT() *MockBtcCheckpointKeeperMockRecorder return m.recorder } -// GetFinalizedEpochDataWithBestSubmission mocks base method. -func (m *MockBtcCheckpointKeeper) GetFinalizedEpochDataWithBestSubmission(ctx types2.Context, e uint64) (types.BtcStatus, *types0.RawCheckpoint, *types.SubmissionKey, error) { +// GetBestSubmission mocks base method. +func (m *MockBtcCheckpointKeeper) GetBestSubmission(ctx types2.Context, e uint64) (types.BtcStatus, *types.SubmissionKey, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFinalizedEpochDataWithBestSubmission", ctx, e) + ret := m.ctrl.Call(m, "GetBestSubmission", ctx, e) ret0, _ := ret[0].(types.BtcStatus) - ret1, _ := ret[1].(*types0.RawCheckpoint) - ret2, _ := ret[2].(*types.SubmissionKey) - ret3, _ := ret[3].(error) - return ret0, ret1, ret2, ret3 + ret1, _ := ret[1].(*types.SubmissionKey) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 } -// GetFinalizedEpochDataWithBestSubmission indicates an expected call of GetFinalizedEpochDataWithBestSubmission. -func (mr *MockBtcCheckpointKeeperMockRecorder) GetFinalizedEpochDataWithBestSubmission(ctx, e interface{}) *gomock.Call { +// GetBestSubmission indicates an expected call of GetBestSubmission. +func (mr *MockBtcCheckpointKeeperMockRecorder) GetBestSubmission(ctx, e interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFinalizedEpochDataWithBestSubmission", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetFinalizedEpochDataWithBestSubmission), ctx, e) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBestSubmission", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetBestSubmission), ctx, e) } // GetSubmissionData mocks base method. @@ -571,6 +570,21 @@ func (mr *MockCheckpointingKeeperMockRecorder) GetBLSPubKeySet(ctx, epochNumber return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBLSPubKeySet", reflect.TypeOf((*MockCheckpointingKeeper)(nil).GetBLSPubKeySet), ctx, epochNumber) } +// GetRawCheckpoint mocks base method. 
+func (m *MockCheckpointingKeeper) GetRawCheckpoint(ctx types2.Context, epochNumber uint64) (*types0.RawCheckpointWithMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRawCheckpoint", ctx, epochNumber) + ret0, _ := ret[0].(*types0.RawCheckpointWithMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRawCheckpoint indicates an expected call of GetRawCheckpoint. +func (mr *MockCheckpointingKeeperMockRecorder) GetRawCheckpoint(ctx, epochNumber interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRawCheckpoint", reflect.TypeOf((*MockCheckpointingKeeper)(nil).GetRawCheckpoint), ctx, epochNumber) +} + // MockEpochingKeeper is a mock of EpochingKeeper interface. type MockEpochingKeeper struct { ctrl *gomock.Controller From 947918885d4e5b39d4a2a2ebd950d23f07af25c2 Mon Sep 17 00:00:00 2001 From: KonradStaniec Date: Thu, 19 Jan 2023 08:19:06 +0100 Subject: [PATCH 24/37] Fix vulnerability when processing bls sig transactions (#287) --- app/test_helpers.go | 1 - x/checkpointing/keeper/bls_signer.go | 3 +- x/checkpointing/keeper/keeper.go | 5 ++++ x/checkpointing/keeper/msg_server_test.go | 25 ++++++++++++++++ x/checkpointing/types/errors.go | 1 + x/epoching/testepoching/helper.go | 36 ++++++++++++++++++----- 6 files changed, 61 insertions(+), 10 deletions(-) diff --git a/app/test_helpers.go b/app/test_helpers.go index 75395f567..170e95d19 100644 --- a/app/test_helpers.go +++ b/app/test_helpers.go @@ -95,7 +95,6 @@ func setup(withGenesis bool, invCheckPeriod uint) (*BabylonApp, GenesisState) { // one validator in validator set during InitGenesis abci call - https://github.com/cosmos/cosmos-sdk/pull/9697 func NewBabyblonAppWithCustomOptions(t *testing.T, isCheckTx bool, privSigner *PrivSigner, options SetupOptions) *BabylonApp { t.Helper() - privVal := datagen.NewPV() pubKey, err := privVal.GetPubKey() require.NoError(t, err) diff --git a/x/checkpointing/keeper/bls_signer.go b/x/checkpointing/keeper/bls_signer.go index 5a73234db..56009ccad 100644 --- a/x/checkpointing/keeper/bls_signer.go +++ b/x/checkpointing/keeper/bls_signer.go @@ -2,9 +2,10 @@ package keeper import ( "fmt" - epochingtypes "github.com/babylonchain/babylon/x/epoching/types" "time" + epochingtypes "github.com/babylonchain/babylon/x/epoching/types" + "github.com/babylonchain/babylon/client/tx" "github.com/babylonchain/babylon/crypto/bls12381" "github.com/babylonchain/babylon/types/retry" diff --git a/x/checkpointing/keeper/keeper.go b/x/checkpointing/keeper/keeper.go index 92d711def..b5e43823a 100644 --- a/x/checkpointing/keeper/keeper.go +++ b/x/checkpointing/keeper/keeper.go @@ -87,6 +87,11 @@ func (k Keeper) addBlsSig(ctx sdk.Context, sig *types.BlsSig) error { return nil } + if !sig.LastCommitHash.Equal(*ckptWithMeta.Ckpt.LastCommitHash) { + // processed BlsSig message is for invalid last commit hash + return types.ErrInvalidLastCommitHash + } + // get signer's address signerAddr, err := sdk.ValAddressFromBech32(sig.SignerAddress) if err != nil { diff --git a/x/checkpointing/keeper/msg_server_test.go b/x/checkpointing/keeper/msg_server_test.go index ac0ba6f95..786b93a1a 100644 --- a/x/checkpointing/keeper/msg_server_test.go +++ b/x/checkpointing/keeper/msg_server_test.go @@ -163,6 +163,31 @@ func FuzzWrappedCreateValidator(f *testing.F) { }) } +func TestInvalidLastCommitHash(t *testing.T) { + helper := testepoching.NewHelperWithValSet(t) + ck := helper.App.CheckpointingKeeper + msgServer := checkpointingkeeper.NewMsgServerImpl(ck) + // needed to 
init total voting power + helper.BeginBlock() + + epoch := uint64(1) + validLch := datagen.GenRandomByteArray(32) + // correct checkpoint for epoch 1 + _, err := ck.BuildRawCheckpoint(helper.Ctx, epoch, validLch) + require.NoError(t, err) + + // Malicious validator created message with valid bls signature but for invalid + // commit hash + invalidLch := datagen.GenRandomByteArray(32) + val0Info := helper.ValBlsPrivKeys[0] + signBytes := append(sdk.Uint64ToBigEndian(epoch), invalidLch...) + sig := bls12381.Sign(val0Info.BlsKey, signBytes) + msg := types.NewMsgAddBlsSig(epoch, invalidLch, sig, val0Info.Address) + + _, err = msgServer.AddBlsSig(helper.Ctx, msg) + require.ErrorIs(t, err, types.ErrInvalidLastCommitHash) +} + func buildMsgWrappedCreateValidator(addr sdk.AccAddress) (*types.MsgWrappedCreateValidator, error) { tmValPrivkey := ed25519.GenPrivKey() bondTokens := sdk.TokensFromConsensusPower(10, sdk.DefaultPowerReduction) diff --git a/x/checkpointing/types/errors.go b/x/checkpointing/types/errors.go index ab9c5ed08..95fd9cdcb 100644 --- a/x/checkpointing/types/errors.go +++ b/x/checkpointing/types/errors.go @@ -16,4 +16,5 @@ var ( ErrBlsKeyAlreadyExist = sdkerrors.Register(ModuleName, 1210, "BLS public key already exists") ErrBlsPrivKeyDoesNotExist = sdkerrors.Register(ModuleName, 1211, "BLS private key does not exist") ErrConflictingCheckpoint = sdkerrors.Register(ModuleName, 1212, "Conflicting checkpoint is found") + ErrInvalidLastCommitHash = sdkerrors.Register(ModuleName, 1213, "Provided last commit hash is Invalid") ) diff --git a/x/epoching/testepoching/helper.go b/x/epoching/testepoching/helper.go index 887e71c64..56c401b98 100644 --- a/x/epoching/testepoching/helper.go +++ b/x/epoching/testepoching/helper.go @@ -8,7 +8,6 @@ import ( "cosmossdk.io/math" appparams "github.com/babylonchain/babylon/app/params" - "github.com/stretchr/testify/require" "github.com/babylonchain/babylon/app" @@ -26,6 +25,11 @@ import ( tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) +type ValidatorInfo struct { + BlsKey bls12381.PrivateKey + Address sdk.ValAddress +} + // Helper is a structure which wraps the entire app and exposes functionalities for testing the epoching module type Helper struct { t *testing.T @@ -37,7 +41,8 @@ type Helper struct { QueryClient types.QueryClient StakingKeeper *stakingkeeper.Keeper - GenAccs []authtypes.GenesisAccount + GenAccs []authtypes.GenesisAccount + ValBlsPrivKeys []ValidatorInfo } // NewHelper creates the helper for testing the epoching module @@ -51,7 +56,8 @@ func NewHelper(t *testing.T) *Helper { valSet := epochingKeeper.GetValidatorSet(ctx, 0) require.Len(t, valSet, 1) genesisVal := valSet[0] - genesisBLSPubkey := bls12381.GenPrivKey().PubKey() + blsPrivKey := bls12381.GenPrivKey() + genesisBLSPubkey := blsPrivKey.PubKey() err := app.CheckpointingKeeper.CreateRegistration(ctx, genesisBLSPubkey, genesisVal.Addr) require.NoError(t, err) @@ -61,7 +67,20 @@ func NewHelper(t *testing.T) *Helper { queryClient := types.NewQueryClient(queryHelper) msgSrvr := keeper.NewMsgServerImpl(epochingKeeper) - return &Helper{t, ctx, app, &epochingKeeper, msgSrvr, queryClient, &app.StakingKeeper, nil} + return &Helper{ + t, + ctx, + app, + &epochingKeeper, + msgSrvr, + queryClient, + &app.StakingKeeper, + nil, + []ValidatorInfo{ValidatorInfo{ + blsPrivKey, + genesisVal.Addr, + }}, + } } // NewHelperWithValSet is same as NewHelper, except that it creates a set of validators @@ -86,22 +105,23 @@ func NewHelperWithValSet(t *testing.T) *Helper { // get 
necessary subsets of the app/keeper epochingKeeper := app.EpochingKeeper - + valInfos := []ValidatorInfo{} // add BLS pubkey to the genesis validator valSet := epochingKeeper.GetValidatorSet(ctx, 0) for _, val := range valSet { - blsPubkey := bls12381.GenPrivKey().PubKey() + blsPrivKey := bls12381.GenPrivKey() + valInfos = append(valInfos, ValidatorInfo{blsPrivKey, val.Addr}) + blsPubkey := blsPrivKey.PubKey() err = app.CheckpointingKeeper.CreateRegistration(ctx, blsPubkey, val.Addr) require.NoError(t, err) } - querier := keeper.Querier{Keeper: epochingKeeper} queryHelper := baseapp.NewQueryServerTestHelper(ctx, app.InterfaceRegistry()) types.RegisterQueryServer(queryHelper, querier) queryClient := types.NewQueryClient(queryHelper) msgSrvr := keeper.NewMsgServerImpl(epochingKeeper) - return &Helper{t, ctx, app, &epochingKeeper, msgSrvr, queryClient, &app.StakingKeeper, GenAccs} + return &Helper{t, ctx, app, &epochingKeeper, msgSrvr, queryClient, &app.StakingKeeper, GenAccs, valInfos} } // GenAndApplyEmptyBlock generates a new empty block and appends it to the current blockchain From 039d4ab14e4c829b26602eb6647de1619c3a6cb4 Mon Sep 17 00:00:00 2001 From: Cirrus Gai Date: Mon, 23 Jan 2023 10:56:28 +0800 Subject: [PATCH 25/37] feat: Monitor/Add new KV and query for checkpoint reported btc height (#286) --- proto/babylon/monitor/query.proto | 27 +- test/e2e/configurer/chain/queries.go | 17 +- test/e2e/e2e_test.go | 4 +- .../keeper/grpc_query_checkpoint.go | 2 +- x/checkpointing/keeper/hooks.go | 15 + x/checkpointing/keeper/keeper.go | 34 +- x/checkpointing/types/expected_keepers.go | 8 +- x/checkpointing/types/hooks.go | 14 + x/checkpointing/types/types.go | 5 - x/checkpointing/types/utils.go | 18 + x/epoching/keeper/hooks.go | 11 +- x/monitor/keeper/grpc_query.go | 20 +- x/monitor/keeper/hooks.go | 21 + x/monitor/keeper/keeper.go | 65 +- x/monitor/types/errors.go | 3 +- x/monitor/types/keys.go | 13 +- x/monitor/types/query.pb.go | 556 +++++++++++++++--- x/monitor/types/query.pb.gw.go | 129 +++- x/zoneconcierge/keeper/hooks.go | 11 +- 19 files changed, 824 insertions(+), 149 deletions(-) diff --git a/proto/babylon/monitor/query.proto b/proto/babylon/monitor/query.proto index 43e322cec..ae2128f31 100644 --- a/proto/babylon/monitor/query.proto +++ b/proto/babylon/monitor/query.proto @@ -15,9 +15,14 @@ service Query { option (google.api.http).get = "/babylon/monitor/v1/params"; } - // FinishedEpochBtcHeight btc light client height at provided epoch finish - rpc FinishedEpochBtcHeight(QueryFinishedEpochBtcHeightRequest) returns (QueryFinishedEpochBtcHeightResponse) { - option (google.api.http).get = "/babylon/monitor/v1/{epoch_num}"; + // EndedEpochBtcHeight returns the BTC light client height at provided epoch finish + rpc EndedEpochBtcHeight(QueryEndedEpochBtcHeightRequest) returns (QueryEndedEpochBtcHeightResponse) { + option (google.api.http).get = "/babylon/monitor/v1/epochs/{epoch_num}"; + } + + // ReportedCheckpointBtcHeight returns the BTC light client height at which the checkpoint with the given hash is reported back to Babylon + rpc ReportedCheckpointBtcHeight(QueryReportedCheckpointBtcHeightRequest) returns (QueryReportedCheckpointBtcHeightResponse) { + option (google.api.http).get = "/babylon/monitor/v1/checkpoints/{ckpt_hash}"; } } @@ -30,11 +35,21 @@ message QueryParamsResponse { Params params = 1 [ (gogoproto.nullable) = false ]; } -message QueryFinishedEpochBtcHeightRequest { +message QueryEndedEpochBtcHeightRequest { uint64 epoch_num = 1; } -message 
QueryFinishedEpochBtcHeightResponse { - // height of btc ligh client when epoch ended +message QueryEndedEpochBtcHeightResponse { + // height of btc light client when epoch ended + uint64 btc_light_client_height = 1; +} + +message QueryReportedCheckpointBtcHeightRequest { + // ckpt_hash is hex encoded byte string of the hash of the checkpoint + string ckpt_hash = 1; +} + +message QueryReportedCheckpointBtcHeightResponse { + // height of btc light client when checkpoint is reported uint64 btc_light_client_height = 1; } diff --git a/test/e2e/configurer/chain/queries.go b/test/e2e/configurer/chain/queries.go index 2b90047d2..3144ca161 100644 --- a/test/e2e/configurer/chain/queries.go +++ b/test/e2e/configurer/chain/queries.go @@ -230,11 +230,22 @@ func (n *NodeConfig) QueryCurrentEpoch() (uint64, error) { return epochResponse.CurrentEpoch, nil } -func (n *NodeConfig) QueryLightClientHeighEpochEnd(epoch uint64) (uint64, error) { - monitorPath := fmt.Sprintf("/babylon/monitor/v1/%d", epoch) +func (n *NodeConfig) QueryLightClientHeightEpochEnd(epoch uint64) (uint64, error) { + monitorPath := fmt.Sprintf("/babylon/monitor/v1/epochs/%d", epoch) bz, err := n.QueryGRPCGateway(monitorPath) require.NoError(n.t, err) - var mResponse mtypes.QueryFinishedEpochBtcHeightResponse + var mResponse mtypes.QueryEndedEpochBtcHeightResponse + if err := util.Cdc.UnmarshalJSON(bz, &mResponse); err != nil { + return 0, err + } + return mResponse.BtcLightClientHeight, nil +} + +func (n *NodeConfig) QueryLightClientHeightCheckpointReported(ckptHash []byte) (uint64, error) { + monitorPath := fmt.Sprintf("/babylon/monitor/v1/checkpoints/%x", ckptHash) + bz, err := n.QueryGRPCGateway(monitorPath) + require.NoError(n.t, err) + var mResponse mtypes.QueryReportedCheckpointBtcHeightResponse if err := util.Cdc.UnmarshalJSON(bz, &mResponse); err != nil { return 0, err } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 3bb5720d5..182430580 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -49,10 +49,10 @@ func (s *IntegrationTestSuite) TestIbcCheckpointing() { currEpoch, err := nonValidatorNode.QueryCurrentEpoch() s.NoError(err) - heightAtFinishedEpoch, err := nonValidatorNode.QueryLightClientHeighEpochEnd(currEpoch - 1) + heightAtEndedEpoch, err := nonValidatorNode.QueryLightClientHeightEpochEnd(currEpoch - 1) s.NoError(err) - if heightAtFinishedEpoch == 0 { + if heightAtEndedEpoch == 0 { // we can only assert, that btc lc height is larger than 0. 
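 		// note: the monitor module stores the BTC light client tip height under
 		// an epoch-end key once the epoch finishes, so a zero value here means
 		// no height was recorded at all for the finished epoch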
s.FailNow(fmt.Sprintf("Light client height should be > 0 on epoch %d", currEpoch-1)) } diff --git a/x/checkpointing/keeper/grpc_query_checkpoint.go b/x/checkpointing/keeper/grpc_query_checkpoint.go index 34350ce83..120a03f6d 100644 --- a/x/checkpointing/keeper/grpc_query_checkpoint.go +++ b/x/checkpointing/keeper/grpc_query_checkpoint.go @@ -141,7 +141,7 @@ func (k Keeper) GetLastCheckpointedEpoch(ctx sdk.Context) (uint64, error) { if curEpoch <= 0 { return 0, fmt.Errorf("current epoch should be more than 0") } - // minus 1 is because the current epoch is not finished + // minus 1 is because the current epoch is not ended tipEpoch := curEpoch - 1 _, err := k.GetRawCheckpoint(ctx, tipEpoch) if err != nil { diff --git a/x/checkpointing/keeper/hooks.go b/x/checkpointing/keeper/hooks.go index fca60aba1..fa29134d2 100644 --- a/x/checkpointing/keeper/hooks.go +++ b/x/checkpointing/keeper/hooks.go @@ -24,6 +24,13 @@ func (k Keeper) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error return nil } +func (k Keeper) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *types.RawCheckpoint) error { + if k.hooks != nil { + return k.hooks.AfterRawCheckpointForgotten(ctx, ckpt) + } + return nil +} + // AfterRawCheckpointFinalized - call hook if the checkpoint is finalized func (k Keeper) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error { if k.hooks != nil { @@ -31,3 +38,11 @@ func (k Keeper) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error } return nil } + +// AfterRawCheckpointBlsSigVerified - call hook if the checkpoint's BLS sig is verified +func (k Keeper) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *types.RawCheckpoint) error { + if k.hooks != nil { + return k.hooks.AfterRawCheckpointBlsSigVerified(ctx, ckpt) + } + return nil +} diff --git a/x/checkpointing/keeper/keeper.go b/x/checkpointing/keeper/keeper.go index b5e43823a..63246c5e7 100644 --- a/x/checkpointing/keeper/keeper.go +++ b/x/checkpointing/keeper/keeper.go @@ -127,12 +127,12 @@ func (k Keeper) addBlsSig(ctx sdk.Context, sig *types.BlsSig) error { &types.EventCheckpointSealed{Checkpoint: ckptWithMeta}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint sealed event for epoch %v", ckptWithMeta.Ckpt.EpochNum) + k.Logger(ctx).Error("failed to emit checkpoint sealed event for epoch %v", ckptWithMeta.Ckpt.EpochNum) } // record state update of Sealed ckptWithMeta.RecordStateUpdate(ctx, types.Sealed) // log in console - ctx.Logger().Info(fmt.Sprintf("Checkpointing: checkpoint for epoch %v is Sealed", ckptWithMeta.Ckpt.EpochNum)) + k.Logger(ctx).Info(fmt.Sprintf("Checkpointing: checkpoint for epoch %v is Sealed", ckptWithMeta.Ckpt.EpochNum)) } // if reaching this line, it means ckptWithMeta is updated, @@ -168,7 +168,7 @@ func (k Keeper) BuildRawCheckpoint(ctx sdk.Context, epochNum uint64, lch types.L if err != nil { return nil, err } - ctx.Logger().Info(fmt.Sprintf("Checkpointing: a new raw checkpoint is built for epoch %v", epochNum)) + k.Logger(ctx).Info(fmt.Sprintf("Checkpointing: a new raw checkpoint is built for epoch %v", epochNum)) return ckptWithMeta, nil } @@ -196,7 +196,7 @@ func (k Keeper) VerifyCheckpoint(ctx sdk.Context, checkpoint txformat.RawBtcChec func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcCheckpoint) (*types.RawCheckpointWithMeta, error) { ckpt, err := types.FromBTCCkptToRawCkpt(rawCheckpoint) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to decode raw checkpoint from BTC raw checkpoint: %w", err) } // 
sanity check err = ckpt.ValidateBasic() @@ -205,7 +205,7 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcC } ckptWithMeta, err := k.GetRawCheckpoint(ctx, ckpt.EpochNum) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to fetch the raw checkpoint at epoch %d from database: %w", ckpt.EpochNum, err) } // can skip the checks if it is identical with the local checkpoint that is not accumulating @@ -218,7 +218,7 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcC totalPower := k.GetTotalVotingPower(ctx, ckpt.EpochNum) signerSet, err := k.GetValidatorSet(ctx, ckpt.EpochNum).FindSubset(ckpt.Bitmap) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get the signer set via bitmap of epoch %d: %w", ckpt.EpochNum, err) } var sum int64 signersPubKeys := make([]bls12381.PublicKey, len(signerSet)) @@ -241,6 +241,12 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcC return nil, types.ErrInvalidRawCheckpoint.Wrap("invalid BLS multi-sig") } + // record verified checkpoint + err = k.AfterRawCheckpointBlsSigVerified(ctx, ckpt) + if err != nil { + return nil, err + } + // now the checkpoint's multi-sig is valid, if the lastcommithash is the // same with that of the local checkpoint, it means it is valid except that // it is signed by a different signer set @@ -249,7 +255,7 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcC } // multi-sig is valid but the quorum is on a different branch, meaning conflicting is observed - ctx.Logger().Error(types.ErrConflictingCheckpoint.Wrapf("epoch %v", ckpt.EpochNum).Error()) + k.Logger(ctx).Error(types.ErrConflictingCheckpoint.Wrapf("epoch %v", ckpt.EpochNum).Error()) // report conflicting checkpoint event err = ctx.EventManager().EmitTypedEvent( &types.EventConflictingCheckpoint{ @@ -272,7 +278,7 @@ func (k Keeper) SetCheckpointSubmitted(ctx sdk.Context, epoch uint64) { &types.EventCheckpointSubmitted{Checkpoint: ckpt}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint submitted event for epoch %v", ckpt.Ckpt.EpochNum) + k.Logger(ctx).Error("failed to emit checkpoint submitted event for epoch %v", ckpt.Ckpt.EpochNum) } } @@ -284,11 +290,11 @@ func (k Keeper) SetCheckpointConfirmed(ctx sdk.Context, epoch uint64) { &types.EventCheckpointConfirmed{Checkpoint: ckpt}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint confirmed event for epoch %v: %v", ckpt.Ckpt.EpochNum, err) + k.Logger(ctx).Error("failed to emit checkpoint confirmed event for epoch %v: %v", ckpt.Ckpt.EpochNum, err) } // invoke hook if err := k.AfterRawCheckpointConfirmed(ctx, epoch); err != nil { - ctx.Logger().Error("failed to trigger checkpoint confirmed hook for epoch %v: %v", ckpt.Ckpt.EpochNum, err) + k.Logger(ctx).Error("failed to trigger checkpoint confirmed hook for epoch %v: %v", ckpt.Ckpt.EpochNum, err) } } @@ -300,11 +306,11 @@ func (k Keeper) SetCheckpointFinalized(ctx sdk.Context, epoch uint64) { &types.EventCheckpointFinalized{Checkpoint: ckpt}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint finalized event for epoch %v: %v", ckpt.Ckpt.EpochNum, err) + k.Logger(ctx).Error("failed to emit checkpoint finalized event for epoch %v: %v", ckpt.Ckpt.EpochNum, err) } // invoke hook, which is currently subscribed by ZoneConcierge if err := k.AfterRawCheckpointFinalized(ctx, epoch); err != nil { - ctx.Logger().Error("failed to trigger checkpoint finalized hook for epoch %v: %v", 
ckpt.Ckpt.EpochNum, err) + k.Logger(ctx).Error("failed to trigger checkpoint finalized hook for epoch %v: %v", ckpt.Ckpt.EpochNum, err) } } @@ -316,7 +322,7 @@ func (k Keeper) SetCheckpointForgotten(ctx sdk.Context, epoch uint64) { &types.EventCheckpointForgotten{Checkpoint: ckpt}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint forgotten event for epoch %v", ckpt.Ckpt.EpochNum) + k.Logger(ctx).Error("failed to emit checkpoint forgotten event for epoch %v", ckpt.Ckpt.EpochNum) } } @@ -342,7 +348,7 @@ func (k Keeper) setCheckpointStatus(ctx sdk.Context, epoch uint64, from types.Ch panic("failed to update checkpoint status") } statusChangeMsg := fmt.Sprintf("Checkpointing: checkpoint status for epoch %v successfully changed from %v to %v", epoch, from.String(), to.String()) - ctx.Logger().Info(statusChangeMsg) + k.Logger(ctx).Info(statusChangeMsg) return ckptWithMeta } diff --git a/x/checkpointing/types/expected_keepers.go b/x/checkpointing/types/expected_keepers.go index bc2c0417d..2d3645970 100644 --- a/x/checkpointing/types/expected_keepers.go +++ b/x/checkpointing/types/expected_keepers.go @@ -34,7 +34,9 @@ type EpochingKeeper interface { // CheckpointingHooks event hooks for raw checkpoint object (noalias) type CheckpointingHooks interface { - AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error // Must be called when a BLS key is registered - AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error // Must be called when a raw checkpoint is CONFIRMED - AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error // Must be called when a raw checkpoint is FINALIZED + AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error // Must be called when a BLS key is registered + AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error // Must be called when a raw checkpoint is CONFIRMED + AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *RawCheckpoint) error // Must be called when a raw checkpoint is FORGOTTEN + AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error // Must be called when a raw checkpoint is FINALIZED + AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *RawCheckpoint) error // Must be called when a raw checkpoint's multi-sig is verified } diff --git a/x/checkpointing/types/hooks.go b/x/checkpointing/types/hooks.go index 8e0519e63..d003f59eb 100644 --- a/x/checkpointing/types/hooks.go +++ b/x/checkpointing/types/hooks.go @@ -31,6 +31,13 @@ func (h MultiCheckpointingHooks) AfterRawCheckpointConfirmed(ctx sdk.Context, ep return nil } +func (h MultiCheckpointingHooks) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *RawCheckpoint) error { + for i := range h { + return h[i].AfterRawCheckpointForgotten(ctx, ckpt) + } + return nil +} + func (h MultiCheckpointingHooks) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error { for i := range h { if err := h[i].AfterRawCheckpointFinalized(ctx, epoch); err != nil { @@ -39,3 +46,10 @@ func (h MultiCheckpointingHooks) AfterRawCheckpointFinalized(ctx sdk.Context, ep } return nil } + +func (h MultiCheckpointingHooks) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *RawCheckpoint) error { + for i := range h { + return h[i].AfterRawCheckpointBlsSigVerified(ctx, ckpt) + } + return nil +} diff --git a/x/checkpointing/types/types.go b/x/checkpointing/types/types.go index 25bc56288..3f2a26b4a 100644 --- a/x/checkpointing/types/types.go +++ b/x/checkpointing/types/types.go @@ -1,7 +1,6 @@ package types import ( - "bytes" "crypto/sha256" "encoding/hex" 
"errors" @@ -217,7 +216,3 @@ func BytesToCkptWithMeta(cdc codec.BinaryCodec, bz []byte) (*RawCheckpointWithMe err := cdc.Unmarshal(bz, ckptWithMeta) return ckptWithMeta, err } - -func (m RawCkptHash) Equals(h RawCkptHash) bool { - return bytes.Equal(m.Bytes(), h.Bytes()) -} diff --git a/x/checkpointing/types/utils.go b/x/checkpointing/types/utils.go index 1dc48ecdb..584037089 100644 --- a/x/checkpointing/types/utils.go +++ b/x/checkpointing/types/utils.go @@ -1,6 +1,8 @@ package types import ( + "bytes" + "encoding/hex" "github.com/babylonchain/babylon/btctxformatter" "github.com/babylonchain/babylon/crypto/bls12381" sdk "github.com/cosmos/cosmos-sdk/types" @@ -27,6 +29,10 @@ func (m RawCheckpoint) Hash() RawCkptHash { return hash(fields) } +func (m RawCheckpoint) HashStr() string { + return m.Hash().String() +} + // SignedMsg is the message corresponding to the BLS sig in this raw checkpoint // Its value should be (epoch_number || last_commit_hash) func (m RawCheckpoint) SignedMsg() []byte { @@ -49,6 +55,18 @@ func (m RawCkptHash) Bytes() []byte { return m } +func (m RawCkptHash) Equals(h RawCkptHash) bool { + return bytes.Equal(m.Bytes(), h.Bytes()) +} + +func (m RawCkptHash) String() string { + return hex.EncodeToString(m) +} + +func FromStringToCkptHash(s string) (RawCkptHash, error) { + return hex.DecodeString(s) +} + func FromBTCCkptBytesToRawCkpt(btcCkptBytes []byte) (*RawCheckpoint, error) { btcCkpt, err := btctxformatter.DecodeRawCheckpoint(btctxformatter.CurrentVersion, btcCkptBytes) if err != nil { diff --git a/x/epoching/keeper/hooks.go b/x/epoching/keeper/hooks.go index 803012b31..7117d4a13 100644 --- a/x/epoching/keeper/hooks.go +++ b/x/epoching/keeper/hooks.go @@ -124,4 +124,13 @@ func (h Hooks) AfterDelegationModified(ctx sdk.Context, delAddr sdk.AccAddress, return nil } func (h Hooks) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error { return nil } -func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { return nil } + +func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { return nil } + +func (h Hooks) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return nil +} + +func (h Hooks) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return nil +} diff --git a/x/monitor/keeper/grpc_query.go b/x/monitor/keeper/grpc_query.go index 5f564cfb1..6a38d7a90 100644 --- a/x/monitor/keeper/grpc_query.go +++ b/x/monitor/keeper/grpc_query.go @@ -11,7 +11,7 @@ import ( var _ types.QueryServer = Keeper{} -func (k Keeper) FinishedEpochBtcHeight(c context.Context, req *types.QueryFinishedEpochBtcHeightRequest) (*types.QueryFinishedEpochBtcHeightResponse, error) { +func (k Keeper) EndedEpochBtcHeight(c context.Context, req *types.QueryEndedEpochBtcHeightRequest) (*types.QueryEndedEpochBtcHeightResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") } @@ -24,5 +24,21 @@ func (k Keeper) FinishedEpochBtcHeight(c context.Context, req *types.QueryFinish return nil, err } - return &types.QueryFinishedEpochBtcHeightResponse{BtcLightClientHeight: btcHeight}, nil + return &types.QueryEndedEpochBtcHeightResponse{BtcLightClientHeight: btcHeight}, nil +} + +func (k Keeper) ReportedCheckpointBtcHeight(c context.Context, req *types.QueryReportedCheckpointBtcHeightRequest) (*types.QueryReportedCheckpointBtcHeightResponse, error) { + if req == nil { + return nil, 
status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + btcHeight, err := k.LightclientHeightAtCheckpointReported(ctx, req.CkptHash) + + if err != nil { + return nil, err + } + + return &types.QueryReportedCheckpointBtcHeightResponse{BtcLightClientHeight: btcHeight}, nil } diff --git a/x/monitor/keeper/hooks.go b/x/monitor/keeper/hooks.go index c7f91bf7f..91bd8437d 100644 --- a/x/monitor/keeper/hooks.go +++ b/x/monitor/keeper/hooks.go @@ -1,6 +1,7 @@ package keeper import ( + checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" etypes "github.com/babylonchain/babylon/x/epoching/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -8,6 +9,7 @@ import ( // Helper interface to be sure Hooks implement both epoching and light client hooks type HandledHooks interface { etypes.EpochingHooks + checkpointingtypes.CheckpointingHooks } type Hooks struct { @@ -25,3 +27,22 @@ func (h Hooks) AfterEpochEnds(ctx sdk.Context, epoch uint64) { } func (h Hooks) BeforeSlashThreshold(ctx sdk.Context, valSet etypes.ValidatorSet) {} + +func (h Hooks) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error { + return nil +} +func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { + return nil +} + +func (h Hooks) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return h.k.removeCheckpointRecord(ctx, ckpt) +} + +func (h Hooks) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error { + return nil +} + +func (h Hooks) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return h.k.updateBtcLightClientHeightForCheckpoint(ctx, ckpt) +} diff --git a/x/monitor/keeper/keeper.go b/x/monitor/keeper/keeper.go index e711f7fb6..156ebf28f 100644 --- a/x/monitor/keeper/keeper.go +++ b/x/monitor/keeper/keeper.go @@ -2,6 +2,7 @@ package keeper import ( "fmt" + ckpttypes "github.com/babylonchain/babylon/x/checkpointing/types" "github.com/babylonchain/babylon/x/monitor/types" "github.com/cosmos/cosmos-sdk/codec" @@ -60,15 +61,51 @@ func (k Keeper) updateBtcLightClientHeightForEpoch(ctx sdk.Context, epoch uint64 store.Set(types.GetEpochEndLightClientHeightKey(epoch), sdk.Uint64ToBigEndian(currentTipHeight)) } +func (k Keeper) updateBtcLightClientHeightForCheckpoint(ctx sdk.Context, ckpt *ckpttypes.RawCheckpoint) error { + store := ctx.KVStore(k.storeKey) + ckptHashStr := ckpt.HashStr() + + // if the checkpoint exists, meaning an earlier checkpoint with a lower btc height is already recorded + // we should keep the lower btc height in the store + if store.Has([]byte(ckptHashStr)) { + k.Logger(ctx).With("module", fmt.Sprintf("checkpoint %s is already recorded", ckptHashStr)) + return nil + } + + storeKey, err := types.GetCheckpointReportedLightClientHeightKey(ckptHashStr) + if err != nil { + return err + } + + currentTipHeight := k.btcLightClientKeeper.GetTipInfo(ctx).Height + store.Set(storeKey, sdk.Uint64ToBigEndian(currentTipHeight)) + + return nil +} + +func (k Keeper) removeCheckpointRecord(ctx sdk.Context, ckpt *ckpttypes.RawCheckpoint) error { + store := ctx.KVStore(k.storeKey) + ckptHashStr := ckpt.HashStr() + + storeKey, err := types.GetCheckpointReportedLightClientHeightKey(ckptHashStr) + if err != nil { + return err + } + + store.Delete(storeKey) + + return nil +} + func (k Keeper) LightclientHeightAtEpochEnd(ctx sdk.Context, epoch uint64) (uint64, error) { store := ctx.KVStore(k.storeKey) btcHeightBytes := 
store.Get(types.GetEpochEndLightClientHeightKey(epoch)) - - if len(btcHeightBytes) == 0 { + // nil would be returned if key does not exist + if btcHeightBytes == nil { // we do not have any key under given epoch, most probably epoch did not finish // yet - return 0, types.ErrEpochNotFinishedYet + return 0, types.ErrEpochNotEnded.Wrapf("epoch %d", epoch) } btcHeight, err := bytesToUint64(btcHeightBytes) @@ -79,3 +116,25 @@ func (k Keeper) LightclientHeightAtEpochEnd(ctx sdk.Context, epoch uint64) (uint return btcHeight, nil } + +func (k Keeper) LightclientHeightAtCheckpointReported(ctx sdk.Context, hashString string) (uint64, error) { + store := ctx.KVStore(k.storeKey) + + storeKey, err := types.GetCheckpointReportedLightClientHeightKey(hashString) + if err != nil { + return 0, err + } + + btcHeightBytes := store.Get(storeKey) + // nil would be returned if key does not exist + if btcHeightBytes == nil { + return 0, types.ErrCheckpointNotReported.Wrapf("checkpoint hash: %s", hashString) + } + + btcHeight, err := bytesToUint64(btcHeightBytes) + if err != nil { + panic("invalid data in database") + } + + return btcHeight, nil +} diff --git a/x/monitor/types/errors.go b/x/monitor/types/errors.go index 56c20fa0b..565387fe6 100644 --- a/x/monitor/types/errors.go +++ b/x/monitor/types/errors.go @@ -8,5 +8,6 @@ import ( // x/monitor module sentinel errors var ( - ErrEpochNotFinishedYet = sdkerrors.Register(ModuleName, 1100, "Epoch not finished yet") + ErrEpochNotEnded = sdkerrors.Register(ModuleName, 1100, "Epoch not ended yet") + ErrCheckpointNotReported = sdkerrors.Register(ModuleName, 1101, "Checkpoint not reported yet") ) diff --git a/x/monitor/types/keys.go b/x/monitor/types/keys.go index 6e8f79c6a..1f65b3c03 100644 --- a/x/monitor/types/keys.go +++ b/x/monitor/types/keys.go @@ -1,6 +1,8 @@ package types import ( + "fmt" + "github.com/babylonchain/babylon/x/checkpointing/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -22,7 +24,8 @@ const ( ) var ( - EpochEndLightClientHeightPrefix = []byte{1} + EpochEndLightClientHeightPrefix = []byte{1} + CheckpointReportedLightClientHeightPrefix = []byte{2} ) func KeyPrefix(p string) []byte { @@ -32,3 +35,11 @@ func KeyPrefix(p string) []byte { func GetEpochEndLightClientHeightKey(e uint64) []byte { return append(EpochEndLightClientHeightPrefix, sdk.Uint64ToBigEndian(e)...) 
} + +func GetCheckpointReportedLightClientHeightKey(hashString string) ([]byte, error) { + hashBytes, err := types.FromStringToCkptHash(hashString) + if err != nil { + return nil, fmt.Errorf("invalid hash string %s: %w", hashString, err) + } + return append(CheckpointReportedLightClientHeightPrefix, hashBytes...), nil +} diff --git a/x/monitor/types/query.pb.go b/x/monitor/types/query.pb.go index 2fd362550..8be5efc78 100644 --- a/x/monitor/types/query.pb.go +++ b/x/monitor/types/query.pb.go @@ -113,22 +113,22 @@ func (m *QueryParamsResponse) GetParams() Params { return Params{} } -type QueryFinishedEpochBtcHeightRequest struct { +type QueryEndedEpochBtcHeightRequest struct { EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` } -func (m *QueryFinishedEpochBtcHeightRequest) Reset() { *m = QueryFinishedEpochBtcHeightRequest{} } -func (m *QueryFinishedEpochBtcHeightRequest) String() string { return proto.CompactTextString(m) } -func (*QueryFinishedEpochBtcHeightRequest) ProtoMessage() {} -func (*QueryFinishedEpochBtcHeightRequest) Descriptor() ([]byte, []int) { +func (m *QueryEndedEpochBtcHeightRequest) Reset() { *m = QueryEndedEpochBtcHeightRequest{} } +func (m *QueryEndedEpochBtcHeightRequest) String() string { return proto.CompactTextString(m) } +func (*QueryEndedEpochBtcHeightRequest) ProtoMessage() {} +func (*QueryEndedEpochBtcHeightRequest) Descriptor() ([]byte, []int) { return fileDescriptor_3b70877a7534d1c4, []int{2} } -func (m *QueryFinishedEpochBtcHeightRequest) XXX_Unmarshal(b []byte) error { +func (m *QueryEndedEpochBtcHeightRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryFinishedEpochBtcHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryEndedEpochBtcHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryFinishedEpochBtcHeightRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryEndedEpochBtcHeightRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -138,42 +138,42 @@ func (m *QueryFinishedEpochBtcHeightRequest) XXX_Marshal(b []byte, deterministic return b[:n], nil } } -func (m *QueryFinishedEpochBtcHeightRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryFinishedEpochBtcHeightRequest.Merge(m, src) +func (m *QueryEndedEpochBtcHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEndedEpochBtcHeightRequest.Merge(m, src) } -func (m *QueryFinishedEpochBtcHeightRequest) XXX_Size() int { +func (m *QueryEndedEpochBtcHeightRequest) XXX_Size() int { return m.Size() } -func (m *QueryFinishedEpochBtcHeightRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryFinishedEpochBtcHeightRequest.DiscardUnknown(m) +func (m *QueryEndedEpochBtcHeightRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEndedEpochBtcHeightRequest.DiscardUnknown(m) } -var xxx_messageInfo_QueryFinishedEpochBtcHeightRequest proto.InternalMessageInfo +var xxx_messageInfo_QueryEndedEpochBtcHeightRequest proto.InternalMessageInfo -func (m *QueryFinishedEpochBtcHeightRequest) GetEpochNum() uint64 { +func (m *QueryEndedEpochBtcHeightRequest) GetEpochNum() uint64 { if m != nil { return m.EpochNum } return 0 } -type QueryFinishedEpochBtcHeightResponse struct { - // height of btc ligh client when epoch ended +type QueryEndedEpochBtcHeightResponse struct { + // height of btc light client when epoch ended BtcLightClientHeight uint64 
`protobuf:"varint,1,opt,name=btc_light_client_height,json=btcLightClientHeight,proto3" json:"btc_light_client_height,omitempty"` } -func (m *QueryFinishedEpochBtcHeightResponse) Reset() { *m = QueryFinishedEpochBtcHeightResponse{} } -func (m *QueryFinishedEpochBtcHeightResponse) String() string { return proto.CompactTextString(m) } -func (*QueryFinishedEpochBtcHeightResponse) ProtoMessage() {} -func (*QueryFinishedEpochBtcHeightResponse) Descriptor() ([]byte, []int) { +func (m *QueryEndedEpochBtcHeightResponse) Reset() { *m = QueryEndedEpochBtcHeightResponse{} } +func (m *QueryEndedEpochBtcHeightResponse) String() string { return proto.CompactTextString(m) } +func (*QueryEndedEpochBtcHeightResponse) ProtoMessage() {} +func (*QueryEndedEpochBtcHeightResponse) Descriptor() ([]byte, []int) { return fileDescriptor_3b70877a7534d1c4, []int{3} } -func (m *QueryFinishedEpochBtcHeightResponse) XXX_Unmarshal(b []byte) error { +func (m *QueryEndedEpochBtcHeightResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryFinishedEpochBtcHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryEndedEpochBtcHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryFinishedEpochBtcHeightResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryEndedEpochBtcHeightResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -183,19 +183,113 @@ func (m *QueryFinishedEpochBtcHeightResponse) XXX_Marshal(b []byte, deterministi return b[:n], nil } } -func (m *QueryFinishedEpochBtcHeightResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryFinishedEpochBtcHeightResponse.Merge(m, src) +func (m *QueryEndedEpochBtcHeightResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEndedEpochBtcHeightResponse.Merge(m, src) } -func (m *QueryFinishedEpochBtcHeightResponse) XXX_Size() int { +func (m *QueryEndedEpochBtcHeightResponse) XXX_Size() int { return m.Size() } -func (m *QueryFinishedEpochBtcHeightResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryFinishedEpochBtcHeightResponse.DiscardUnknown(m) +func (m *QueryEndedEpochBtcHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEndedEpochBtcHeightResponse.DiscardUnknown(m) } -var xxx_messageInfo_QueryFinishedEpochBtcHeightResponse proto.InternalMessageInfo +var xxx_messageInfo_QueryEndedEpochBtcHeightResponse proto.InternalMessageInfo -func (m *QueryFinishedEpochBtcHeightResponse) GetBtcLightClientHeight() uint64 { +func (m *QueryEndedEpochBtcHeightResponse) GetBtcLightClientHeight() uint64 { + if m != nil { + return m.BtcLightClientHeight + } + return 0 +} + +type QueryReportedCheckpointBtcHeightRequest struct { + // ckpt_hash is hex encoded byte string of the hash of the checkpoint + CkptHash string `protobuf:"bytes,1,opt,name=ckpt_hash,json=ckptHash,proto3" json:"ckpt_hash,omitempty"` +} + +func (m *QueryReportedCheckpointBtcHeightRequest) Reset() { + *m = QueryReportedCheckpointBtcHeightRequest{} +} +func (m *QueryReportedCheckpointBtcHeightRequest) String() string { return proto.CompactTextString(m) } +func (*QueryReportedCheckpointBtcHeightRequest) ProtoMessage() {} +func (*QueryReportedCheckpointBtcHeightRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{4} +} +func (m *QueryReportedCheckpointBtcHeightRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryReportedCheckpointBtcHeightRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryReportedCheckpointBtcHeightRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryReportedCheckpointBtcHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryReportedCheckpointBtcHeightRequest.Merge(m, src) +} +func (m *QueryReportedCheckpointBtcHeightRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryReportedCheckpointBtcHeightRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryReportedCheckpointBtcHeightRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryReportedCheckpointBtcHeightRequest proto.InternalMessageInfo + +func (m *QueryReportedCheckpointBtcHeightRequest) GetCkptHash() string { + if m != nil { + return m.CkptHash + } + return "" +} + +type QueryReportedCheckpointBtcHeightResponse struct { + // height of btc light client when checkpoint is reported + BtcLightClientHeight uint64 `protobuf:"varint,1,opt,name=btc_light_client_height,json=btcLightClientHeight,proto3" json:"btc_light_client_height,omitempty"` +} + +func (m *QueryReportedCheckpointBtcHeightResponse) Reset() { + *m = QueryReportedCheckpointBtcHeightResponse{} +} +func (m *QueryReportedCheckpointBtcHeightResponse) String() string { return proto.CompactTextString(m) } +func (*QueryReportedCheckpointBtcHeightResponse) ProtoMessage() {} +func (*QueryReportedCheckpointBtcHeightResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{5} +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryReportedCheckpointBtcHeightResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryReportedCheckpointBtcHeightResponse.Merge(m, src) +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryReportedCheckpointBtcHeightResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryReportedCheckpointBtcHeightResponse proto.InternalMessageInfo + +func (m *QueryReportedCheckpointBtcHeightResponse) GetBtcLightClientHeight() uint64 { if m != nil { return m.BtcLightClientHeight } @@ -205,41 +299,49 @@ func (m *QueryFinishedEpochBtcHeightResponse) GetBtcLightClientHeight() uint64 { func init() { proto.RegisterType((*QueryParamsRequest)(nil), "babylon.monitor.v1.QueryParamsRequest") proto.RegisterType((*QueryParamsResponse)(nil), "babylon.monitor.v1.QueryParamsResponse") - proto.RegisterType((*QueryFinishedEpochBtcHeightRequest)(nil), "babylon.monitor.v1.QueryFinishedEpochBtcHeightRequest") - proto.RegisterType((*QueryFinishedEpochBtcHeightResponse)(nil), "babylon.monitor.v1.QueryFinishedEpochBtcHeightResponse") + proto.RegisterType((*QueryEndedEpochBtcHeightRequest)(nil), "babylon.monitor.v1.QueryEndedEpochBtcHeightRequest") + proto.RegisterType((*QueryEndedEpochBtcHeightResponse)(nil), "babylon.monitor.v1.QueryEndedEpochBtcHeightResponse") + 
proto.RegisterType((*QueryReportedCheckpointBtcHeightRequest)(nil), "babylon.monitor.v1.QueryReportedCheckpointBtcHeightRequest") + proto.RegisterType((*QueryReportedCheckpointBtcHeightResponse)(nil), "babylon.monitor.v1.QueryReportedCheckpointBtcHeightResponse") } func init() { proto.RegisterFile("babylon/monitor/query.proto", fileDescriptor_3b70877a7534d1c4) } var fileDescriptor_3b70877a7534d1c4 = []byte{ - // 426 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0xcd, 0xaa, 0xd3, 0x40, - 0x14, 0x4e, 0x2e, 0xd7, 0xa2, 0xe3, 0x6e, 0x2c, 0x2a, 0xb9, 0x97, 0x5c, 0x8d, 0xe0, 0x15, 0x17, - 0x19, 0x72, 0xc5, 0x9f, 0xad, 0x15, 0x45, 0x41, 0xfc, 0xe9, 0x52, 0x84, 0x30, 0x33, 0x0e, 0xc9, - 0x40, 0x32, 0x27, 0xcd, 0x4c, 0x8a, 0x45, 0xba, 0xf1, 0x09, 0x04, 0xdf, 0xc4, 0xad, 0x2f, 0xd0, - 0x65, 0xc1, 0x8d, 0x2b, 0x91, 0xd6, 0x07, 0x91, 0x4c, 0xa6, 0x05, 0xdb, 0x5a, 0x71, 0x97, 0x9c, - 0xef, 0xe7, 0x7c, 0xe7, 0x9c, 0x41, 0x47, 0x8c, 0xb2, 0x49, 0x01, 0x8a, 0x94, 0xa0, 0xa4, 0x81, - 0x9a, 0x8c, 0x1a, 0x51, 0x4f, 0xe2, 0xaa, 0x06, 0x03, 0x18, 0x3b, 0x30, 0x76, 0x60, 0x3c, 0x4e, - 0x82, 0x7e, 0x06, 0x19, 0x58, 0x98, 0xb4, 0x5f, 0x1d, 0x33, 0x38, 0xce, 0x00, 0xb2, 0x42, 0x10, - 0x5a, 0x49, 0x42, 0x95, 0x02, 0x43, 0x8d, 0x04, 0xa5, 0x1d, 0x7a, 0x9b, 0x83, 0x2e, 0x41, 0x13, - 0x46, 0xb5, 0xe8, 0x1a, 0x90, 0x71, 0xc2, 0x84, 0xa1, 0x09, 0xa9, 0x68, 0x26, 0x95, 0x25, 0xaf, - 0x9c, 0x36, 0x03, 0x55, 0xb4, 0xa6, 0xa5, 0x73, 0x8a, 0xfa, 0x08, 0xbf, 0x6e, 0xf5, 0xaf, 0x6c, - 0x71, 0x28, 0x46, 0x8d, 0xd0, 0x26, 0x7a, 0x89, 0x2e, 0xfd, 0x51, 0xd5, 0x15, 0x28, 0x2d, 0xf0, - 0x03, 0xd4, 0xeb, 0xc4, 0x57, 0xfd, 0x6b, 0xfe, 0xad, 0x8b, 0x67, 0x41, 0xbc, 0x3d, 0x4f, 0xdc, - 0x69, 0x06, 0x87, 0xb3, 0x1f, 0x27, 0xde, 0xd0, 0xf1, 0xa3, 0x87, 0x28, 0xb2, 0x86, 0x4f, 0xa4, - 0x92, 0x3a, 0x17, 0xef, 0x1e, 0x57, 0xc0, 0xf3, 0x81, 0xe1, 0x4f, 0x85, 0xcc, 0x72, 0xe3, 0xda, - 0xe2, 0x23, 0x74, 0x41, 0xb4, 0x40, 0xaa, 0x9a, 0xd2, 0xb6, 0x38, 0x1c, 0x9e, 0xb7, 0x85, 0x17, - 0x4d, 0x19, 0xbd, 0x45, 0x37, 0xf6, 0x5a, 0xb8, 0x8c, 0x77, 0xd1, 0x15, 0x66, 0x78, 0x5a, 0xb4, - 0xc5, 0x94, 0x17, 0x52, 0x28, 0x93, 0xe6, 0x96, 0xe2, 0x1c, 0xfb, 0xcc, 0xf0, 0xe7, 0xed, 0xff, - 0x23, 0x0b, 0x76, 0xf2, 0xb3, 0xaf, 0x07, 0xe8, 0x9c, 0xb5, 0xc7, 0x53, 0xd4, 0xeb, 0x46, 0xc0, - 0x37, 0x77, 0x8d, 0xb7, 0xbd, 0xad, 0xe0, 0xf4, 0x9f, 0xbc, 0x2e, 0x5b, 0x14, 0x7d, 0xfc, 0xf6, - 0xeb, 0xf3, 0xc1, 0x31, 0x0e, 0xc8, 0xe6, 0x4d, 0xc6, 0x89, 0x3b, 0x0b, 0xfe, 0xe2, 0xa3, 0xcb, - 0xbb, 0x47, 0xc4, 0xf7, 0xfe, 0xda, 0x67, 0xef, 0x5a, 0x83, 0xfb, 0xff, 0xad, 0x73, 0x79, 0x4f, - 0x6d, 0xde, 0xeb, 0xf8, 0x64, 0x57, 0xde, 0x0f, 0xeb, 0x53, 0x4d, 0x07, 0xcf, 0x66, 0x8b, 0xd0, - 0x9f, 0x2f, 0x42, 0xff, 0xe7, 0x22, 0xf4, 0x3f, 0x2d, 0x43, 0x6f, 0xbe, 0x0c, 0xbd, 0xef, 0xcb, - 0xd0, 0x7b, 0x43, 0x32, 0x69, 0xf2, 0x86, 0xc5, 0x1c, 0xca, 0x95, 0x09, 0xcf, 0xa9, 0x54, 0x6b, - 0xc7, 0xf7, 0x6b, 0x4f, 0x33, 0xa9, 0x84, 0x66, 0x3d, 0xfb, 0x2e, 0xef, 0xfc, 0x0e, 0x00, 0x00, - 0xff, 0xff, 0x3f, 0x2c, 0xa9, 0x89, 0x48, 0x03, 0x00, 0x00, + // 515 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4f, 0x6b, 0xd4, 0x4e, + 0x18, 0xde, 0xfc, 0xd8, 0xdf, 0xd2, 0x8e, 0xb7, 0xe9, 0x82, 0x92, 0x2d, 0x69, 0xc9, 0xa1, 0x5d, + 0x14, 0x33, 0x6c, 0x57, 0x41, 0x50, 0x3c, 0x6c, 0xa9, 0x54, 0x10, 0xff, 0xe4, 0xa6, 0x97, 0x30, + 0x99, 0x1d, 0x92, 0xa1, 0x9b, 0x99, 0x69, 0x66, 0xb2, 0xb8, 0x94, 0xbd, 0x78, 0xf3, 0x26, 0xf8, + 0x45, 0xfc, 0x18, 0xbd, 0x08, 0x05, 0x2f, 
0x9e, 0x44, 0x76, 0xfd, 0x20, 0x92, 0xc9, 0x34, 0x60, + 0x9b, 0x75, 0x51, 0x6f, 0xc9, 0xfb, 0xbc, 0xcf, 0xf3, 0x3e, 0x6f, 0xde, 0x87, 0x80, 0x5e, 0x8c, + 0xe3, 0xd9, 0x44, 0x70, 0x94, 0x09, 0xce, 0xb4, 0xc8, 0xd1, 0x69, 0x41, 0xf3, 0x59, 0x20, 0x73, + 0xa1, 0x05, 0x84, 0x16, 0x0c, 0x2c, 0x18, 0x4c, 0x07, 0x6e, 0x37, 0x11, 0x89, 0x30, 0x30, 0x2a, + 0x9f, 0xaa, 0x4e, 0x77, 0x3b, 0x11, 0x22, 0x99, 0x50, 0x84, 0x25, 0x43, 0x98, 0x73, 0xa1, 0xb1, + 0x66, 0x82, 0x2b, 0x8b, 0xde, 0x26, 0x42, 0x65, 0x42, 0xa1, 0x18, 0x2b, 0x5a, 0x0d, 0x40, 0xd3, + 0x41, 0x4c, 0x35, 0x1e, 0x20, 0x89, 0x13, 0xc6, 0x4d, 0xf3, 0xa5, 0xd2, 0x55, 0x43, 0x12, 0xe7, + 0x38, 0xb3, 0x4a, 0x7e, 0x17, 0xc0, 0x57, 0x25, 0xff, 0xa5, 0x29, 0x86, 0xf4, 0xb4, 0xa0, 0x4a, + 0xfb, 0x2f, 0xc0, 0xd6, 0x2f, 0x55, 0x25, 0x05, 0x57, 0x14, 0x3e, 0x00, 0x9d, 0x8a, 0x7c, 0xcb, + 0xd9, 0x75, 0xfa, 0x37, 0x0e, 0xdc, 0xe0, 0xfa, 0x3e, 0x41, 0xc5, 0x19, 0xb5, 0xcf, 0xbf, 0xed, + 0xb4, 0x42, 0xdb, 0xef, 0x3f, 0x06, 0x3b, 0x46, 0xf0, 0x88, 0x8f, 0xe9, 0xf8, 0x48, 0x0a, 0x92, + 0x8e, 0x34, 0x39, 0xa6, 0x2c, 0x49, 0xb5, 0x9d, 0x09, 0x7b, 0x60, 0x93, 0x96, 0x40, 0xc4, 0x8b, + 0xcc, 0xe8, 0xb7, 0xc3, 0x0d, 0x53, 0x78, 0x5e, 0x64, 0xfe, 0x6b, 0xb0, 0xbb, 0x9a, 0x6f, 0xdd, + 0xdd, 0x07, 0x37, 0x63, 0x4d, 0xa2, 0x49, 0x59, 0x8c, 0xc8, 0x84, 0x51, 0xae, 0xa3, 0xd4, 0xb4, + 0x58, 0xb9, 0x6e, 0xac, 0xc9, 0xb3, 0xf2, 0xfd, 0xd0, 0x80, 0x15, 0xdd, 0x7f, 0x02, 0xf6, 0x8d, + 0x74, 0x48, 0xa5, 0xc8, 0x35, 0x1d, 0x1f, 0xa6, 0x94, 0x9c, 0x48, 0xc1, 0xb8, 0x6e, 0xb2, 0x48, + 0x4e, 0xa4, 0x8e, 0x52, 0xac, 0x52, 0xa3, 0xb9, 0x19, 0x6e, 0x94, 0x85, 0x63, 0xac, 0x52, 0x1f, + 0x83, 0xfe, 0x7a, 0x9d, 0x7f, 0xb2, 0x7a, 0xf0, 0xbe, 0x0d, 0xfe, 0x37, 0x33, 0xe0, 0x1c, 0x74, + 0xaa, 0xef, 0x0c, 0xf7, 0x9a, 0x6e, 0x70, 0xfd, 0xa4, 0xee, 0xfe, 0xda, 0xbe, 0xca, 0x9b, 0xef, + 0xbf, 0xfb, 0xf2, 0xe3, 0xe3, 0x7f, 0xdb, 0xd0, 0x45, 0x57, 0x83, 0x33, 0x1d, 0xd8, 0xec, 0xc0, + 0x4f, 0x0e, 0xd8, 0x6a, 0x38, 0x05, 0x1c, 0xae, 0x1c, 0xb2, 0xfa, 0xf0, 0xee, 0xbd, 0x3f, 0x23, + 0x59, 0x9b, 0x81, 0xb1, 0xd9, 0x87, 0x7b, 0x4d, 0x36, 0x4d, 0x6e, 0x14, 0x3a, 0xab, 0x03, 0x35, + 0x87, 0x9f, 0x1d, 0xd0, 0xfb, 0xcd, 0x69, 0xe0, 0xc3, 0x95, 0x2e, 0xd6, 0x07, 0xc3, 0x7d, 0xf4, + 0x77, 0x64, 0xbb, 0xca, 0xd0, 0xac, 0x72, 0x17, 0xde, 0x69, 0x5a, 0x85, 0xd4, 0x44, 0x85, 0xce, + 0xea, 0xf4, 0xcd, 0x47, 0x4f, 0xcf, 0x17, 0x9e, 0x73, 0xb1, 0xf0, 0x9c, 0xef, 0x0b, 0xcf, 0xf9, + 0xb0, 0xf4, 0x5a, 0x17, 0x4b, 0xaf, 0xf5, 0x75, 0xe9, 0xb5, 0xde, 0xa0, 0x84, 0xe9, 0xb4, 0x88, + 0x03, 0x22, 0xb2, 0x4b, 0x41, 0x92, 0x62, 0xc6, 0x6b, 0xf5, 0xb7, 0xb5, 0xbe, 0x9e, 0x49, 0xaa, + 0xe2, 0x8e, 0xf9, 0x15, 0x0c, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x8a, 0xb1, 0xb8, 0xbb, + 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -256,8 +358,10 @@ const _ = grpc.SupportPackageIsVersion4 type QueryClient interface { // Parameters queries the parameters of the module. 
Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) - // FinishedEpochBtcHeight btc light client height at provided epoch finish - FinishedEpochBtcHeight(ctx context.Context, in *QueryFinishedEpochBtcHeightRequest, opts ...grpc.CallOption) (*QueryFinishedEpochBtcHeightResponse, error) + // EndedEpochBtcHeight returns the BTC light client height at provided epoch finish + EndedEpochBtcHeight(ctx context.Context, in *QueryEndedEpochBtcHeightRequest, opts ...grpc.CallOption) (*QueryEndedEpochBtcHeightResponse, error) + // ReportedCheckpointBtcHeight returns the BTC light client height at which the checkpoint with the given hash is reported back to Babylon + ReportedCheckpointBtcHeight(ctx context.Context, in *QueryReportedCheckpointBtcHeightRequest, opts ...grpc.CallOption) (*QueryReportedCheckpointBtcHeightResponse, error) } type queryClient struct { @@ -277,9 +381,18 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts . return out, nil } -func (c *queryClient) FinishedEpochBtcHeight(ctx context.Context, in *QueryFinishedEpochBtcHeightRequest, opts ...grpc.CallOption) (*QueryFinishedEpochBtcHeightResponse, error) { - out := new(QueryFinishedEpochBtcHeightResponse) - err := c.cc.Invoke(ctx, "/babylon.monitor.v1.Query/FinishedEpochBtcHeight", in, out, opts...) +func (c *queryClient) EndedEpochBtcHeight(ctx context.Context, in *QueryEndedEpochBtcHeightRequest, opts ...grpc.CallOption) (*QueryEndedEpochBtcHeightResponse, error) { + out := new(QueryEndedEpochBtcHeightResponse) + err := c.cc.Invoke(ctx, "/babylon.monitor.v1.Query/EndedEpochBtcHeight", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ReportedCheckpointBtcHeight(ctx context.Context, in *QueryReportedCheckpointBtcHeightRequest, opts ...grpc.CallOption) (*QueryReportedCheckpointBtcHeightResponse, error) { + out := new(QueryReportedCheckpointBtcHeightResponse) + err := c.cc.Invoke(ctx, "/babylon.monitor.v1.Query/ReportedCheckpointBtcHeight", in, out, opts...) if err != nil { return nil, err } @@ -290,8 +403,10 @@ func (c *queryClient) FinishedEpochBtcHeight(ctx context.Context, in *QueryFinis type QueryServer interface { // Parameters queries the parameters of the module. Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) - // FinishedEpochBtcHeight btc light client height at provided epoch finish - FinishedEpochBtcHeight(context.Context, *QueryFinishedEpochBtcHeightRequest) (*QueryFinishedEpochBtcHeightResponse, error) + // EndedEpochBtcHeight returns the BTC light client height at provided epoch finish + EndedEpochBtcHeight(context.Context, *QueryEndedEpochBtcHeightRequest) (*QueryEndedEpochBtcHeightResponse, error) + // ReportedCheckpointBtcHeight returns the BTC light client height at which the checkpoint with the given hash is reported back to Babylon + ReportedCheckpointBtcHeight(context.Context, *QueryReportedCheckpointBtcHeightRequest) (*QueryReportedCheckpointBtcHeightResponse, error) } // UnimplementedQueryServer can be embedded to have forward compatible implementations. 
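As a usage illustration (not part of this patch): a minimal sketch of calling the new ReportedCheckpointBtcHeight RPC through the generated QueryClient. The gRPC endpoint address and the checkpoint hash below are placeholders; the method, request, and response types and their field names come from the generated code in this file.

package main

import (
	"context"
	"fmt"
	"log"

	mtypes "github.com/babylonchain/babylon/x/monitor/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// placeholder endpoint of a Babylon node's gRPC server
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := mtypes.NewQueryClient(conn)

	// CkptHash is the hex-encoded hash of the checkpoint, per query.proto
	resp, err := client.ReportedCheckpointBtcHeight(context.Background(), &mtypes.QueryReportedCheckpointBtcHeightRequest{
		CkptHash: "deadbeef", // placeholder hash
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("BTC light client height when checkpoint was reported:", resp.BtcLightClientHeight)
}

The same data is also reachable over the gRPC gateway at /babylon/monitor/v1/checkpoints/{ckpt_hash}, which is how the e2e helper QueryLightClientHeightCheckpointReported queries it.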
@@ -301,8 +416,11 @@ type UnimplementedQueryServer struct { func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") } -func (*UnimplementedQueryServer) FinishedEpochBtcHeight(ctx context.Context, req *QueryFinishedEpochBtcHeightRequest) (*QueryFinishedEpochBtcHeightResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinishedEpochBtcHeight not implemented") +func (*UnimplementedQueryServer) EndedEpochBtcHeight(ctx context.Context, req *QueryEndedEpochBtcHeightRequest) (*QueryEndedEpochBtcHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EndedEpochBtcHeight not implemented") +} +func (*UnimplementedQueryServer) ReportedCheckpointBtcHeight(ctx context.Context, req *QueryReportedCheckpointBtcHeightRequest) (*QueryReportedCheckpointBtcHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReportedCheckpointBtcHeight not implemented") } func RegisterQueryServer(s grpc1.Server, srv QueryServer) { @@ -327,20 +445,38 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } -func _Query_FinishedEpochBtcHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryFinishedEpochBtcHeightRequest) +func _Query_EndedEpochBtcHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryEndedEpochBtcHeightRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).FinishedEpochBtcHeight(ctx, in) + return srv.(QueryServer).EndedEpochBtcHeight(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/babylon.monitor.v1.Query/FinishedEpochBtcHeight", + FullMethod: "/babylon.monitor.v1.Query/EndedEpochBtcHeight", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).FinishedEpochBtcHeight(ctx, req.(*QueryFinishedEpochBtcHeightRequest)) + return srv.(QueryServer).EndedEpochBtcHeight(ctx, req.(*QueryEndedEpochBtcHeightRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ReportedCheckpointBtcHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryReportedCheckpointBtcHeightRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ReportedCheckpointBtcHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.monitor.v1.Query/ReportedCheckpointBtcHeight", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ReportedCheckpointBtcHeight(ctx, req.(*QueryReportedCheckpointBtcHeightRequest)) } return interceptor(ctx, in, info, handler) } @@ -354,8 +490,12 @@ var _Query_serviceDesc = grpc.ServiceDesc{ Handler: _Query_Params_Handler, }, { - MethodName: "FinishedEpochBtcHeight", - Handler: _Query_FinishedEpochBtcHeight_Handler, + MethodName: "EndedEpochBtcHeight", + Handler: _Query_EndedEpochBtcHeight_Handler, + }, + { + MethodName: "ReportedCheckpointBtcHeight", + Handler: _Query_ReportedCheckpointBtcHeight_Handler, }, }, Streams: 
[]grpc.StreamDesc{}, @@ -418,7 +558,7 @@ func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *QueryFinishedEpochBtcHeightRequest) Marshal() (dAtA []byte, err error) { +func (m *QueryEndedEpochBtcHeightRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -428,12 +568,12 @@ func (m *QueryFinishedEpochBtcHeightRequest) Marshal() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *QueryFinishedEpochBtcHeightRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryEndedEpochBtcHeightRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryFinishedEpochBtcHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryEndedEpochBtcHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -446,7 +586,65 @@ func (m *QueryFinishedEpochBtcHeightRequest) MarshalToSizedBuffer(dAtA []byte) ( return len(dAtA) - i, nil } -func (m *QueryFinishedEpochBtcHeightResponse) Marshal() (dAtA []byte, err error) { +func (m *QueryEndedEpochBtcHeightResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEndedEpochBtcHeightResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEndedEpochBtcHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BtcLightClientHeight != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.BtcLightClientHeight)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryReportedCheckpointBtcHeightRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryReportedCheckpointBtcHeightRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryReportedCheckpointBtcHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CkptHash) > 0 { + i -= len(m.CkptHash) + copy(dAtA[i:], m.CkptHash) + i = encodeVarintQuery(dAtA, i, uint64(len(m.CkptHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryReportedCheckpointBtcHeightResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -456,12 +654,12 @@ func (m *QueryFinishedEpochBtcHeightResponse) Marshal() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *QueryFinishedEpochBtcHeightResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryReportedCheckpointBtcHeightResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryFinishedEpochBtcHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryReportedCheckpointBtcHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -505,7 +703,7 @@ func (m *QueryParamsResponse) Size() (n int) { return n } -func (m *QueryFinishedEpochBtcHeightRequest) Size() (n int) { +func (m 
*QueryEndedEpochBtcHeightRequest) Size() (n int) { if m == nil { return 0 } @@ -517,7 +715,32 @@ func (m *QueryFinishedEpochBtcHeightRequest) Size() (n int) { return n } -func (m *QueryFinishedEpochBtcHeightResponse) Size() (n int) { +func (m *QueryEndedEpochBtcHeightResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BtcLightClientHeight != 0 { + n += 1 + sovQuery(uint64(m.BtcLightClientHeight)) + } + return n +} + +func (m *QueryReportedCheckpointBtcHeightRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CkptHash) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryReportedCheckpointBtcHeightResponse) Size() (n int) { if m == nil { return 0 } @@ -668,7 +891,7 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryFinishedEpochBtcHeightRequest) Unmarshal(dAtA []byte) error { +func (m *QueryEndedEpochBtcHeightRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -691,10 +914,10 @@ func (m *QueryFinishedEpochBtcHeightRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryFinishedEpochBtcHeightRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryEndedEpochBtcHeightRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryFinishedEpochBtcHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryEndedEpochBtcHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -737,7 +960,158 @@ func (m *QueryFinishedEpochBtcHeightRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryFinishedEpochBtcHeightResponse) Unmarshal(dAtA []byte) error { +func (m *QueryEndedEpochBtcHeightResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEndedEpochBtcHeightResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEndedEpochBtcHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BtcLightClientHeight", wireType) + } + m.BtcLightClientHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BtcLightClientHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryReportedCheckpointBtcHeightRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryReportedCheckpointBtcHeightRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryReportedCheckpointBtcHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CkptHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CkptHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryReportedCheckpointBtcHeightResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -760,10 +1134,10 @@ func (m *QueryFinishedEpochBtcHeightResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryFinishedEpochBtcHeightResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryReportedCheckpointBtcHeightResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryFinishedEpochBtcHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryReportedCheckpointBtcHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: diff --git a/x/monitor/types/query.pb.gw.go b/x/monitor/types/query.pb.gw.go index de833cc5c..a750169ed 100644 --- a/x/monitor/types/query.pb.gw.go +++ b/x/monitor/types/query.pb.gw.go @@ -51,8 +51,8 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal } -func request_Query_FinishedEpochBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryFinishedEpochBtcHeightRequest +func request_Query_EndedEpochBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEndedEpochBtcHeightRequest var metadata runtime.ServerMetadata var ( @@ -73,13 +73,13 @@ func request_Query_FinishedEpochBtcHeight_0(ctx context.Context, marshaler runti return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) } - msg, err := client.FinishedEpochBtcHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.EndedEpochBtcHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -func local_request_Query_FinishedEpochBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryFinishedEpochBtcHeightRequest +func local_request_Query_EndedEpochBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEndedEpochBtcHeightRequest var metadata runtime.ServerMetadata var ( @@ -100,7 +100,61 @@ func local_request_Query_FinishedEpochBtcHeight_0(ctx context.Context, marshaler return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) } - msg, err := server.FinishedEpochBtcHeight(ctx, &protoReq) + msg, err := server.EndedEpochBtcHeight(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ReportedCheckpointBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryReportedCheckpointBtcHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["ckpt_hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "ckpt_hash") + } + + protoReq.CkptHash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ckpt_hash", err) + } + + msg, err := client.ReportedCheckpointBtcHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ReportedCheckpointBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryReportedCheckpointBtcHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["ckpt_hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "ckpt_hash") + } + + protoReq.CkptHash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ckpt_hash", err) + } + + msg, err := server.ReportedCheckpointBtcHeight(ctx, &protoReq) return msg, metadata, err } @@ -134,7 +188,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) - mux.Handle("GET", pattern_Query_FinishedEpochBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_EndedEpochBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -145,7 +199,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := 
local_request_Query_FinishedEpochBtcHeight_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_EndedEpochBtcHeight_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -153,7 +207,30 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_FinishedEpochBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_EndedEpochBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ReportedCheckpointBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ReportedCheckpointBtcHeight_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ReportedCheckpointBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -218,7 +295,7 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) - mux.Handle("GET", pattern_Query_FinishedEpochBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_EndedEpochBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -227,14 +304,34 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_FinishedEpochBtcHeight_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_EndedEpochBtcHeight_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_FinishedEpochBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_EndedEpochBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ReportedCheckpointBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ReportedCheckpointBtcHeight_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ReportedCheckpointBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -244,11 +341,15 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie var ( pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "monitor", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_FinishedEpochBtcHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"babylon", "monitor", "v1", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_EndedEpochBtcHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "monitor", "v1", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_ReportedCheckpointBtcHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "monitor", "v1", "checkpoints", "ckpt_hash"}, "", runtime.AssumeColonVerbOpt(false))) ) var ( forward_Query_Params_0 = runtime.ForwardResponseMessage - forward_Query_FinishedEpochBtcHeight_0 = runtime.ForwardResponseMessage + forward_Query_EndedEpochBtcHeight_0 = runtime.ForwardResponseMessage + + forward_Query_ReportedCheckpointBtcHeight_0 = runtime.ForwardResponseMessage ) diff --git a/x/zoneconcierge/keeper/hooks.go b/x/zoneconcierge/keeper/hooks.go index 1e02adc7d..df53b72fa 100644 --- a/x/zoneconcierge/keeper/hooks.go +++ b/x/zoneconcierge/keeper/hooks.go @@ -76,7 +76,14 @@ func (h Hooks) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error // Other unused hooks -func (h Hooks) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error { return nil } -func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { return nil } +func (h Hooks) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error { return nil } +func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { return nil } + +func (h Hooks) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return nil +} +func (h Hooks) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return nil +} func (h Hooks) AfterEpochBegins(ctx sdk.Context, epoch uint64) {} func (h Hooks) BeforeSlashThreshold(ctx sdk.Context, valSet epochingtypes.ValidatorSet) {} From 42e37444c65097d6571caee8fca0bac1abca22fc Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Tue, 24 Jan 2023 11:48:49 +1100 Subject: [PATCH 26/37] zoneconcierge: proper initialisation for chain info (#285) --- .../keeper/canonical_chain_indexer.go | 6 +- .../keeper/canonical_chain_indexer_test.go | 3 +- 
 x/zoneconcierge/keeper/chain_info_indexer.go | 56 +++++++++++++++----
 .../keeper/epoch_chain_info_indexer.go | 7 ++-
 x/zoneconcierge/keeper/fork_indexer_test.go | 3 +-
 x/zoneconcierge/keeper/grpc_query.go | 5 +-
 x/zoneconcierge/keeper/hooks.go | 20 ++++++-
 x/zoneconcierge/module_test.go | 9 ++-
 x/zoneconcierge/types/errors.go | 13 +++--
 9 files changed, 93 insertions(+), 29 deletions(-)

diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer.go b/x/zoneconcierge/keeper/canonical_chain_indexer.go
index f71a72a60..860e53008 100644
--- a/x/zoneconcierge/keeper/canonical_chain_indexer.go
+++ b/x/zoneconcierge/keeper/canonical_chain_indexer.go
@@ -11,9 +11,9 @@ import (
 // FindClosestHeader finds the IndexedHeader that is closest to (but not after) the given height
 func (k Keeper) FindClosestHeader(ctx sdk.Context, chainID string, height uint64) (*types.IndexedHeader, error) {
-    chainInfo := k.GetChainInfo(ctx, chainID)
-    if chainInfo.LatestHeader == nil {
-        return nil, fmt.Errorf("chain with ID %s does not have a timestamped header", chainID)
+    chainInfo, err := k.GetChainInfo(ctx, chainID)
+    if err != nil {
+        return nil, fmt.Errorf("failed to get chain info for chain with ID %s: %w", chainID, err)
     }
     // if the given height is no lower than the latest header, return the latest header directly
diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go
index bdae49362..4248fb530 100644
--- a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go
+++ b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go
@@ -35,7 +35,8 @@ func FuzzCanonicalChainIndexer(f *testing.F) {
         }
         // check if the chain info is updated or not
-        chainInfo := zcKeeper.GetChainInfo(ctx, czChain.ChainID)
+        chainInfo, err := zcKeeper.GetChainInfo(ctx, czChain.ChainID)
+        require.NoError(t, err)
         require.NotNil(t, chainInfo.LatestHeader)
         require.Equal(t, czChain.ChainID, chainInfo.LatestHeader.ChainId)
         require.Equal(t, numHeaders-1, chainInfo.LatestHeader.Height)
diff --git a/x/zoneconcierge/keeper/chain_info_indexer.go b/x/zoneconcierge/keeper/chain_info_indexer.go
index 1a5024ebb..b85462fe2 100644
--- a/x/zoneconcierge/keeper/chain_info_indexer.go
+++ b/x/zoneconcierge/keeper/chain_info_indexer.go
@@ -1,6 +1,8 @@
 package keeper
 import (
+    "fmt"
+
     sdkerrors "cosmossdk.io/errors"
     "github.com/babylonchain/babylon/x/zoneconcierge/types"
     "github.com/cosmos/cosmos-sdk/store/prefix"
@@ -12,25 +14,49 @@ func (k Keeper) setChainInfo(ctx sdk.Context, chainInfo *types.ChainInfo) {
     store.Set([]byte(chainInfo.ChainId), k.cdc.MustMarshal(chainInfo))
 }
+func (k Keeper) InitChainInfo(ctx sdk.Context, chainID string) (*types.ChainInfo, error) {
+    if len(chainID) == 0 {
+        return nil, fmt.Errorf("chainID is empty")
+    }
+    // ensure chain info has not been initialised yet
+    if k.HasChainInfo(ctx, chainID) {
+        return nil, sdkerrors.Wrapf(types.ErrInvalidChainInfo, "chain info has already been initialized")
+    }
+
+    chainInfo := &types.ChainInfo{
+        ChainId:      chainID,
+        LatestHeader: nil,
+        LatestForks: &types.Forks{
+            Headers: []*types.IndexedHeader{},
+        },
+        TimestampedHeadersCount: 0,
+    }
+
+    k.setChainInfo(ctx, chainInfo)
+    return chainInfo, nil
+}
+
+// HasChainInfo returns whether the chain info exists for a given chain ID.
+// Since IBC does not provide an API for initialising chain info right before creating an IBC connection,
+// callers need to check for its existence every time before reading or updating it.
+func (k Keeper) HasChainInfo(ctx sdk.Context, chainID string) bool {
+    store := k.chainInfoStore(ctx)
+    return store.Has([]byte(chainID))
+}
+
 // GetChainInfo returns the ChainInfo struct for a chain with a given ID
 // Since IBC does not provide API that allows to initialise chain info right before creating an IBC connection,
 // we can only check its existence every time, and return an empty one if it's not initialised yet.
-func (k Keeper) GetChainInfo(ctx sdk.Context, chainID string) *types.ChainInfo {
+func (k Keeper) GetChainInfo(ctx sdk.Context, chainID string) (*types.ChainInfo, error) {
     store := k.chainInfoStore(ctx)
     if !store.Has([]byte(chainID)) {
-        return &types.ChainInfo{
-            ChainId:      chainID,
-            LatestHeader: nil,
-            LatestForks: &types.Forks{
-                Headers: []*types.IndexedHeader{},
-            },
-        }
+        return nil, types.ErrEpochChainInfoNotFound
     }
     chainInfoBytes := store.Get([]byte(chainID))
     var chainInfo types.ChainInfo
     k.cdc.MustUnmarshal(chainInfoBytes, &chainInfo)
-    return &chainInfo
+    return &chainInfo, nil
 }
 // updateLatestHeader updates the chainInfo w.r.t. the given header, including
@@ -43,9 +69,14 @@ func (k Keeper) updateLatestHeader(ctx sdk.Context, chainID string, header *type
     if header == nil {
         return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil")
     }
-    chainInfo := k.GetChainInfo(ctx, chainID)
+    chainInfo, err := k.GetChainInfo(ctx, chainID)
+    if err != nil {
+        // chain info has not been initialised yet
+        return fmt.Errorf("failed to get chain info of %s: %w", chainID, err)
+    }
     chainInfo.LatestHeader = header // replace the old latest header with the given one
     chainInfo.TimestampedHeadersCount++ // increment the number of timestamped headers
+
     k.setChainInfo(ctx, chainInfo)
     return nil
 }
@@ -60,7 +91,10 @@ func (k Keeper) tryToUpdateLatestForkHeader(ctx sdk.Context, chainID string, hea
         return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil")
     }
-    chainInfo := k.GetChainInfo(ctx, chainID)
+    chainInfo, err := k.GetChainInfo(ctx, chainID)
+    if err != nil {
+        return sdkerrors.Wrapf(types.ErrChainInfoNotFound, "cannot insert fork header when chain info is not initialized")
+    }
     if len(chainInfo.LatestForks.Headers) == 0 {
         // no fork at the moment, add this fork header as the latest one
diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go
index dc3cdced1..f9d92e628 100644
--- a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go
+++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go
@@ -94,8 +94,11 @@ func (k Keeper) GetEpochHeaders(ctx sdk.Context, chainID string, epochNumber uin
 // where the latest chain info is retrieved from the chain info indexer
 func (k Keeper) recordEpochChainInfo(ctx sdk.Context, chainID string, epochNumber uint64) {
     // get the latest known chain info
-    // NOTE: GetChainInfo returns an empty ChainInfo object when the ChainInfo does not exist
-    chainInfo := k.GetChainInfo(ctx, chainID)
+    chainInfo, err := k.GetChainInfo(ctx, chainID)
+    if err != nil {
+        k.Logger(ctx).Debug("chain info does not exist yet, nothing to record")
+        return
+    }
     // NOTE: we can record epoch chain info without ancestor since IBC connection can be established at any height
     store := k.epochChainInfoStore(ctx, chainID)
     store.Set(sdk.Uint64ToBigEndian(epochNumber), k.cdc.MustMarshal(chainInfo))
diff --git a/x/zoneconcierge/keeper/fork_indexer_test.go b/x/zoneconcierge/keeper/fork_indexer_test.go
index 1f48a4420..b6a8a28f4 100644
--- a/x/zoneconcierge/keeper/fork_indexer_test.go
+++
b/x/zoneconcierge/keeper/fork_indexer_test.go @@ -35,7 +35,8 @@ func FuzzForkIndexer(f *testing.F) { } // check if the chain info is updated or not - chainInfo := zcKeeper.GetChainInfo(ctx, czChain.ChainID) + chainInfo, err := zcKeeper.GetChainInfo(ctx, czChain.ChainID) + require.NoError(t, err) require.Equal(t, numForkHeaders, uint64(len(chainInfo.LatestForks.Headers))) for i := range forks.Headers { require.Equal(t, czChain.ChainID, chainInfo.LatestForks.Headers[i].ChainId) diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index f69392475..057e79bed 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -37,7 +37,10 @@ func (k Keeper) ChainInfo(c context.Context, req *types.QueryChainInfoRequest) ( ctx := sdk.UnwrapSDKContext(c) // find the chain info of this epoch - chainInfo := k.GetChainInfo(ctx, req.ChainId) + chainInfo, err := k.GetChainInfo(ctx, req.ChainId) + if err != nil { + return nil, err + } resp := &types.QueryChainInfoResponse{ChainInfo: chainInfo} return resp, nil } diff --git a/x/zoneconcierge/keeper/hooks.go b/x/zoneconcierge/keeper/hooks.go index df53b72fa..d3e931377 100644 --- a/x/zoneconcierge/keeper/hooks.go +++ b/x/zoneconcierge/keeper/hooks.go @@ -1,10 +1,13 @@ package keeper import ( + "fmt" + checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" epochingtypes "github.com/babylonchain/babylon/x/epoching/types" "github.com/babylonchain/babylon/x/zoneconcierge/types" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" ibcclientkeeper "github.com/cosmos/ibc-go/v5/modules/core/02-client/keeper" ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" ) @@ -31,6 +34,21 @@ func (h Hooks) AfterHeaderWithValidCommit(ctx sdk.Context, txHash []byte, header BabylonEpoch: h.k.GetEpoch(ctx).EpochNumber, BabylonTxHash: txHash, } + + // initialise chain info if not exist + chainInfo, err := h.k.GetChainInfo(ctx, indexedHeader.ChainId) + if err != nil { + if sdkerrors.IsOf(err, types.ErrEpochChainInfoNotFound) { + // chain info does not exist yet, initialise chain info for this chain + chainInfo, err = h.k.InitChainInfo(ctx, indexedHeader.ChainId) + if err != nil { + panic(fmt.Errorf("failed to initialize chain info of %s: %w", indexedHeader.ChainId, err)) + } + } else { + panic(fmt.Errorf("failed to get chain info of %s: %w", indexedHeader.ChainId, err)) + } + } + if isOnFork { // insert header to fork index if err := h.k.insertForkHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil { @@ -44,7 +62,7 @@ func (h Hooks) AfterHeaderWithValidCommit(ctx sdk.Context, txHash []byte, header // ensure the header is the latest one, otherwise ignore it // NOTE: while an old header is considered acceptable in IBC-Go (see Case_valid_past_update), but // ZoneConcierge should not checkpoint it since Babylon requires monotonic checkpointing - if !h.k.GetChainInfo(ctx, indexedHeader.ChainId).IsLatestHeader(&indexedHeader) { + if !chainInfo.IsLatestHeader(&indexedHeader) { return } diff --git a/x/zoneconcierge/module_test.go b/x/zoneconcierge/module_test.go index 7ab128a8a..8289001a0 100644 --- a/x/zoneconcierge/module_test.go +++ b/x/zoneconcierge/module_test.go @@ -340,7 +340,8 @@ func (suite *ZoneConciergeTestSuite) TestUpdateClientTendermint() { suite.Require().Equal(expUpdateHeader.Hash, updateHeader.Header.LastCommitHash) suite.Require().Equal(expUpdateHeader.Height, updateHeaderHeight) // updateHeader should be 
correctly recorded in chain info indexer - chainInfo := suite.zcKeeper.GetChainInfo(ctx, czChainID) + chainInfo, err := suite.zcKeeper.GetChainInfo(ctx, czChainID) + suite.Require().NoError(err) suite.Require().Equal(chainInfo.LatestHeader.Hash, updateHeader.Header.LastCommitHash) suite.Require().Equal(chainInfo.LatestHeader.Height, updateHeaderHeight) } else { @@ -348,7 +349,8 @@ func (suite *ZoneConciergeTestSuite) TestUpdateClientTendermint() { _, err := suite.zcKeeper.GetHeader(ctx, czChainID, updateHeaderHeight) suite.Require().Error(err) // the latest header in chain info indexer should be the last header - chainInfo := suite.zcKeeper.GetChainInfo(ctx, czChainID) + chainInfo, err := suite.zcKeeper.GetChainInfo(ctx, czChainID) + suite.Require().NoError(err) suite.Require().Equal(chainInfo.LatestHeader.Hash, suite.czChain.LastHeader.Header.LastCommitHash) suite.Require().Equal(chainInfo.LatestHeader.Height, uint64(suite.czChain.LastHeader.Header.Height)) } @@ -364,7 +366,8 @@ func (suite *ZoneConciergeTestSuite) TestUpdateClientTendermint() { suite.Require().Equal(expForks.Headers[0].Hash, updateHeader.Header.LastCommitHash) suite.Require().Equal(expForks.Headers[0].Height, updateHeaderHeight) // updateHeader should be correctly recorded in chain info indexer - chainInfo := suite.zcKeeper.GetChainInfo(ctx, czChainID) + chainInfo, err := suite.zcKeeper.GetChainInfo(ctx, czChainID) + suite.Require().NoError(err) suite.Require().Equal(1, len(chainInfo.LatestForks.Headers)) suite.Require().Equal(chainInfo.LatestForks.Headers[0].Hash, updateHeader.Header.LastCommitHash) suite.Require().Equal(chainInfo.LatestForks.Headers[0].Height, updateHeaderHeight) diff --git a/x/zoneconcierge/types/errors.go b/x/zoneconcierge/types/errors.go index 5224d40be..483ae213b 100644 --- a/x/zoneconcierge/types/errors.go +++ b/x/zoneconcierge/types/errors.go @@ -16,10 +16,11 @@ var ( ErrNoValidAncestorHeader = sdkerrors.Register(ModuleName, 1105, "no valid ancestor for this header") ErrForkNotFound = sdkerrors.Register(ModuleName, 1106, "cannot find fork") ErrInvalidForks = sdkerrors.Register(ModuleName, 1107, "input forks is invalid") - ErrEpochChainInfoNotFound = sdkerrors.Register(ModuleName, 1108, "no chain info exists at this epoch") - ErrEpochHeadersNotFound = sdkerrors.Register(ModuleName, 1109, "no timestamped header exists at this epoch") - ErrFinalizedEpochNotFound = sdkerrors.Register(ModuleName, 1110, "cannot find a finalized epoch") - ErrInvalidProofEpochSealed = sdkerrors.Register(ModuleName, 1111, "invalid ProofEpochSealed") - ErrInvalidMerkleProof = sdkerrors.Register(ModuleName, 1112, "invalid Merkle inclusion proof") - ErrInvalidChainInfo = sdkerrors.Register(ModuleName, 1113, "invalid chain info") + ErrChainInfoNotFound = sdkerrors.Register(ModuleName, 1108, "no chain info exists") + ErrEpochChainInfoNotFound = sdkerrors.Register(ModuleName, 1109, "no chain info exists at this epoch") + ErrEpochHeadersNotFound = sdkerrors.Register(ModuleName, 1110, "no timestamped header exists at this epoch") + ErrFinalizedEpochNotFound = sdkerrors.Register(ModuleName, 1111, "cannot find a finalized epoch") + ErrInvalidProofEpochSealed = sdkerrors.Register(ModuleName, 1112, "invalid ProofEpochSealed") + ErrInvalidMerkleProof = sdkerrors.Register(ModuleName, 1113, "invalid Merkle inclusion proof") + ErrInvalidChainInfo = sdkerrors.Register(ModuleName, 1114, "invalid chain info") ) From 529b1e042b6aa008e808afe0110534d44e66a547 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Tue, 24 Jan 2023 13:47:08 +1100 
Subject: [PATCH 27/37] btccheckpoint API: add hash to `BtcCheckpointHeightAndHash` API (#290) --- client/docs/swagger-ui/swagger.yaml | 133 +++++++++--- proto/babylon/btccheckpoint/query.proto | 11 +- x/btccheckpoint/client/cli/query.go | 14 +- x/btccheckpoint/keeper/grpc_query.go | 20 +- x/btccheckpoint/types/query.pb.go | 259 +++++++++++++++--------- x/btccheckpoint/types/query.pb.gw.go | 28 +-- 6 files changed, 307 insertions(+), 158 deletions(-) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index 8ba1ffd78..d90fac506 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -74,9 +74,9 @@ paths: /babylon/btccheckpoint/v1/{epoch_num}: get: summary: >- - BtcCheckpointHeight returns earliest block height for given - rawcheckpoint - operationId: BtcCheckpointHeight + BtcCheckpointHeightAndHash returns earliest block height and hash for + given rawcheckpoint + operationId: BtcCheckpointHeightAndHash responses: '200': description: A successful response. @@ -87,9 +87,12 @@ paths: type: string format: uint64 title: Earliest btc block number containing given raw checkpoint + earliest_btc_block_hash: + type: string + format: byte title: >- - QueryCurrentEpochResponse is response type for the - Query/CurrentEpoch RPC method + QueryBtcCheckpointHeightAndHashResponse is response type for the + Query/BtcCheckpointHeightAndHash RPC method default: description: An unexpected error response. schema: @@ -4035,6 +4038,81 @@ paths: format: uint64 tags: - Query + /babylon/checkpointing/v1/last_raw_checkpoint/{status}: + get: + summary: >- + LastCheckpointWithStatus queries the last checkpoint with a given status + or a more matured status + operationId: LastCheckpointWithStatus + responses: + '200': + description: A successful response. + schema: + type: object + properties: + raw_checkpoint: + type: object + properties: + epoch_num: + type: string + format: uint64 + title: >- + epoch_num defines the epoch number the raw checkpoint is + for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that + individual BLS sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of + the BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated + from individual BLS sigs + title: RawCheckpoint wraps the BLS multi sig with meta data + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: status + in: path + required: true + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + tags: + - Query /babylon/checkpointing/v1/params: get: summary: Parameters queries the parameters of the module. @@ -6864,10 +6942,10 @@ paths: to make TransactionInfo self-contained. For example, storing the key allows TransactionInfo - to not relay on + to not relay on the fact that TransactionInfo will be ordered in the - same order as + same order as TransactionKeys in SubmissionKey. 
transaction: @@ -6882,7 +6960,7 @@ paths: in the position in `key` TODO: maybe it could use here better format as we - already processed and + already processed and valideated the proof? title: >- @@ -7816,10 +7894,10 @@ paths: to make TransactionInfo self-contained. For example, storing the key allows TransactionInfo - to not relay on + to not relay on the fact that TransactionInfo will be ordered in the - same order as + same order as TransactionKeys in SubmissionKey. transaction: @@ -7834,7 +7912,7 @@ paths: in the position in `key` TODO: maybe it could use here better format as we - already processed and + already processed and valideated the proof? title: >- @@ -9068,16 +9146,19 @@ definitions: (w in research paper) description: Params defines the parameters for the module. - babylon.btccheckpoint.v1.QueryBtcCheckpointHeightResponse: + babylon.btccheckpoint.v1.QueryBtcCheckpointHeightAndHashResponse: type: object properties: earliest_btc_block_number: type: string format: uint64 title: Earliest btc block number containing given raw checkpoint + earliest_btc_block_hash: + type: string + format: byte title: >- - QueryCurrentEpochResponse is response type for the Query/CurrentEpoch RPC - method + QueryBtcCheckpointHeightAndHashResponse is response type for the + Query/BtcCheckpointHeightAndHash RPC method babylon.btccheckpoint.v1.QueryEpochSubmissionsResponse: type: object properties: @@ -12989,8 +13070,8 @@ definitions: key is the position (txIdx, blockHash) of this tx on BTC blockchain Although it is already a part of SubmissionKey, we store it here again to make TransactionInfo self-contained. - For example, storing the key allows TransactionInfo to not relay on - the fact that TransactionInfo will be ordered in the same order as + For example, storing the key allows TransactionInfo to not relay on + the fact that TransactionInfo will be ordered in the same order as TransactionKeys in SubmissionKey. transaction: type: string @@ -13004,7 +13085,7 @@ definitions: `key` TODO: maybe it could use here better format as we already processed - and + and valideated the proof? title: >- @@ -13800,10 +13881,10 @@ definitions: to make TransactionInfo self-contained. For example, storing the key allows TransactionInfo to not relay - on + on the fact that TransactionInfo will be ordered in the same order - as + as TransactionKeys in SubmissionKey. transaction: @@ -13818,7 +13899,7 @@ definitions: position in `key` TODO: maybe it could use here better format as we already - processed and + processed and valideated the proof? title: >- @@ -15102,10 +15183,10 @@ definitions: to make TransactionInfo self-contained. For example, storing the key allows TransactionInfo to not - relay on + relay on the fact that TransactionInfo will be ordered in the same - order as + order as TransactionKeys in SubmissionKey. transaction: @@ -15120,7 +15201,7 @@ definitions: position in `key` TODO: maybe it could use here better format as we already - processed and + processed and valideated the proof? title: >- @@ -15824,10 +15905,10 @@ definitions: to make TransactionInfo self-contained. For example, storing the key allows TransactionInfo to not - relay on + relay on the fact that TransactionInfo will be ordered in the same - order as + order as TransactionKeys in SubmissionKey. transaction: @@ -15842,7 +15923,7 @@ definitions: position in `key` TODO: maybe it could use here better format as we already - processed and + processed and valideated the proof? 
title: >- diff --git a/proto/babylon/btccheckpoint/query.proto b/proto/babylon/btccheckpoint/query.proto index dfaeb091e..731aaa42d 100644 --- a/proto/babylon/btccheckpoint/query.proto +++ b/proto/babylon/btccheckpoint/query.proto @@ -16,8 +16,8 @@ service Query { option (google.api.http).get = "/babylon/btccheckpoint/v1/params"; } - // BtcCheckpointHeight returns earliest block height for given rawcheckpoint - rpc BtcCheckpointHeight(QueryBtcCheckpointHeightRequest) returns (QueryBtcCheckpointHeightResponse) { + // BtcCheckpointHeightAndHash returns earliest block height and hash for given rawcheckpoint + rpc BtcCheckpointHeightAndHash(QueryBtcCheckpointHeightAndHashRequest) returns (QueryBtcCheckpointHeightAndHashResponse) { option (google.api.http).get = "/babylon/btccheckpoint/v1/{epoch_num}"; } @@ -35,15 +35,16 @@ message QueryParamsResponse { Params params = 1 [ (gogoproto.nullable) = false ]; } -message QueryBtcCheckpointHeightRequest { +message QueryBtcCheckpointHeightAndHashRequest { // Number of epoch for which the earliest checkpointing btc height is requested uint64 epoch_num = 1; } -// QueryCurrentEpochResponse is response type for the Query/CurrentEpoch RPC method -message QueryBtcCheckpointHeightResponse { +// QueryBtcCheckpointHeightAndHashResponse is response type for the Query/BtcCheckpointHeightAndHash RPC method +message QueryBtcCheckpointHeightAndHashResponse { // Earliest btc block number containing given raw checkpoint uint64 earliest_btc_block_number = 1; + bytes earliest_btc_block_hash = 2; } message QueryEpochSubmissionsRequest { diff --git a/x/btccheckpoint/client/cli/query.go b/x/btccheckpoint/client/cli/query.go index 4ecbd7963..de284a7cc 100644 --- a/x/btccheckpoint/client/cli/query.go +++ b/x/btccheckpoint/client/cli/query.go @@ -26,15 +26,15 @@ func GetQueryCmd(queryRoute string) *cobra.Command { cmd.AddCommand(CmdQueryParams()) - cmd.AddCommand(CmdBtcCheckpointHeight()) + cmd.AddCommand(CmdBtcCheckpointHeightAndHash()) cmd.AddCommand(CmdEpochSubmissions()) return cmd } -func CmdBtcCheckpointHeight() *cobra.Command { +func CmdBtcCheckpointHeightAndHash() *cobra.Command { cmd := &cobra.Command{ - Use: "btc-height ", - Short: "retrieve earliest btc height for given epoch", + Use: "btc-height-hash ", + Short: "retrieve earliest btc height and hash for given epoch", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { clientCtx := client.GetClientContextFromCmd(cmd) @@ -47,15 +47,15 @@ func CmdBtcCheckpointHeight() *cobra.Command { return err } - params := types.QueryBtcCheckpointHeightRequest{EpochNum: epoch_num} + req := types.QueryBtcCheckpointHeightAndHashRequest{EpochNum: epoch_num} - res, err := queryClient.BtcCheckpointHeight(context.Background(), ¶ms) + resp, err := queryClient.BtcCheckpointHeightAndHash(context.Background(), &req) if err != nil { return err } - return clientCtx.PrintProto(res) + return clientCtx.PrintProto(resp) }, } diff --git a/x/btccheckpoint/keeper/grpc_query.go b/x/btccheckpoint/keeper/grpc_query.go index 2648777c0..5e08324f0 100644 --- a/x/btccheckpoint/keeper/grpc_query.go +++ b/x/btccheckpoint/keeper/grpc_query.go @@ -14,14 +14,15 @@ import ( var _ types.QueryServer = Keeper{} -func (k Keeper) lowestBtcHeight(ctx sdk.Context, subKey *types.SubmissionKey) (uint64, error) { +func (k Keeper) lowestBtcHeightAndHash(ctx sdk.Context, subKey *types.SubmissionKey) (uint64, []byte, error) { // initializing to max, as then every header number will be smaller var lowestHeaderNumber uint64 = math.MaxUint64 + var 
lowestHeaderHash []byte for _, tk := range subKey.Key { if !k.CheckHeaderIsOnMainChain(ctx, tk.Hash) { - return 0, errors.New("one of submission headers not on main chain") + return 0, nil, errors.New("one of submission headers not on main chain") } headerNumber, err := k.GetBlockHeight(ctx, tk.Hash) @@ -35,13 +36,14 @@ func (k Keeper) lowestBtcHeight(ctx sdk.Context, subKey *types.SubmissionKey) (u if headerNumber < lowestHeaderNumber { lowestHeaderNumber = headerNumber + lowestHeaderHash = *tk.Hash } } - return lowestHeaderNumber, nil + return lowestHeaderNumber, lowestHeaderHash, nil } -func (k Keeper) BtcCheckpointHeight(c context.Context, req *types.QueryBtcCheckpointHeightRequest) (*types.QueryBtcCheckpointHeightResponse, error) { +func (k Keeper) BtcCheckpointHeightAndHash(c context.Context, req *types.QueryBtcCheckpointHeightAndHashRequest) (*types.QueryBtcCheckpointHeightAndHashResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") } @@ -58,11 +60,12 @@ func (k Keeper) BtcCheckpointHeight(c context.Context, req *types.QueryBtcCheckp } var lowestHeaderNumber uint64 = math.MaxUint64 + var lowestHeaderHash []byte // we need to go for each submission in given epoch for _, submissionKey := range epochData.Key { - headerNumber, err := k.lowestBtcHeight(ctx, submissionKey) + headerNumber, headerHash, err := k.lowestBtcHeightAndHash(ctx, submissionKey) if err != nil { // submission is not valid for some reason, ignore it @@ -71,6 +74,7 @@ func (k Keeper) BtcCheckpointHeight(c context.Context, req *types.QueryBtcCheckp if headerNumber < lowestHeaderNumber { lowestHeaderNumber = headerNumber + lowestHeaderHash = headerHash } } @@ -78,7 +82,11 @@ func (k Keeper) BtcCheckpointHeight(c context.Context, req *types.QueryBtcCheckp return nil, errors.New("there is no valid submission for given raw checkpoint") } - return &types.QueryBtcCheckpointHeightResponse{EarliestBtcBlockNumber: lowestHeaderNumber}, nil + resp := &types.QueryBtcCheckpointHeightAndHashResponse{ + EarliestBtcBlockNumber: lowestHeaderNumber, + EarliestBtcBlockHash: lowestHeaderHash, + } + return resp, nil } func getOffset(pageReq *query.PageRequest) uint64 { diff --git a/x/btccheckpoint/types/query.pb.go b/x/btccheckpoint/types/query.pb.go index 22af72a4b..4fd32f6e3 100644 --- a/x/btccheckpoint/types/query.pb.go +++ b/x/btccheckpoint/types/query.pb.go @@ -113,23 +113,25 @@ func (m *QueryParamsResponse) GetParams() Params { return Params{} } -type QueryBtcCheckpointHeightRequest struct { +type QueryBtcCheckpointHeightAndHashRequest struct { // Number of epoch for which the earliest checkpointing btc height is requested EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` } -func (m *QueryBtcCheckpointHeightRequest) Reset() { *m = QueryBtcCheckpointHeightRequest{} } -func (m *QueryBtcCheckpointHeightRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBtcCheckpointHeightRequest) ProtoMessage() {} -func (*QueryBtcCheckpointHeightRequest) Descriptor() ([]byte, []int) { +func (m *QueryBtcCheckpointHeightAndHashRequest) Reset() { + *m = QueryBtcCheckpointHeightAndHashRequest{} +} +func (m *QueryBtcCheckpointHeightAndHashRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointHeightAndHashRequest) ProtoMessage() {} +func (*QueryBtcCheckpointHeightAndHashRequest) Descriptor() ([]byte, []int) { return fileDescriptor_009c1165ec392ace, []int{2} } -func (m 
*QueryBtcCheckpointHeightRequest) XXX_Unmarshal(b []byte) error { +func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryBtcCheckpointHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryBtcCheckpointHeightRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryBtcCheckpointHeightAndHashRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -139,43 +141,46 @@ func (m *QueryBtcCheckpointHeightRequest) XXX_Marshal(b []byte, deterministic bo return b[:n], nil } } -func (m *QueryBtcCheckpointHeightRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBtcCheckpointHeightRequest.Merge(m, src) +func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointHeightAndHashRequest.Merge(m, src) } -func (m *QueryBtcCheckpointHeightRequest) XXX_Size() int { +func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_Size() int { return m.Size() } -func (m *QueryBtcCheckpointHeightRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBtcCheckpointHeightRequest.DiscardUnknown(m) +func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointHeightAndHashRequest.DiscardUnknown(m) } -var xxx_messageInfo_QueryBtcCheckpointHeightRequest proto.InternalMessageInfo +var xxx_messageInfo_QueryBtcCheckpointHeightAndHashRequest proto.InternalMessageInfo -func (m *QueryBtcCheckpointHeightRequest) GetEpochNum() uint64 { +func (m *QueryBtcCheckpointHeightAndHashRequest) GetEpochNum() uint64 { if m != nil { return m.EpochNum } return 0 } -// QueryCurrentEpochResponse is response type for the Query/CurrentEpoch RPC method -type QueryBtcCheckpointHeightResponse struct { +// QueryBtcCheckpointHeightAndHashResponse is response type for the Query/BtcCheckpointHeightAndHash RPC method +type QueryBtcCheckpointHeightAndHashResponse struct { // Earliest btc block number containing given raw checkpoint EarliestBtcBlockNumber uint64 `protobuf:"varint,1,opt,name=earliest_btc_block_number,json=earliestBtcBlockNumber,proto3" json:"earliest_btc_block_number,omitempty"` + EarliestBtcBlockHash []byte `protobuf:"bytes,2,opt,name=earliest_btc_block_hash,json=earliestBtcBlockHash,proto3" json:"earliest_btc_block_hash,omitempty"` } -func (m *QueryBtcCheckpointHeightResponse) Reset() { *m = QueryBtcCheckpointHeightResponse{} } -func (m *QueryBtcCheckpointHeightResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBtcCheckpointHeightResponse) ProtoMessage() {} -func (*QueryBtcCheckpointHeightResponse) Descriptor() ([]byte, []int) { +func (m *QueryBtcCheckpointHeightAndHashResponse) Reset() { + *m = QueryBtcCheckpointHeightAndHashResponse{} +} +func (m *QueryBtcCheckpointHeightAndHashResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointHeightAndHashResponse) ProtoMessage() {} +func (*QueryBtcCheckpointHeightAndHashResponse) Descriptor() ([]byte, []int) { return fileDescriptor_009c1165ec392ace, []int{3} } -func (m *QueryBtcCheckpointHeightResponse) XXX_Unmarshal(b []byte) error { +func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryBtcCheckpointHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 
+func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryBtcCheckpointHeightResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryBtcCheckpointHeightAndHashResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -185,25 +190,32 @@ func (m *QueryBtcCheckpointHeightResponse) XXX_Marshal(b []byte, deterministic b return b[:n], nil } } -func (m *QueryBtcCheckpointHeightResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBtcCheckpointHeightResponse.Merge(m, src) +func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointHeightAndHashResponse.Merge(m, src) } -func (m *QueryBtcCheckpointHeightResponse) XXX_Size() int { +func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_Size() int { return m.Size() } -func (m *QueryBtcCheckpointHeightResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBtcCheckpointHeightResponse.DiscardUnknown(m) +func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointHeightAndHashResponse.DiscardUnknown(m) } -var xxx_messageInfo_QueryBtcCheckpointHeightResponse proto.InternalMessageInfo +var xxx_messageInfo_QueryBtcCheckpointHeightAndHashResponse proto.InternalMessageInfo -func (m *QueryBtcCheckpointHeightResponse) GetEarliestBtcBlockNumber() uint64 { +func (m *QueryBtcCheckpointHeightAndHashResponse) GetEarliestBtcBlockNumber() uint64 { if m != nil { return m.EarliestBtcBlockNumber } return 0 } +func (m *QueryBtcCheckpointHeightAndHashResponse) GetEarliestBtcBlockHash() []byte { + if m != nil { + return m.EarliestBtcBlockHash + } + return nil +} + type QueryEpochSubmissionsRequest struct { // Number of epoch for which submissions are requested EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` @@ -313,8 +325,8 @@ func (m *QueryEpochSubmissionsResponse) GetPagination() *query.PageResponse { func init() { proto.RegisterType((*QueryParamsRequest)(nil), "babylon.btccheckpoint.v1.QueryParamsRequest") proto.RegisterType((*QueryParamsResponse)(nil), "babylon.btccheckpoint.v1.QueryParamsResponse") - proto.RegisterType((*QueryBtcCheckpointHeightRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightRequest") - proto.RegisterType((*QueryBtcCheckpointHeightResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightResponse") + proto.RegisterType((*QueryBtcCheckpointHeightAndHashRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightAndHashRequest") + proto.RegisterType((*QueryBtcCheckpointHeightAndHashResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightAndHashResponse") proto.RegisterType((*QueryEpochSubmissionsRequest)(nil), "babylon.btccheckpoint.v1.QueryEpochSubmissionsRequest") proto.RegisterType((*QueryEpochSubmissionsResponse)(nil), "babylon.btccheckpoint.v1.QueryEpochSubmissionsResponse") } @@ -322,43 +334,45 @@ func init() { func init() { proto.RegisterFile("babylon/btccheckpoint/query.proto", fileDescriptor_009c1165ec392ace) } var fileDescriptor_009c1165ec392ace = []byte{ - // 571 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x8b, 0x13, 0x3f, - 0x1c, 0xc6, 0x9b, 0xfd, 0xf5, 0x57, 0x34, 0x7b, 0x91, 0xec, 0x22, 0xb5, 0xae, 0xb3, 0x75, 0x40, - 0x5b, 0xc5, 0x4d, 0x68, 0x17, 0x95, 0x2a, 0xec, 0xa1, 0xe2, 0x1f, 0x10, 0xd6, 0xb5, 
0xe2, 0x45, - 0x90, 0x92, 0x84, 0x30, 0x1d, 0xda, 0x99, 0xcc, 0x36, 0x99, 0x62, 0x11, 0x2f, 0xfa, 0x02, 0x14, - 0xbc, 0xf8, 0x06, 0x7c, 0x15, 0x82, 0xe7, 0x3d, 0x2e, 0x78, 0xf1, 0x24, 0xd2, 0xfa, 0x42, 0x64, - 0x32, 0x69, 0xbb, 0x5d, 0x3b, 0xb4, 0x7a, 0x2b, 0xc9, 0xf3, 0x7c, 0x9f, 0x4f, 0xf3, 0x7c, 0x5b, - 0x78, 0x99, 0x51, 0x36, 0xec, 0xc9, 0x90, 0x30, 0xcd, 0x79, 0x47, 0xf0, 0x6e, 0x24, 0xfd, 0x50, - 0x93, 0xc3, 0x58, 0xf4, 0x87, 0x38, 0xea, 0x4b, 0x2d, 0x51, 0xd1, 0x4a, 0xf0, 0x9c, 0x04, 0x0f, - 0x6a, 0xa5, 0x4d, 0x4f, 0x7a, 0xd2, 0x88, 0x48, 0xf2, 0x29, 0xd5, 0x97, 0xb6, 0x3c, 0x29, 0xbd, - 0x9e, 0x20, 0x34, 0xf2, 0x09, 0x0d, 0x43, 0xa9, 0xa9, 0xf6, 0x65, 0xa8, 0xec, 0xed, 0x75, 0x2e, - 0x55, 0x20, 0x15, 0x61, 0x54, 0x89, 0x34, 0x86, 0x0c, 0x6a, 0x4c, 0x68, 0x5a, 0x23, 0x11, 0xf5, - 0xfc, 0xd0, 0x88, 0xad, 0xd6, 0x5d, 0x0c, 0x17, 0xd1, 0x3e, 0x0d, 0x26, 0xf3, 0xae, 0x2d, 0xd6, - 0xcc, 0xb3, 0x1a, 0xa9, 0xbb, 0x09, 0xd1, 0xd3, 0x24, 0xf0, 0xc0, 0xf8, 0x5b, 0xe2, 0x30, 0x16, - 0x4a, 0xbb, 0xcf, 0xe1, 0xc6, 0xdc, 0xa9, 0x8a, 0x64, 0xa8, 0x04, 0xda, 0x83, 0x85, 0x34, 0xa7, - 0x08, 0xca, 0xa0, 0xba, 0x5e, 0x2f, 0xe3, 0xac, 0x67, 0xc0, 0xa9, 0xb3, 0x99, 0x3f, 0xfa, 0xb1, - 0x9d, 0x6b, 0x59, 0x97, 0xbb, 0x07, 0xb7, 0xcd, 0xd8, 0xa6, 0xe6, 0xf7, 0xa6, 0xea, 0x47, 0xc2, - 0xf7, 0x3a, 0xda, 0x26, 0xa3, 0x8b, 0xf0, 0xac, 0x88, 0x24, 0xef, 0xb4, 0xc3, 0x38, 0x30, 0x29, - 0xf9, 0xd6, 0x19, 0x73, 0xb0, 0x1f, 0x07, 0xee, 0x4b, 0x58, 0xce, 0xf6, 0x5b, 0xc6, 0x06, 0xbc, - 0x20, 0x68, 0xbf, 0xe7, 0x0b, 0xa5, 0xdb, 0x4c, 0xf3, 0x36, 0xeb, 0x49, 0xde, 0x4d, 0xa6, 0x31, - 0xd1, 0xb7, 0x03, 0xcf, 0x4f, 0x04, 0x4d, 0xcd, 0x9b, 0xc9, 0xf5, 0xbe, 0xb9, 0x75, 0xdf, 0x01, - 0xb8, 0x65, 0xe6, 0xdf, 0x4f, 0x02, 0x9f, 0xc5, 0x2c, 0xf0, 0x95, 0x4a, 0x6a, 0x5a, 0x05, 0x0e, - 0x3d, 0x80, 0x70, 0x56, 0x56, 0x71, 0xcd, 0x3c, 0xd0, 0x55, 0x9c, 0x36, 0x8b, 0x93, 0x66, 0x71, - 0xba, 0x40, 0xb6, 0x59, 0x7c, 0x40, 0x3d, 0x61, 0x07, 0xb7, 0x4e, 0x38, 0xdd, 0xcf, 0x00, 0x5e, - 0xca, 0xa0, 0xb0, 0x5f, 0xf1, 0x2e, 0xcc, 0x77, 0xc5, 0x30, 0x29, 0xe1, 0xbf, 0xea, 0x7a, 0xbd, - 0x92, 0x5d, 0xc2, 0xcc, 0xfc, 0x58, 0x0c, 0x5b, 0xc6, 0x84, 0x1e, 0x2e, 0xc0, 0xac, 0x2c, 0xc5, - 0x4c, 0x93, 0x4f, 0x72, 0xd6, 0x3f, 0xe5, 0xe1, 0xff, 0x86, 0x13, 0xbd, 0x07, 0xb0, 0x90, 0xf6, - 0x8d, 0x6e, 0x64, 0xc3, 0xfc, 0xb9, 0x66, 0xa5, 0x9d, 0x15, 0xd5, 0x69, 0xba, 0x5b, 0x7d, 0xfb, - 0xed, 0xd7, 0xc7, 0x35, 0x17, 0x95, 0xc9, 0xe2, 0xfd, 0x1e, 0xd4, 0xec, 0xcf, 0x00, 0x7d, 0x01, - 0x70, 0x63, 0xc1, 0x92, 0xa0, 0xc6, 0x92, 0xc0, 0xec, 0xc5, 0x2c, 0xdd, 0xf9, 0x17, 0xab, 0x05, - 0xdf, 0x31, 0xe0, 0x15, 0x74, 0x25, 0x1b, 0xfc, 0xf5, 0x74, 0xb1, 0xde, 0xa0, 0xaf, 0x00, 0x9e, - 0x3b, 0x5d, 0x3e, 0xba, 0xb5, 0x24, 0x3f, 0x63, 0x67, 0x4b, 0xb7, 0xff, 0xda, 0x67, 0xa1, 0x1b, - 0x06, 0x7a, 0x17, 0xd5, 0x56, 0x82, 0x26, 0x6a, 0x36, 0xa2, 0xf9, 0xe4, 0x68, 0xe4, 0x80, 0xe3, - 0x91, 0x03, 0x7e, 0x8e, 0x1c, 0xf0, 0x61, 0xec, 0xe4, 0x8e, 0xc7, 0x4e, 0xee, 0xfb, 0xd8, 0xc9, - 0xbd, 0xb8, 0xe9, 0xf9, 0xba, 0x13, 0x33, 0xcc, 0x65, 0x30, 0x19, 0xcb, 0x3b, 0xd4, 0x0f, 0xa7, - 0x19, 0xaf, 0x4e, 0xa5, 0xe8, 0x61, 0x24, 0x14, 0x2b, 0x98, 0x3f, 0xab, 0xdd, 0xdf, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x35, 0xfb, 0xc6, 0x34, 0x9a, 0x05, 0x00, 0x00, + // 595 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcf, 0x8a, 0xd3, 0x40, + 0x18, 0xef, 0xac, 0xb5, 0xe8, 0xd4, 0x83, 0x8c, 0x45, 0x6b, 0x5c, 0x63, 0x0d, 0xb8, 0xad, 0xe2, + 0x26, 0xb4, 0xcb, 0x2a, 0x8b, 0x20, 0x6e, 0x65, 0x75, 0x41, 0x58, 0xd7, 0x88, 0x17, 0x2f, 
0x65, + 0x32, 0x0e, 0x49, 0x68, 0x93, 0xc9, 0x76, 0x26, 0xc5, 0x22, 0x5e, 0xf4, 0x01, 0x14, 0x3c, 0x7a, + 0xf6, 0xe8, 0x2b, 0x78, 0xde, 0x63, 0xc1, 0x8b, 0x27, 0x91, 0xd6, 0x07, 0x91, 0x4c, 0xa6, 0xed, + 0xb6, 0x36, 0xb4, 0xe2, 0xad, 0x64, 0x7e, 0xff, 0xbe, 0xf9, 0x7e, 0x53, 0x78, 0xdd, 0xc1, 0x4e, + 0xbf, 0xc3, 0x42, 0xcb, 0x11, 0x84, 0x78, 0x94, 0xb4, 0x23, 0xe6, 0x87, 0xc2, 0x3a, 0x8a, 0x69, + 0xb7, 0x6f, 0x46, 0x5d, 0x26, 0x18, 0x2a, 0x2b, 0x88, 0x39, 0x03, 0x31, 0x7b, 0x75, 0xad, 0xe4, + 0x32, 0x97, 0x49, 0x90, 0x95, 0xfc, 0x4a, 0xf1, 0xda, 0xba, 0xcb, 0x98, 0xdb, 0xa1, 0x16, 0x8e, + 0x7c, 0x0b, 0x87, 0x21, 0x13, 0x58, 0xf8, 0x2c, 0xe4, 0xea, 0xf4, 0x16, 0x61, 0x3c, 0x60, 0xdc, + 0x72, 0x30, 0xa7, 0xa9, 0x8d, 0xd5, 0xab, 0x3b, 0x54, 0xe0, 0xba, 0x15, 0x61, 0xd7, 0x0f, 0x25, + 0x58, 0x61, 0x8d, 0xc5, 0xe1, 0x22, 0xdc, 0xc5, 0xc1, 0x58, 0xef, 0xe6, 0x62, 0xcc, 0x6c, 0x56, + 0x09, 0x35, 0x4a, 0x10, 0x3d, 0x4b, 0x0c, 0x0f, 0x25, 0xdf, 0xa6, 0x47, 0x31, 0xe5, 0xc2, 0x78, + 0x01, 0x2f, 0xcc, 0x7c, 0xe5, 0x11, 0x0b, 0x39, 0x45, 0xf7, 0x61, 0x21, 0xf5, 0x29, 0x83, 0x0a, + 0xa8, 0x15, 0x1b, 0x15, 0x33, 0xeb, 0x1a, 0xcc, 0x94, 0xd9, 0xcc, 0x1f, 0xff, 0xbc, 0x96, 0xb3, + 0x15, 0xcb, 0xd8, 0x83, 0x1b, 0x52, 0xb6, 0x29, 0xc8, 0xc3, 0x09, 0x7a, 0x9f, 0xfa, 0xae, 0x27, + 0x76, 0xc3, 0x57, 0xfb, 0x98, 0x7b, 0x2a, 0x00, 0xba, 0x02, 0xcf, 0xd2, 0x88, 0x11, 0xaf, 0x15, + 0xc6, 0x81, 0x34, 0xcb, 0xdb, 0x67, 0xe4, 0x87, 0x83, 0x38, 0x30, 0x3e, 0x03, 0x58, 0x5d, 0xaa, + 0xa3, 0x22, 0xef, 0xc0, 0xcb, 0x14, 0x77, 0x3b, 0x3e, 0xe5, 0xa2, 0xe5, 0x08, 0xd2, 0x72, 0x3a, + 0x8c, 0xb4, 0x13, 0x55, 0x87, 0x76, 0x95, 0xf0, 0xc5, 0x31, 0xa0, 0x29, 0x48, 0x33, 0x39, 0x3e, + 0x90, 0xa7, 0x68, 0x1b, 0x5e, 0x5a, 0x40, 0xf5, 0x30, 0xf7, 0xca, 0x6b, 0x15, 0x50, 0x3b, 0x67, + 0x97, 0xe6, 0x89, 0x89, 0xb3, 0xf1, 0x1e, 0xc0, 0x75, 0x99, 0x6e, 0x2f, 0xc9, 0xfb, 0x3c, 0x76, + 0x02, 0x9f, 0xf3, 0x64, 0xd9, 0xab, 0xcc, 0x86, 0x1e, 0x41, 0x38, 0x5d, 0xb9, 0xf4, 0x29, 0x36, + 0x36, 0xcc, 0xb4, 0x1f, 0x66, 0xd2, 0x0f, 0x33, 0xad, 0xa1, 0xea, 0x87, 0x79, 0x88, 0x5d, 0xaa, + 0x84, 0xed, 0x13, 0x4c, 0xe3, 0x0b, 0x80, 0x57, 0x33, 0x52, 0xa8, 0x9b, 0xb9, 0x07, 0xf3, 0x6d, + 0xda, 0x4f, 0x56, 0x79, 0xaa, 0x56, 0x6c, 0x54, 0xb3, 0x57, 0x39, 0x25, 0x3f, 0xa1, 0x7d, 0x5b, + 0x92, 0xd0, 0xe3, 0x05, 0x31, 0xab, 0x4b, 0x63, 0xa6, 0xce, 0x27, 0x73, 0x36, 0xbe, 0xe6, 0xe1, + 0x69, 0x99, 0x13, 0x7d, 0x00, 0xb0, 0x90, 0xb6, 0x06, 0xdd, 0xce, 0x0e, 0xf3, 0x77, 0x59, 0xb5, + 0xcd, 0x15, 0xd1, 0xa9, 0xbb, 0x51, 0x7b, 0xf7, 0xfd, 0xf7, 0xa7, 0x35, 0x03, 0x55, 0xac, 0xc5, + 0xaf, 0xa4, 0x57, 0x57, 0x8f, 0x09, 0x0d, 0x00, 0xd4, 0xb2, 0x2b, 0x86, 0x1e, 0x2c, 0xf1, 0x5d, + 0xda, 0x72, 0x6d, 0xf7, 0x3f, 0x14, 0xd4, 0x34, 0x9b, 0x72, 0x9a, 0x2a, 0xba, 0x91, 0x3d, 0xcd, + 0x9b, 0x49, 0xdb, 0xde, 0xa2, 0x6f, 0x00, 0x9e, 0x9f, 0x6f, 0x04, 0xba, 0xb3, 0x24, 0x46, 0x46, + 0x91, 0xb5, 0xbb, 0xff, 0xcc, 0x53, 0xa1, 0x77, 0x64, 0xe8, 0x2d, 0x54, 0x5f, 0x29, 0xb4, 0xc5, + 0xa7, 0x12, 0xcd, 0xa7, 0xc7, 0x43, 0x1d, 0x0c, 0x86, 0x3a, 0xf8, 0x35, 0xd4, 0xc1, 0xc7, 0x91, + 0x9e, 0x1b, 0x8c, 0xf4, 0xdc, 0x8f, 0x91, 0x9e, 0x7b, 0xb9, 0xed, 0xfa, 0xc2, 0x8b, 0x1d, 0x93, + 0xb0, 0x60, 0x2c, 0x4b, 0x3c, 0xec, 0x87, 0x13, 0x8f, 0xd7, 0x73, 0x2e, 0xa2, 0x1f, 0x51, 0xee, + 0x14, 0xe4, 0xff, 0xe0, 0xd6, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xac, 0x9c, 0xf6, 0x14, 0xf5, + 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -375,8 +389,8 @@ const _ = grpc.SupportPackageIsVersion4 type QueryClient interface { // Parameters queries the parameters of the module. Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) - // BtcCheckpointHeight returns earliest block height for given rawcheckpoint - BtcCheckpointHeight(ctx context.Context, in *QueryBtcCheckpointHeightRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointHeightResponse, error) + // BtcCheckpointHeightAndHash returns earliest block height and hash for given rawcheckpoint + BtcCheckpointHeightAndHash(ctx context.Context, in *QueryBtcCheckpointHeightAndHashRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointHeightAndHashResponse, error) EpochSubmissions(ctx context.Context, in *QueryEpochSubmissionsRequest, opts ...grpc.CallOption) (*QueryEpochSubmissionsResponse, error) } @@ -397,9 +411,9 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts . return out, nil } -func (c *queryClient) BtcCheckpointHeight(ctx context.Context, in *QueryBtcCheckpointHeightRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointHeightResponse, error) { - out := new(QueryBtcCheckpointHeightResponse) - err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointHeight", in, out, opts...) +func (c *queryClient) BtcCheckpointHeightAndHash(ctx context.Context, in *QueryBtcCheckpointHeightAndHashRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointHeightAndHashResponse, error) { + out := new(QueryBtcCheckpointHeightAndHashResponse) + err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointHeightAndHash", in, out, opts...) if err != nil { return nil, err } @@ -419,8 +433,8 @@ func (c *queryClient) EpochSubmissions(ctx context.Context, in *QueryEpochSubmis type QueryServer interface { // Parameters queries the parameters of the module. 
Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) - // BtcCheckpointHeight returns earliest block height for given rawcheckpoint - BtcCheckpointHeight(context.Context, *QueryBtcCheckpointHeightRequest) (*QueryBtcCheckpointHeightResponse, error) + // BtcCheckpointHeightAndHash returns earliest block height and hash for given rawcheckpoint + BtcCheckpointHeightAndHash(context.Context, *QueryBtcCheckpointHeightAndHashRequest) (*QueryBtcCheckpointHeightAndHashResponse, error) EpochSubmissions(context.Context, *QueryEpochSubmissionsRequest) (*QueryEpochSubmissionsResponse, error) } @@ -431,8 +445,8 @@ type UnimplementedQueryServer struct { func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") } -func (*UnimplementedQueryServer) BtcCheckpointHeight(ctx context.Context, req *QueryBtcCheckpointHeightRequest) (*QueryBtcCheckpointHeightResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointHeight not implemented") +func (*UnimplementedQueryServer) BtcCheckpointHeightAndHash(ctx context.Context, req *QueryBtcCheckpointHeightAndHashRequest) (*QueryBtcCheckpointHeightAndHashResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointHeightAndHash not implemented") } func (*UnimplementedQueryServer) EpochSubmissions(ctx context.Context, req *QueryEpochSubmissionsRequest) (*QueryEpochSubmissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method EpochSubmissions not implemented") @@ -460,20 +474,20 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } -func _Query_BtcCheckpointHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryBtcCheckpointHeightRequest) +func _Query_BtcCheckpointHeightAndHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBtcCheckpointHeightAndHashRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).BtcCheckpointHeight(ctx, in) + return srv.(QueryServer).BtcCheckpointHeightAndHash(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointHeight", + FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointHeightAndHash", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).BtcCheckpointHeight(ctx, req.(*QueryBtcCheckpointHeightRequest)) + return srv.(QueryServer).BtcCheckpointHeightAndHash(ctx, req.(*QueryBtcCheckpointHeightAndHashRequest)) } return interceptor(ctx, in, info, handler) } @@ -505,8 +519,8 @@ var _Query_serviceDesc = grpc.ServiceDesc{ Handler: _Query_Params_Handler, }, { - MethodName: "BtcCheckpointHeight", - Handler: _Query_BtcCheckpointHeight_Handler, + MethodName: "BtcCheckpointHeightAndHash", + Handler: _Query_BtcCheckpointHeightAndHash_Handler, }, { MethodName: "EpochSubmissions", @@ -573,7 +587,7 @@ func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *QueryBtcCheckpointHeightRequest) Marshal() (dAtA []byte, err error) { +func (m *QueryBtcCheckpointHeightAndHashRequest) Marshal() (dAtA []byte, err 
error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -583,12 +597,12 @@ func (m *QueryBtcCheckpointHeightRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QueryBtcCheckpointHeightRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointHeightAndHashRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryBtcCheckpointHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointHeightAndHashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -601,7 +615,7 @@ func (m *QueryBtcCheckpointHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int return len(dAtA) - i, nil } -func (m *QueryBtcCheckpointHeightResponse) Marshal() (dAtA []byte, err error) { +func (m *QueryBtcCheckpointHeightAndHashResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -611,16 +625,23 @@ func (m *QueryBtcCheckpointHeightResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QueryBtcCheckpointHeightResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointHeightAndHashResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryBtcCheckpointHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointHeightAndHashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if len(m.EarliestBtcBlockHash) > 0 { + i -= len(m.EarliestBtcBlockHash) + copy(dAtA[i:], m.EarliestBtcBlockHash) + i = encodeVarintQuery(dAtA, i, uint64(len(m.EarliestBtcBlockHash))) + i-- + dAtA[i] = 0x12 + } if m.EarliestBtcBlockNumber != 0 { i = encodeVarintQuery(dAtA, i, uint64(m.EarliestBtcBlockNumber)) i-- @@ -749,7 +770,7 @@ func (m *QueryParamsResponse) Size() (n int) { return n } -func (m *QueryBtcCheckpointHeightRequest) Size() (n int) { +func (m *QueryBtcCheckpointHeightAndHashRequest) Size() (n int) { if m == nil { return 0 } @@ -761,7 +782,7 @@ func (m *QueryBtcCheckpointHeightRequest) Size() (n int) { return n } -func (m *QueryBtcCheckpointHeightResponse) Size() (n int) { +func (m *QueryBtcCheckpointHeightAndHashResponse) Size() (n int) { if m == nil { return 0 } @@ -770,6 +791,10 @@ func (m *QueryBtcCheckpointHeightResponse) Size() (n int) { if m.EarliestBtcBlockNumber != 0 { n += 1 + sovQuery(uint64(m.EarliestBtcBlockNumber)) } + l = len(m.EarliestBtcBlockHash) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } return n } @@ -947,7 +972,7 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryBtcCheckpointHeightRequest) Unmarshal(dAtA []byte) error { +func (m *QueryBtcCheckpointHeightAndHashRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -970,10 +995,10 @@ func (m *QueryBtcCheckpointHeightRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryBtcCheckpointHeightAndHashRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
QueryBtcCheckpointHeightAndHashRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1016,7 +1041,7 @@ func (m *QueryBtcCheckpointHeightRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryBtcCheckpointHeightResponse) Unmarshal(dAtA []byte) error { +func (m *QueryBtcCheckpointHeightAndHashResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1039,10 +1064,10 @@ func (m *QueryBtcCheckpointHeightResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryBtcCheckpointHeightAndHashResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryBtcCheckpointHeightAndHashResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1064,6 +1089,40 @@ func (m *QueryBtcCheckpointHeightResponse) Unmarshal(dAtA []byte) error { break } } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EarliestBtcBlockHash = append(m.EarliestBtcBlockHash[:0], dAtA[iNdEx:postIndex]...) + if m.EarliestBtcBlockHash == nil { + m.EarliestBtcBlockHash = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) diff --git a/x/btccheckpoint/types/query.pb.gw.go b/x/btccheckpoint/types/query.pb.gw.go index 7e4a5dd86..272c4dbeb 100644 --- a/x/btccheckpoint/types/query.pb.gw.go +++ b/x/btccheckpoint/types/query.pb.gw.go @@ -51,8 +51,8 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal } -func request_Query_BtcCheckpointHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBtcCheckpointHeightRequest +func request_Query_BtcCheckpointHeightAndHash_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointHeightAndHashRequest var metadata runtime.ServerMetadata var ( @@ -73,13 +73,13 @@ func request_Query_BtcCheckpointHeight_0(ctx context.Context, marshaler runtime. 
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) } - msg, err := client.BtcCheckpointHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.BtcCheckpointHeightAndHash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -func local_request_Query_BtcCheckpointHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBtcCheckpointHeightRequest +func local_request_Query_BtcCheckpointHeightAndHash_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointHeightAndHashRequest var metadata runtime.ServerMetadata var ( @@ -100,7 +100,7 @@ func local_request_Query_BtcCheckpointHeight_0(ctx context.Context, marshaler ru return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) } - msg, err := server.BtcCheckpointHeight(ctx, &protoReq) + msg, err := server.BtcCheckpointHeightAndHash(ctx, &protoReq) return msg, metadata, err } @@ -206,7 +206,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) - mux.Handle("GET", pattern_Query_BtcCheckpointHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BtcCheckpointHeightAndHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -217,7 +217,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_BtcCheckpointHeight_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_BtcCheckpointHeightAndHash_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -225,7 +225,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_BtcCheckpointHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_BtcCheckpointHeightAndHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
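	// The mux handler above decodes the {epoch_num} path parameter, calls the
	// in-process QueryServer through local_request_Query_BtcCheckpointHeightAndHash_0,
	// and this forward call then writes the response message back to the HTTP
	// client through the outbound marshaler; errors have already been translated
	// into HTTP responses by runtime.HTTPError above.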
}) @@ -313,7 +313,7 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) - mux.Handle("GET", pattern_Query_BtcCheckpointHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BtcCheckpointHeightAndHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -322,14 +322,14 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_BtcCheckpointHeight_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_BtcCheckpointHeightAndHash_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_BtcCheckpointHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_BtcCheckpointHeightAndHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -359,7 +359,7 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie var ( pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "btccheckpoint", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_BtcCheckpointHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"babylon", "btccheckpoint", "v1", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_BtcCheckpointHeightAndHash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"babylon", "btccheckpoint", "v1", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_EpochSubmissions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"babylon", "btccheckpoint", "v1", "epoch_num", "submissions"}, "", runtime.AssumeColonVerbOpt(false))) ) @@ -367,7 +367,7 @@ var ( var ( forward_Query_Params_0 = runtime.ForwardResponseMessage - forward_Query_BtcCheckpointHeight_0 = runtime.ForwardResponseMessage + forward_Query_BtcCheckpointHeightAndHash_0 = runtime.ForwardResponseMessage forward_Query_EpochSubmissions_0 = runtime.ForwardResponseMessage ) From e9e8de0ddd74acd84c76d77145008ca279ef8cd2 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Tue, 24 Jan 2023 14:08:49 +1100 Subject: [PATCH 28/37] epoching: range query for epochs (#289) --- client/docs/swagger-ui/swagger.yaml | 804 +++++++++++++++++++++++++- proto/babylon/epoching/v1/query.proto | 19 + x/epoching/keeper/epochs.go | 4 + x/epoching/keeper/grpc_query.go | 31 +- x/epoching/keeper/grpc_query_test.go | 38 +- x/epoching/types/query.pb.go | 615 +++++++++++++++++--- x/epoching/types/query.pb.gw.go | 83 +++ 7 files changed, 1501 insertions(+), 93 deletions(-) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index d90fac506..e2093965d 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -1242,6 +1242,488 @@ paths: type: string tags: - Query + /babylon/epoching/v1/epochs: + get: + summary: EpochsInfo range-queries the information of epochs + operationId: EpochsInfo + 
responses: + '200': + description: A successful response. + schema: + type: object + properties: + epochs: + type: array + items: + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in + this epoch. + + Babylon needs to remember the last header of each epoch + to complete unbonding validators/delegations when a + previous epoch's checkpoint is finalised. + + The last_block_header field is nil in the epoch's + beginning, and is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: >- + app_hash_root is the Merkle root of all AppHashs in this + epoch + + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. 
+ description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query /babylon/epoching/v1/epochs/{epoch_num}: get: summary: EpochInfo queries the information of a given epoch @@ -3997,14 +4479,87 @@ paths: format: uint64 status_count: type: object - additionalProperties: - type: string - format: uint64 - description: >- - QueryRecentEpochStatusCountResponse is the response type for the - Query/EpochStatusCount - - RPC method. + additionalProperties: + type: string + format: uint64 + description: >- + QueryRecentEpochStatusCountResponse is the response type for the + Query/EpochStatusCount + + RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: epoch_count + description: >- + epoch_count is the number of the most recent epochs to include in + the aggregation. + in: query + required: false + type: string + format: uint64 + tags: + - Query + /babylon/checkpointing/v1/last_raw_checkpoint/{status}: + get: + summary: >- + LastCheckpointWithStatus queries the last checkpoint with a given status + or a more matured status + operationId: LastCheckpointWithStatus + responses: + '200': + description: A successful response. + schema: + type: object + properties: + raw_checkpoint: + type: object + properties: + epoch_num: + type: string + format: uint64 + title: >- + epoch_num defines the epoch number the raw checkpoint is + for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that + individual BLS sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of + the BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated + from individual BLS sigs + title: RawCheckpoint wraps the BLS multi sig with meta data default: description: An unexpected error response. schema: @@ -4028,14 +4583,16 @@ paths: type: string format: byte parameters: - - name: epoch_count - description: >- - epoch_count is the number of the most recent epochs to include in - the aggregation. - in: query - required: false + - name: status + in: path + required: true type: string - format: uint64 + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED tags: - Query /babylon/checkpointing/v1/last_raw_checkpoint/{status}: @@ -10810,6 +11367,223 @@ definitions: repeated Bar results = 1; PageResponse page = 2; } + babylon.epoching.v1.QueryEpochsInfoResponse: + type: object + properties: + epochs: + type: array + items: + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this epoch. + + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous + epoch's checkpoint is finalised. + + The last_block_header field is nil in the epoch's beginning, and + is set upon the end of this epoch. 
+ type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: |- + app_hash_root is the Merkle root of all AppHashs in this epoch + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } babylon.epoching.v1.QueryLatestEpochMsgsResponse: type: object properties: diff --git a/proto/babylon/epoching/v1/query.proto b/proto/babylon/epoching/v1/query.proto index 3333d5676..956f7a028 100644 --- a/proto/babylon/epoching/v1/query.proto +++ b/proto/babylon/epoching/v1/query.proto @@ -21,6 +21,13 @@ service Query { option (google.api.http).get = "/babylon/epoching/v1/epochs/{epoch_num=*}"; } + // EpochsInfo queries the metadata of epochs in a given range, depending on the + // parameters in the pagination request. Th main use case will be querying the + // latest epochs in time order. + rpc EpochsInfo(QueryEpochsInfoRequest) returns (QueryEpochsInfoResponse) { + option (google.api.http).get = "/babylon/epoching/v1/epochs"; + } + // CurrentEpoch queries the current epoch rpc CurrentEpoch(QueryCurrentEpochRequest) returns (QueryCurrentEpochResponse) { option (google.api.http).get = "/babylon/epoching/v1/current_epoch"; @@ -69,6 +76,18 @@ message QueryEpochInfoResponse { babylon.epoching.v1.Epoch epoch = 1; } +message QueryEpochsInfoRequest { + // pagination defines whether to have the pagination in the response + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +message QueryEpochsInfoResponse { + repeated babylon.epoching.v1.Epoch epochs = 1; + + // pagination defines the pagination in the response + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + // QueryCurrentEpochRequest is the request type for the Query/CurrentEpoch RPC method message QueryCurrentEpochRequest {} diff --git a/x/epoching/keeper/epochs.go b/x/epoching/keeper/epochs.go index 8948bbfab..93be60372 100644 --- a/x/epoching/keeper/epochs.go +++ b/x/epoching/keeper/epochs.go @@ -137,6 +137,10 @@ func (k Keeper) IncEpoch(ctx sdk.Context) types.Epoch { return newEpoch } +// epochInfoStore returns the store for epoch metadata +// prefix: EpochInfoKey +// key: epochNumber +// value: epoch metadata func (k Keeper) epochInfoStore(ctx sdk.Context) prefix.Store { store := ctx.KVStore(k.storeKey) return prefix.NewStore(store, types.EpochInfoKey) diff --git a/x/epoching/keeper/grpc_query.go b/x/epoching/keeper/grpc_query.go index af203312d..270018ebc 100644 --- a/x/epoching/keeper/grpc_query.go +++ b/x/epoching/keeper/grpc_query.go @@ -2,9 +2,10 @@ package keeper import ( "context" - "cosmossdk.io/math" "errors" + "cosmossdk.io/math" + "github.com/babylonchain/babylon/x/epoching/types" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" @@ -53,6 +54,34 @@ func (k Keeper) EpochInfo(c context.Context, req *types.QueryEpochInfoRequest) ( return resp, nil } +// EpochsInfo handles the QueryEpochsInfoRequest query +func (k Keeper) EpochsInfo(c context.Context, req *types.QueryEpochsInfoRequest) (*types.QueryEpochsInfoResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + + epochInfoStore := k.epochInfoStore(ctx) + epochs := []*types.Epoch{} + pageRes, err := query.Paginate(epochInfoStore, req.Pagination, func(key, value []byte) error { + // unmarshal to epoch metadata + var epoch types.Epoch + if err := k.cdc.Unmarshal(value, &epoch); err != nil { + return err + } + // append to epochs 
list + epochs = append(epochs, &epoch) + return nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + resp := &types.QueryEpochsInfoResponse{ + Epochs: epochs, + Pagination: pageRes, + } + + return resp, nil +} + // EpochMsgs handles the QueryEpochMsgsRequest query func (k Keeper) EpochMsgs(c context.Context, req *types.QueryEpochMsgsRequest) (*types.QueryEpochMsgsResponse, error) { ctx := sdk.UnwrapSDKContext(c) diff --git a/x/epoching/keeper/grpc_query_test.go b/x/epoching/keeper/grpc_query_test.go index 1c32d40c7..663b0aeea 100644 --- a/x/epoching/keeper/grpc_query_test.go +++ b/x/epoching/keeper/grpc_query_test.go @@ -88,10 +88,46 @@ func FuzzCurrentEpoch(f *testing.F) { }) } +func FuzzEpochsInfo(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + numEpochs := datagen.RandomInt(10) + 1 + limit := datagen.RandomInt(10) + 1 + + helper := testepoching.NewHelper(t) + ctx, keeper, queryClient := helper.Ctx, helper.EpochingKeeper, helper.QueryClient + wctx := sdk.WrapSDKContext(ctx) + + // enque the first block of the numEpochs'th epoch + epochInterval := keeper.GetParams(ctx).EpochInterval + for i := uint64(0); i < numEpochs-1; i++ { + for j := uint64(0); j < epochInterval; j++ { + helper.GenAndApplyEmptyBlock() + } + } + + // get epoch msgs + req := types.QueryEpochsInfoRequest{ + Pagination: &query.PageRequest{ + Limit: limit, + }, + } + resp, err := queryClient.EpochsInfo(wctx, &req) + require.NoError(t, err) + + require.Equal(t, testepoching.Min(numEpochs, limit), uint64(len(resp.Epochs))) + for i, epoch := range resp.Epochs { + require.Equal(t, uint64(i), epoch.EpochNumber) + } + }) +} + // FuzzEpochMsgsQuery fuzzes queryClient.EpochMsgs // 1. randomly generate msgs and limit in pagination // 2. 
check the returned msg was previously enqueued -// NOTE: Msgs in QueryEpochMsgsResponse are out-of-roder +// NOTE: Msgs in QueryEpochMsgsResponse are out-of-order func FuzzEpochMsgsQuery(f *testing.F) { datagen.AddRandomSeedsToFuzzer(f, 10) diff --git a/x/epoching/types/query.pb.go b/x/epoching/types/query.pb.go index 101e6ddf7..b75c30e0a 100644 --- a/x/epoching/types/query.pb.go +++ b/x/epoching/types/query.pb.go @@ -201,6 +201,104 @@ func (m *QueryEpochInfoResponse) GetEpoch() *Epoch { return nil } +type QueryEpochsInfoRequest struct { + // pagination defines whether to have the pagination in the response + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryEpochsInfoRequest) Reset() { *m = QueryEpochsInfoRequest{} } +func (m *QueryEpochsInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryEpochsInfoRequest) ProtoMessage() {} +func (*QueryEpochsInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1821b530f2ec2711, []int{4} +} +func (m *QueryEpochsInfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochsInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochsInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochsInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochsInfoRequest.Merge(m, src) +} +func (m *QueryEpochsInfoRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochsInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochsInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochsInfoRequest proto.InternalMessageInfo + +func (m *QueryEpochsInfoRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryEpochsInfoResponse struct { + Epochs []*Epoch `protobuf:"bytes,1,rep,name=epochs,proto3" json:"epochs,omitempty"` + // pagination defines the pagination in the response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryEpochsInfoResponse) Reset() { *m = QueryEpochsInfoResponse{} } +func (m *QueryEpochsInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryEpochsInfoResponse) ProtoMessage() {} +func (*QueryEpochsInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1821b530f2ec2711, []int{5} +} +func (m *QueryEpochsInfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochsInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochsInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochsInfoResponse.Merge(m, src) +} +func (m *QueryEpochsInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochsInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochsInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochsInfoResponse proto.InternalMessageInfo + +func (m *QueryEpochsInfoResponse) GetEpochs() []*Epoch { + if m != nil { + return m.Epochs + } + return nil +} + 
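The generated QueryEpochsInfoRequest and QueryEpochsInfoResponse types above carry a cosmos-sdk PageRequest and PageResponse, so callers can walk epoch metadata in pages. As a rough usage sketch only, not part of the generated file, a client might request the latest epochs first like this; the dialed *grpc.ClientConn, the helper name, and the page size are illustrative assumptions, while the field names follow the types in this patch.

package example

import (
	"context"
	"fmt"

	epochingtypes "github.com/babylonchain/babylon/x/epoching/types"
	"github.com/cosmos/cosmos-sdk/types/query"
	"google.golang.org/grpc"
)

// listRecentEpochs pages through epoch metadata via the EpochsInfo query,
// asking for epochs in reverse order so the highest epoch numbers come first.
func listRecentEpochs(conn *grpc.ClientConn, pageSize uint64) error {
	client := epochingtypes.NewQueryClient(conn)
	var nextKey []byte
	for {
		resp, err := client.EpochsInfo(context.Background(), &epochingtypes.QueryEpochsInfoRequest{
			Pagination: &query.PageRequest{
				Key:     nextKey,
				Limit:   pageSize,
				Reverse: true,
			},
		})
		if err != nil {
			return err
		}
		for _, epoch := range resp.Epochs {
			fmt.Printf("epoch %d starts at height %d\n", epoch.EpochNumber, epoch.FirstBlockHeight)
		}
		nextKey = resp.Pagination.GetNextKey()
		if len(nextKey) == 0 {
			return nil
		}
	}
}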
+func (m *QueryEpochsInfoResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + // QueryCurrentEpochRequest is the request type for the Query/CurrentEpoch RPC method type QueryCurrentEpochRequest struct { } @@ -209,7 +307,7 @@ func (m *QueryCurrentEpochRequest) Reset() { *m = QueryCurrentEpochReque func (m *QueryCurrentEpochRequest) String() string { return proto.CompactTextString(m) } func (*QueryCurrentEpochRequest) ProtoMessage() {} func (*QueryCurrentEpochRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{4} + return fileDescriptor_1821b530f2ec2711, []int{6} } func (m *QueryCurrentEpochRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -250,7 +348,7 @@ func (m *QueryCurrentEpochResponse) Reset() { *m = QueryCurrentEpochResp func (m *QueryCurrentEpochResponse) String() string { return proto.CompactTextString(m) } func (*QueryCurrentEpochResponse) ProtoMessage() {} func (*QueryCurrentEpochResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{5} + return fileDescriptor_1821b530f2ec2711, []int{7} } func (m *QueryCurrentEpochResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -305,7 +403,7 @@ func (m *QueryEpochMsgsRequest) Reset() { *m = QueryEpochMsgsRequest{} } func (m *QueryEpochMsgsRequest) String() string { return proto.CompactTextString(m) } func (*QueryEpochMsgsRequest) ProtoMessage() {} func (*QueryEpochMsgsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{6} + return fileDescriptor_1821b530f2ec2711, []int{8} } func (m *QueryEpochMsgsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +458,7 @@ func (m *QueryEpochMsgsResponse) Reset() { *m = QueryEpochMsgsResponse{} func (m *QueryEpochMsgsResponse) String() string { return proto.CompactTextString(m) } func (*QueryEpochMsgsResponse) ProtoMessage() {} func (*QueryEpochMsgsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{7} + return fileDescriptor_1821b530f2ec2711, []int{9} } func (m *QueryEpochMsgsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -417,7 +515,7 @@ func (m *QueryLatestEpochMsgsRequest) Reset() { *m = QueryLatestEpochMsg func (m *QueryLatestEpochMsgsRequest) String() string { return proto.CompactTextString(m) } func (*QueryLatestEpochMsgsRequest) ProtoMessage() {} func (*QueryLatestEpochMsgsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{8} + return fileDescriptor_1821b530f2ec2711, []int{10} } func (m *QueryLatestEpochMsgsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -479,7 +577,7 @@ func (m *QueryLatestEpochMsgsResponse) Reset() { *m = QueryLatestEpochMs func (m *QueryLatestEpochMsgsResponse) String() string { return proto.CompactTextString(m) } func (*QueryLatestEpochMsgsResponse) ProtoMessage() {} func (*QueryLatestEpochMsgsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{9} + return fileDescriptor_1821b530f2ec2711, []int{11} } func (m *QueryLatestEpochMsgsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -530,7 +628,7 @@ func (m *QueryValidatorLifecycleRequest) Reset() { *m = QueryValidatorLi func (m *QueryValidatorLifecycleRequest) String() string { return proto.CompactTextString(m) } func (*QueryValidatorLifecycleRequest) ProtoMessage() {} func (*QueryValidatorLifecycleRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_1821b530f2ec2711, []int{10} + return fileDescriptor_1821b530f2ec2711, []int{12} } func (m *QueryValidatorLifecycleRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -574,7 +672,7 @@ func (m *QueryValidatorLifecycleResponse) Reset() { *m = QueryValidatorL func (m *QueryValidatorLifecycleResponse) String() string { return proto.CompactTextString(m) } func (*QueryValidatorLifecycleResponse) ProtoMessage() {} func (*QueryValidatorLifecycleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{11} + return fileDescriptor_1821b530f2ec2711, []int{13} } func (m *QueryValidatorLifecycleResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -618,7 +716,7 @@ func (m *QueryDelegationLifecycleRequest) Reset() { *m = QueryDelegation func (m *QueryDelegationLifecycleRequest) String() string { return proto.CompactTextString(m) } func (*QueryDelegationLifecycleRequest) ProtoMessage() {} func (*QueryDelegationLifecycleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{12} + return fileDescriptor_1821b530f2ec2711, []int{14} } func (m *QueryDelegationLifecycleRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +760,7 @@ func (m *QueryDelegationLifecycleResponse) Reset() { *m = QueryDelegatio func (m *QueryDelegationLifecycleResponse) String() string { return proto.CompactTextString(m) } func (*QueryDelegationLifecycleResponse) ProtoMessage() {} func (*QueryDelegationLifecycleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{13} + return fileDescriptor_1821b530f2ec2711, []int{15} } func (m *QueryDelegationLifecycleResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -707,7 +805,7 @@ func (m *QueryEpochValSetRequest) Reset() { *m = QueryEpochValSetRequest func (m *QueryEpochValSetRequest) String() string { return proto.CompactTextString(m) } func (*QueryEpochValSetRequest) ProtoMessage() {} func (*QueryEpochValSetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{14} + return fileDescriptor_1821b530f2ec2711, []int{16} } func (m *QueryEpochValSetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -760,7 +858,7 @@ func (m *QueryEpochValSetResponse) Reset() { *m = QueryEpochValSetRespon func (m *QueryEpochValSetResponse) String() string { return proto.CompactTextString(m) } func (*QueryEpochValSetResponse) ProtoMessage() {} func (*QueryEpochValSetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{15} + return fileDescriptor_1821b530f2ec2711, []int{17} } func (m *QueryEpochValSetResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -815,6 +913,8 @@ func init() { proto.RegisterType((*QueryParamsResponse)(nil), "babylon.epoching.v1.QueryParamsResponse") proto.RegisterType((*QueryEpochInfoRequest)(nil), "babylon.epoching.v1.QueryEpochInfoRequest") proto.RegisterType((*QueryEpochInfoResponse)(nil), "babylon.epoching.v1.QueryEpochInfoResponse") + proto.RegisterType((*QueryEpochsInfoRequest)(nil), "babylon.epoching.v1.QueryEpochsInfoRequest") + proto.RegisterType((*QueryEpochsInfoResponse)(nil), "babylon.epoching.v1.QueryEpochsInfoResponse") proto.RegisterType((*QueryCurrentEpochRequest)(nil), "babylon.epoching.v1.QueryCurrentEpochRequest") proto.RegisterType((*QueryCurrentEpochResponse)(nil), "babylon.epoching.v1.QueryCurrentEpochResponse") proto.RegisterType((*QueryEpochMsgsRequest)(nil), "babylon.epoching.v1.QueryEpochMsgsRequest") @@ 
-832,71 +932,74 @@ func init() { func init() { proto.RegisterFile("babylon/epoching/v1/query.proto", fileDescriptor_1821b530f2ec2711) } var fileDescriptor_1821b530f2ec2711 = []byte{ - // 1022 bytes of a gzipped FileDescriptorProto + // 1071 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x97, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0xb3, 0x4d, 0x5a, 0x92, 0xe7, 0x96, 0xc2, 0xa4, 0x40, 0xba, 0x29, 0x4e, 0xb4, 0x85, - 0x26, 0x24, 0xcd, 0x6e, 0xec, 0xa4, 0x45, 0xfd, 0x01, 0x88, 0x84, 0x1f, 0xa2, 0x4a, 0x51, 0xba, - 0x48, 0x39, 0x70, 0xb1, 0xc6, 0xde, 0xc9, 0x66, 0xa5, 0xf5, 0x8e, 0xbb, 0x3b, 0x6b, 0xb0, 0x4a, - 0x10, 0xe2, 0x2f, 0x40, 0xe2, 0x80, 0x7a, 0x43, 0xe2, 0xc8, 0x9f, 0x00, 0x07, 0xb8, 0xf5, 0x58, - 0xc4, 0x85, 0x13, 0x42, 0x09, 0x7f, 0x08, 0xda, 0x37, 0x63, 0x7b, 0x6d, 0x66, 0x63, 0xa7, 0x42, - 0xdc, 0x92, 0x99, 0xf7, 0xe3, 0xf3, 0xbe, 0xf3, 0xf6, 0x3d, 0x19, 0x16, 0xea, 0xb4, 0xde, 0x09, - 0x79, 0xe4, 0xb0, 0x16, 0x6f, 0x1c, 0x04, 0x91, 0xef, 0xb4, 0x2b, 0xce, 0xc3, 0x94, 0xc5, 0x1d, - 0xbb, 0x15, 0x73, 0xc1, 0xc9, 0xac, 0x32, 0xb0, 0xbb, 0x06, 0x76, 0xbb, 0x62, 0x5e, 0xf2, 0xb9, - 0xcf, 0xf1, 0xde, 0xc9, 0xfe, 0x92, 0xa6, 0xe6, 0x15, 0x9f, 0x73, 0x3f, 0x64, 0x0e, 0x6d, 0x05, - 0x0e, 0x8d, 0x22, 0x2e, 0xa8, 0x08, 0x78, 0x94, 0xa8, 0xdb, 0x95, 0x06, 0x4f, 0x9a, 0x3c, 0x71, - 0xea, 0x34, 0x61, 0x32, 0x83, 0xd3, 0xae, 0xd4, 0x99, 0xa0, 0x15, 0xa7, 0x45, 0xfd, 0x20, 0x42, - 0x63, 0x65, 0xbb, 0xa8, 0xa3, 0x6a, 0xd1, 0x98, 0x36, 0xbb, 0xd1, 0x2c, 0x9d, 0x45, 0x0f, 0x11, - 0x6d, 0xac, 0x4b, 0x40, 0x1e, 0x64, 0x79, 0x76, 0xd1, 0xd1, 0x65, 0x0f, 0x53, 0x96, 0x08, 0x6b, - 0x17, 0x66, 0x07, 0x4e, 0x93, 0x16, 0x8f, 0x12, 0x46, 0x6e, 0xc1, 0x39, 0x99, 0x60, 0xce, 0x58, - 0x34, 0x96, 0x4b, 0xd5, 0x79, 0x5b, 0x53, 0xb8, 0x2d, 0x9d, 0xb6, 0xa6, 0x9e, 0xfc, 0xb9, 0x30, - 0xe1, 0x2a, 0x07, 0x6b, 0x13, 0x5e, 0xc2, 0x88, 0xef, 0x67, 0x86, 0x1f, 0x45, 0xfb, 0x5c, 0xa5, - 0x22, 0xf3, 0x30, 0x83, 0xce, 0xb5, 0x28, 0x6d, 0x62, 0xd8, 0x29, 0x77, 0x1a, 0x0f, 0x3e, 0x4e, - 0x9b, 0xd6, 0x3d, 0x78, 0x79, 0xd8, 0x4b, 0xa1, 0xac, 0xc3, 0x59, 0xb4, 0x52, 0x24, 0xa6, 0x96, - 0x04, 0xdd, 0x5c, 0x69, 0x68, 0x99, 0x30, 0x87, 0xb1, 0xb6, 0xd3, 0x38, 0x66, 0x91, 0x90, 0x77, - 0xaa, 0x5e, 0x1f, 0x2e, 0x6b, 0xee, 0x54, 0xaa, 0xab, 0x70, 0xa1, 0x21, 0xcf, 0x6b, 0xfd, 0x94, - 0x53, 0xee, 0xf9, 0x46, 0xce, 0x98, 0xbc, 0x0e, 0xcf, 0xcb, 0x32, 0xea, 0x3c, 0x8d, 0x3c, 0x1a, - 0x77, 0xe6, 0xce, 0xa0, 0xd5, 0x05, 0x3c, 0xdd, 0x52, 0x87, 0xd6, 0x17, 0x79, 0x19, 0xee, 0x27, - 0x7e, 0x32, 0x8e, 0x0c, 0xe4, 0x03, 0x80, 0xfe, 0xf3, 0x63, 0xe0, 0x52, 0xf5, 0x9a, 0x2d, 0x7b, - 0xc5, 0xce, 0x7a, 0xc5, 0x96, 0xdd, 0xa8, 0x7a, 0xc5, 0xde, 0xa5, 0x3e, 0x53, 0x81, 0xdd, 0x9c, - 0xa7, 0xf5, 0xd8, 0xc8, 0xeb, 0x29, 0xd3, 0xab, 0x22, 0x6f, 0xc2, 0x54, 0x33, 0xf1, 0xb3, 0x87, - 0x9d, 0x5c, 0x2e, 0x55, 0x2d, 0xad, 0x9c, 0x0f, 0x52, 0x96, 0x32, 0xef, 0x3e, 0x4b, 0x92, 0x2c, - 0x3e, 0xda, 0x93, 0x0f, 0x35, 0x68, 0x4b, 0x23, 0xd1, 0x64, 0xd2, 0x01, 0xb6, 0x1f, 0x0c, 0x98, - 0x47, 0xb6, 0x1d, 0x2a, 0x58, 0x22, 0xb4, 0x02, 0x45, 0xde, 0xc0, 0x0b, 0x4c, 0xb3, 0xc8, 0x93, - 0xea, 0x2f, 0x40, 0x49, 0xaa, 0xd7, 0xe0, 0x69, 0x24, 0x94, 0xf4, 0x80, 0x47, 0xdb, 0xd9, 0xc9, - 0x90, 0x82, 0x93, 0xcf, 0xac, 0xe0, 0x4f, 0x06, 0x5c, 0xd1, 0x53, 0x2a, 0x1d, 0x5d, 0x78, 0x31, - 0xc4, 0x2b, 0x49, 0x5a, 0xcb, 0x89, 0x7a, 0x6d, 0xb4, 0xa8, 0x3b, 0x41, 0x22, 0xdc, 0x8b, 0xe1, - 0x60, 0xec, 0xff, 0x4e, 0xe3, 0x3b, 0x50, 0x46, 0xf8, 0x3d, 0x1a, 0x06, 0x1e, 0x15, 0x3c, 0xde, - 0x09, 0xf6, 0x59, 0xa3, 0xd3, 0x08, 
0xbb, 0xb5, 0x92, 0xcb, 0x30, 0xdd, 0xa6, 0x61, 0x8d, 0x7a, - 0x5e, 0x8c, 0x22, 0xcf, 0xb8, 0xcf, 0xb5, 0x69, 0xf8, 0xae, 0xe7, 0xc5, 0x16, 0x83, 0x85, 0x42, - 0x67, 0x55, 0xfc, 0x96, 0xf4, 0x0e, 0x83, 0x7d, 0xa6, 0xbe, 0xcb, 0x25, 0x6d, 0xcd, 0x9a, 0x10, - 0x59, 0x9a, 0xec, 0x3f, 0xeb, 0xae, 0x4a, 0xf3, 0x1e, 0x0b, 0x99, 0x8f, 0xd8, 0x3a, 0x48, 0x8f, - 0x0d, 0x42, 0x7a, 0x4c, 0x42, 0xfa, 0xb0, 0x58, 0xec, 0xad, 0x28, 0xb7, 0xa5, 0x7b, 0x8e, 0x72, - 0x59, 0x4b, 0xa9, 0x8b, 0x91, 0x25, 0x42, 0xcc, 0x2f, 0xe1, 0x95, 0xfe, 0x97, 0xb4, 0x47, 0xc3, - 0x4f, 0x98, 0xf8, 0x5f, 0x3f, 0xe5, 0xdf, 0x0c, 0x35, 0xce, 0x06, 0x00, 0x54, 0x85, 0x6f, 0x03, - 0xb4, 0xbb, 0x12, 0x77, 0xbb, 0xaf, 0x7c, 0xf2, 0x4b, 0xb8, 0x39, 0x0f, 0x72, 0x1d, 0x88, 0xe0, - 0x82, 0x86, 0xb5, 0x36, 0x17, 0x41, 0xe4, 0xd7, 0x5a, 0xfc, 0x33, 0x16, 0x23, 0xec, 0xa4, 0xfb, - 0x02, 0xde, 0xec, 0xe1, 0xc5, 0x6e, 0x76, 0x3e, 0xd4, 0x9e, 0x93, 0xcf, 0xdc, 0x9e, 0xd5, 0x5f, - 0x01, 0xce, 0x62, 0x4d, 0xe4, 0x2b, 0x03, 0xce, 0xc9, 0x35, 0x42, 0x96, 0x8a, 0xbe, 0x9a, 0xa1, - 0x9d, 0x65, 0x2e, 0x8f, 0x36, 0x94, 0x39, 0xad, 0xab, 0x5f, 0xff, 0xfe, 0xf7, 0xb7, 0x67, 0x5e, - 0x25, 0xf3, 0x4e, 0xf1, 0x0a, 0x25, 0xdf, 0x19, 0x30, 0xd3, 0x5b, 0x3b, 0x64, 0xa5, 0x38, 0xf8, - 0xf0, 0x46, 0x33, 0x57, 0xc7, 0xb2, 0x55, 0x2c, 0x15, 0x64, 0x59, 0x25, 0x6f, 0x38, 0x85, 0xcb, - 0x3a, 0x71, 0x1e, 0xf5, 0xfa, 0xe9, 0xad, 0x95, 0x43, 0xf2, 0xd8, 0x80, 0xf3, 0xf9, 0x45, 0x45, - 0xd6, 0x8a, 0x13, 0x6a, 0x96, 0x9d, 0x69, 0x8f, 0x6b, 0xae, 0x10, 0x57, 0x10, 0xf1, 0x35, 0x62, - 0x69, 0x11, 0x07, 0x56, 0x23, 0xf9, 0xbe, 0xab, 0x1a, 0x0e, 0xae, 0x51, 0xaa, 0xe5, 0xe6, 0xfb, - 0x48, 0xd5, 0xf2, 0x53, 0xd6, 0xba, 0x8d, 0x48, 0x9b, 0xa4, 0x3a, 0xb6, 0x6a, 0x4e, 0x53, 0x4e, - 0xd8, 0x84, 0xfc, 0x68, 0xc0, 0xc5, 0xa1, 0xe9, 0x4d, 0xd6, 0x8b, 0x93, 0xeb, 0xd7, 0x91, 0x59, - 0x39, 0x85, 0x87, 0x82, 0xde, 0x40, 0xe8, 0x35, 0xb2, 0x7a, 0x02, 0xf4, 0x6d, 0x39, 0xfb, 0xfb, - 0xb4, 0x3f, 0x1b, 0x40, 0xfe, 0x3d, 0x2e, 0xc9, 0x46, 0x71, 0xfa, 0xc2, 0xe1, 0x6e, 0x6e, 0x9e, - 0xce, 0x49, 0x61, 0xdf, 0x41, 0xec, 0x1b, 0x64, 0x43, 0x8b, 0xdd, 0x9b, 0x1a, 0x38, 0x4f, 0xd1, - 0xd3, 0x79, 0xd4, 0x5d, 0x21, 0x87, 0xe4, 0x17, 0x03, 0x66, 0x35, 0x73, 0x94, 0x9c, 0x80, 0x52, - 0x3c, 0xf8, 0xcd, 0x1b, 0xa7, 0xf4, 0x52, 0x15, 0xdc, 0xc5, 0x0a, 0x6e, 0x92, 0x4d, 0x6d, 0x05, - 0x5e, 0xcf, 0x33, 0x5f, 0x42, 0x77, 0xc1, 0x1c, 0x66, 0xfd, 0x52, 0xca, 0x0d, 0x59, 0x72, 0x7d, - 0x44, 0xa3, 0x0e, 0x2c, 0x03, 0x73, 0x6d, 0x4c, 0x6b, 0x85, 0xfa, 0x0e, 0xa2, 0xde, 0x22, 0x6f, - 0x8e, 0xdf, 0xd8, 0xfd, 0x17, 0x48, 0x98, 0xd8, 0xba, 0xf7, 0xe4, 0xa8, 0x6c, 0x3c, 0x3d, 0x2a, - 0x1b, 0x7f, 0x1d, 0x95, 0x8d, 0x6f, 0x8e, 0xcb, 0x13, 0x4f, 0x8f, 0xcb, 0x13, 0x7f, 0x1c, 0x97, - 0x27, 0x3e, 0x5d, 0xf7, 0x03, 0x71, 0x90, 0xd6, 0xed, 0x06, 0x6f, 0x76, 0x83, 0x37, 0x0e, 0x68, - 0x10, 0xf5, 0x32, 0x7d, 0xde, 0xcf, 0x25, 0x3a, 0x2d, 0x96, 0xd4, 0xcf, 0xe1, 0x4f, 0x84, 0x8d, - 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x99, 0xd6, 0x70, 0x53, 0x00, 0x0d, 0x00, 0x00, + 0x14, 0xc7, 0xb3, 0x89, 0x1b, 0x92, 0x97, 0x96, 0xc2, 0xa4, 0x40, 0xbb, 0x29, 0x4e, 0xb4, 0x85, + 0x26, 0x24, 0xcd, 0x6e, 0x9c, 0xa4, 0x45, 0xfd, 0x01, 0x88, 0x84, 0x1f, 0xa2, 0x4a, 0x51, 0xba, + 0x48, 0x39, 0x70, 0x31, 0x63, 0xef, 0x64, 0xb3, 0xd2, 0x7a, 0xc7, 0xdd, 0x1f, 0x06, 0xab, 0x04, + 0x21, 0xce, 0x1c, 0x90, 0x90, 0x40, 0xbd, 0x21, 0x71, 0xe4, 0x4f, 0x80, 0x03, 0xc7, 0x1e, 0x83, + 0xb8, 0x70, 0x42, 0x28, 0xe1, 0x0f, 0x41, 0xfb, 0x66, 0xd6, 0x5e, 0xbb, 0xb3, 0xb1, 0x13, 0x45, + 0xdc, 0xda, 0x99, 0xf7, 0xe3, 0xf3, 0xde, 0x9b, 0x7d, 0x5f, 0x07, 0x66, 
0x6b, 0xb4, 0xd6, 0xf6, + 0x79, 0x60, 0xb1, 0x26, 0xaf, 0xef, 0x79, 0x81, 0x6b, 0xb5, 0x2a, 0xd6, 0xa3, 0x84, 0x85, 0x6d, + 0xb3, 0x19, 0xf2, 0x98, 0x93, 0x69, 0x69, 0x60, 0x66, 0x06, 0x66, 0xab, 0xa2, 0x5f, 0x72, 0xb9, + 0xcb, 0xf1, 0xde, 0x4a, 0xff, 0x25, 0x4c, 0xf5, 0xab, 0x2e, 0xe7, 0xae, 0xcf, 0x2c, 0xda, 0xf4, + 0x2c, 0x1a, 0x04, 0x3c, 0xa6, 0xb1, 0xc7, 0x83, 0x48, 0xde, 0x2e, 0xd6, 0x79, 0xd4, 0xe0, 0x91, + 0x55, 0xa3, 0x11, 0x13, 0x19, 0xac, 0x56, 0xa5, 0xc6, 0x62, 0x5a, 0xb1, 0x9a, 0xd4, 0xf5, 0x02, + 0x34, 0x96, 0xb6, 0x73, 0x2a, 0xaa, 0x26, 0x0d, 0x69, 0x23, 0x8b, 0x66, 0xa8, 0x2c, 0x3a, 0x88, + 0x68, 0x63, 0x5c, 0x02, 0xf2, 0x30, 0xcd, 0xb3, 0x8d, 0x8e, 0x36, 0x7b, 0x94, 0xb0, 0x28, 0x36, + 0xb6, 0x61, 0xba, 0xe7, 0x34, 0x6a, 0xf2, 0x20, 0x62, 0xe4, 0x36, 0x8c, 0x8b, 0x04, 0x97, 0xb5, + 0x39, 0x6d, 0x61, 0x6a, 0x75, 0xc6, 0x54, 0x14, 0x6e, 0x0a, 0xa7, 0x8d, 0xd2, 0xd3, 0xbf, 0x67, + 0x47, 0x6c, 0xe9, 0x60, 0xac, 0xc3, 0x4b, 0x18, 0xf1, 0xfd, 0xd4, 0xf0, 0xa3, 0x60, 0x97, 0xcb, + 0x54, 0x64, 0x06, 0x26, 0xd1, 0xb9, 0x1a, 0x24, 0x0d, 0x0c, 0x5b, 0xb2, 0x27, 0xf0, 0xe0, 0xe3, + 0xa4, 0x61, 0xdc, 0x87, 0x97, 0xfb, 0xbd, 0x24, 0xca, 0x0a, 0x9c, 0x43, 0x2b, 0x49, 0xa2, 0x2b, + 0x49, 0xd0, 0xcd, 0x16, 0x86, 0xc6, 0x67, 0xf9, 0x58, 0x51, 0x1e, 0xe1, 0x03, 0x80, 0x6e, 0x77, + 0x65, 0xc0, 0xeb, 0xa6, 0x18, 0x85, 0x99, 0x8e, 0xc2, 0x14, 0xc3, 0x96, 0xa3, 0x30, 0xb7, 0xa9, + 0xcb, 0xa4, 0xaf, 0x9d, 0xf3, 0x34, 0x7e, 0xd0, 0xe0, 0x95, 0x67, 0x52, 0x48, 0xde, 0x55, 0x18, + 0x47, 0x8c, 0xb4, 0x75, 0x63, 0x03, 0x80, 0xa5, 0x25, 0xf9, 0xb0, 0x87, 0x6b, 0x14, 0xb9, 0xe6, + 0x07, 0x72, 0x89, 0x84, 0x3d, 0x60, 0x3a, 0x5c, 0x46, 0xae, 0xcd, 0x24, 0x0c, 0x59, 0x10, 0x8b, + 0x2c, 0x72, 0xd4, 0x2e, 0x5c, 0x51, 0xdc, 0x49, 0xea, 0x6b, 0x70, 0xa1, 0x2e, 0xce, 0xab, 0xdd, + 0x6e, 0x97, 0xec, 0xf3, 0xf5, 0x9c, 0x31, 0x79, 0x1d, 0x9e, 0x17, 0x13, 0xac, 0xf1, 0x24, 0x70, + 0x68, 0xd8, 0x46, 0xd4, 0x92, 0x7d, 0x01, 0x4f, 0x37, 0xe4, 0xa1, 0xf1, 0x65, 0xfe, 0x05, 0x3c, + 0x88, 0xdc, 0x68, 0x98, 0x17, 0xd0, 0x37, 0x9b, 0xd1, 0x53, 0xcf, 0xe6, 0x89, 0x96, 0x1f, 0xbf, + 0x48, 0x2f, 0x8b, 0xbc, 0x05, 0xa5, 0x46, 0xe4, 0x66, 0x83, 0x31, 0x94, 0x83, 0x79, 0x98, 0xb0, + 0x84, 0x39, 0x0f, 0x58, 0x14, 0xa5, 0xf1, 0xd1, 0xfe, 0xec, 0xc6, 0xf3, 0xb3, 0x06, 0x33, 0xc8, + 0xb6, 0x45, 0x63, 0x16, 0xc5, 0xca, 0x06, 0x05, 0x4e, 0xcf, 0x04, 0x26, 0x58, 0xe0, 0x88, 0xee, + 0xcf, 0xc2, 0x94, 0xe8, 0x5e, 0x9d, 0x27, 0x41, 0x2c, 0x5b, 0x0f, 0x78, 0xb4, 0x99, 0x9e, 0xf4, + 0x75, 0x70, 0xec, 0xd4, 0x1d, 0xfc, 0x55, 0x83, 0xab, 0x6a, 0x4a, 0xd9, 0x47, 0x1b, 0x5e, 0xf4, + 0xf1, 0x4a, 0x90, 0x56, 0x73, 0x4d, 0xbd, 0x3e, 0xb8, 0xa9, 0x5b, 0x5e, 0x14, 0xdb, 0x17, 0xfd, + 0xde, 0xd8, 0x67, 0xd7, 0xe3, 0xbb, 0x50, 0x46, 0xf8, 0x1d, 0xea, 0x7b, 0x0e, 0x8d, 0x79, 0xb8, + 0xe5, 0xed, 0xb2, 0x7a, 0xbb, 0xee, 0x67, 0xb5, 0x92, 0x2b, 0x30, 0xd1, 0xa2, 0x7e, 0x95, 0x3a, + 0x4e, 0x88, 0x4d, 0x9e, 0xb4, 0x9f, 0x6b, 0x51, 0xff, 0x5d, 0xc7, 0x09, 0x0d, 0x06, 0xb3, 0x85, + 0xce, 0xb2, 0xf8, 0x0d, 0xe1, 0xed, 0x7b, 0xbb, 0x4c, 0x6e, 0x90, 0x79, 0x65, 0xcd, 0x8a, 0x10, + 0x69, 0x9a, 0xf4, 0x7f, 0xc6, 0x3d, 0x99, 0xe6, 0x3d, 0xe6, 0x33, 0x17, 0xb1, 0x55, 0x90, 0x0e, + 0xeb, 0x85, 0x74, 0x98, 0x80, 0x74, 0x61, 0xae, 0xd8, 0x5b, 0x52, 0x6e, 0x0a, 0xf7, 0x1c, 0xe5, + 0x82, 0x92, 0x52, 0x15, 0x23, 0x4d, 0x84, 0x98, 0x5f, 0xe5, 0xb7, 0xdc, 0x0e, 0xf5, 0x3f, 0x61, + 0xf1, 0xff, 0xfa, 0x29, 0xff, 0xa1, 0xc9, 0x75, 0xd6, 0x03, 0x20, 0x2b, 0x7c, 0x1b, 0xa0, 0x95, + 0xb5, 0x38, 0x7b, 0x7d, 0xe5, 0xe3, 0x27, 0x61, 0xe7, 0x3c, 0xc8, 0x0d, 0x20, 0x31, 0x8f, 0xa9, + 
0x5f, 0x6d, 0xf1, 0xd8, 0x0b, 0xdc, 0x6a, 0x93, 0x7f, 0xce, 0x42, 0x84, 0x1d, 0xb3, 0x5f, 0xc0, + 0x9b, 0x1d, 0xbc, 0xd8, 0x4e, 0xcf, 0xfb, 0x9e, 0xe7, 0xd8, 0xa9, 0x9f, 0xe7, 0xea, 0xc1, 0x14, + 0x9c, 0xc3, 0x9a, 0xc8, 0xd7, 0x1a, 0x8c, 0x0b, 0x05, 0x25, 0xf3, 0x45, 0x5f, 0x4d, 0x9f, 0x5c, + 0xeb, 0x0b, 0x83, 0x0d, 0x45, 0x4e, 0xe3, 0xda, 0x37, 0x7f, 0xfe, 0xfb, 0xfd, 0xe8, 0xab, 0x64, + 0xc6, 0x2a, 0xfe, 0xf5, 0x40, 0x7e, 0xd4, 0x60, 0xb2, 0xa3, 0xb8, 0x64, 0xb1, 0x38, 0x78, 0xbf, + 0x98, 0xeb, 0x4b, 0x43, 0xd9, 0x4a, 0x96, 0x0a, 0xb2, 0x2c, 0x91, 0x37, 0xac, 0xc2, 0xdf, 0x29, + 0x91, 0xf5, 0xb8, 0xf3, 0x9e, 0xde, 0x5a, 0xdc, 0x27, 0xdf, 0x6a, 0x00, 0x5d, 0x71, 0x25, 0x83, + 0xd2, 0xe5, 0x55, 0x5e, 0xbf, 0x31, 0x9c, 0xf1, 0x50, 0x8d, 0x92, 0x02, 0xfd, 0x44, 0x83, 0xf3, + 0x79, 0xdd, 0x24, 0xcb, 0xc5, 0x39, 0x14, 0xda, 0xab, 0x9b, 0xc3, 0x9a, 0x4b, 0xa8, 0x45, 0x84, + 0x7a, 0x8d, 0x18, 0x4a, 0xa8, 0x1e, 0xa5, 0x26, 0x3f, 0x65, 0x43, 0xc4, 0x3d, 0x3a, 0x68, 0x88, + 0x39, 0xb9, 0x19, 0x38, 0xc4, 0xfc, 0xd2, 0x37, 0xee, 0x20, 0xd2, 0x3a, 0x59, 0x1d, 0x7a, 0x88, + 0x56, 0x43, 0x2c, 0xfc, 0x88, 0xfc, 0xa2, 0xc1, 0xc5, 0x3e, 0x31, 0x21, 0x2b, 0xc5, 0xc9, 0xd5, + 0xea, 0xa8, 0x57, 0x4e, 0xe0, 0x21, 0xa1, 0xd7, 0x10, 0x7a, 0x99, 0x2c, 0x1d, 0x03, 0x7d, 0x47, + 0x48, 0x51, 0x97, 0xf6, 0x37, 0x0d, 0xc8, 0xb3, 0xdb, 0x9b, 0xac, 0x15, 0xa7, 0x2f, 0xd4, 0x1a, + 0x7d, 0xfd, 0x64, 0x4e, 0x12, 0xfb, 0x2e, 0x62, 0xdf, 0x24, 0x6b, 0x4a, 0xec, 0xce, 0x12, 0xc3, + 0xf5, 0x8e, 0x9e, 0xd6, 0xe3, 0x4c, 0xd1, 0xf6, 0xc9, 0xef, 0x1a, 0x4c, 0x2b, 0xd6, 0x3a, 0x39, + 0x06, 0xa5, 0x58, 0x87, 0xf4, 0x9b, 0x27, 0xf4, 0x92, 0x15, 0xdc, 0xc3, 0x0a, 0x6e, 0x91, 0x75, + 0x65, 0x05, 0x4e, 0xc7, 0x33, 0x5f, 0x42, 0xa6, 0x77, 0xfb, 0xe9, 0x7b, 0x99, 0xca, 0xed, 0x7c, + 0x32, 0xe8, 0x8b, 0xee, 0xd1, 0x26, 0x7d, 0x79, 0x48, 0x6b, 0x89, 0xfa, 0x0e, 0xa2, 0xde, 0x26, + 0x6f, 0x0e, 0xff, 0xb0, 0xbb, 0x13, 0x88, 0x58, 0xbc, 0x71, 0xff, 0xe9, 0x61, 0x59, 0x3b, 0x38, + 0x2c, 0x6b, 0xff, 0x1c, 0x96, 0xb5, 0xef, 0x8e, 0xca, 0x23, 0x07, 0x47, 0xe5, 0x91, 0xbf, 0x8e, + 0xca, 0x23, 0x9f, 0xae, 0xb8, 0x5e, 0xbc, 0x97, 0xd4, 0xcc, 0x3a, 0x6f, 0x64, 0xc1, 0xeb, 0x7b, + 0xd4, 0x0b, 0x3a, 0x99, 0xbe, 0xe8, 0xe6, 0x8a, 0xdb, 0x4d, 0x16, 0xd5, 0xc6, 0xf1, 0x8f, 0xb5, + 0xb5, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x76, 0x01, 0x0c, 0x73, 0x8a, 0x0e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -915,6 +1018,8 @@ type QueryClient interface { Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) // EpochInfo queries the information of a given epoch EpochInfo(ctx context.Context, in *QueryEpochInfoRequest, opts ...grpc.CallOption) (*QueryEpochInfoResponse, error) + // EpochsInfo range-queries the information of epochs + EpochsInfo(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error) // CurrentEpoch queries the current epoch CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error) // EpochMsgs queries the messages of a given epoch @@ -955,6 +1060,15 @@ func (c *queryClient) EpochInfo(ctx context.Context, in *QueryEpochInfoRequest, return out, nil } +func (c *queryClient) EpochsInfo(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error) { + out := new(QueryEpochsInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.epoching.v1.Query/EpochsInfo", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *queryClient) CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error) { out := new(QueryCurrentEpochResponse) err := c.cc.Invoke(ctx, "/babylon.epoching.v1.Query/CurrentEpoch", in, out, opts...) @@ -1015,6 +1129,8 @@ type QueryServer interface { Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) // EpochInfo queries the information of a given epoch EpochInfo(context.Context, *QueryEpochInfoRequest) (*QueryEpochInfoResponse, error) + // EpochsInfo range-queries the information of epochs + EpochsInfo(context.Context, *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) // CurrentEpoch queries the current epoch CurrentEpoch(context.Context, *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) // EpochMsgs queries the messages of a given epoch @@ -1039,6 +1155,9 @@ func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsReq func (*UnimplementedQueryServer) EpochInfo(ctx context.Context, req *QueryEpochInfoRequest) (*QueryEpochInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method EpochInfo not implemented") } +func (*UnimplementedQueryServer) EpochsInfo(ctx context.Context, req *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EpochsInfo not implemented") +} func (*UnimplementedQueryServer) CurrentEpoch(ctx context.Context, req *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CurrentEpoch not implemented") } @@ -1098,6 +1217,24 @@ func _Query_EpochInfo_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } +func _Query_EpochsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryEpochsInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).EpochsInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.epoching.v1.Query/EpochsInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).EpochsInfo(ctx, req.(*QueryEpochsInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Query_CurrentEpoch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryCurrentEpochRequest) if err := dec(in); err != nil { @@ -1218,6 +1355,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "EpochInfo", Handler: _Query_EpochInfo_Handler, }, + { + MethodName: "EpochsInfo", + Handler: _Query_EpochsInfo_Handler, + }, { MethodName: "CurrentEpoch", Handler: _Query_CurrentEpoch_Handler, @@ -1366,6 +1507,90 @@ func (m *QueryEpochInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *QueryEpochsInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochsInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochsInfoRequest) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryEpochsInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochsInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Epochs) > 0 { + for iNdEx := len(m.Epochs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Epochs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *QueryCurrentEpochRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1885,6 +2110,38 @@ func (m *QueryEpochInfoResponse) Size() (n int) { return n } +func (m *QueryEpochsInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryEpochsInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Epochs) > 0 { + for _, e := range m.Epochs { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + func (m *QueryCurrentEpochRequest) Size() (n int) { if m == nil { return 0 @@ -2366,6 +2623,212 @@ func (m *QueryEpochInfoResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryEpochsInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochsInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == 
nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryEpochsInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochsInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epochs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Epochs = append(m.Epochs, &Epoch{}) + if err := m.Epochs[len(m.Epochs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryCurrentEpochRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/epoching/types/query.pb.gw.go b/x/epoching/types/query.pb.gw.go index 82e30aa48..46850074f 100644 --- a/x/epoching/types/query.pb.gw.go +++ b/x/epoching/types/query.pb.gw.go @@ -105,6 +105,42 @@ func local_request_Query_EpochInfo_0(ctx context.Context, marshaler runtime.Mars } +var ( + filter_Query_EpochsInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_EpochsInfo_0(ctx 
context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochsInfoRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_EpochsInfo_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.EpochsInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_EpochsInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochsInfoRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_EpochsInfo_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.EpochsInfo(ctx, &protoReq) + return msg, metadata, err + +} + func request_Query_CurrentEpoch_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryCurrentEpochRequest var metadata runtime.ServerMetadata @@ -463,6 +499,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_EpochsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_EpochsInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EpochsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_Query_CurrentEpoch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -682,6 +741,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_EpochsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_EpochsInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EpochsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_CurrentEpoch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -810,6 +889,8 @@ var ( pattern_Query_EpochInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "epoching", "v1", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_EpochsInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "epoching", "v1", "epochs"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_CurrentEpoch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "epoching", "v1", "current_epoch"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_EpochMsgs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"babylon", "epoching", "v1", "epochs", "epoch_num", "messages"}, "", runtime.AssumeColonVerbOpt(false))) @@ -828,6 +909,8 @@ var ( forward_Query_EpochInfo_0 = runtime.ForwardResponseMessage + forward_Query_EpochsInfo_0 = runtime.ForwardResponseMessage + forward_Query_CurrentEpoch_0 = runtime.ForwardResponseMessage forward_Query_EpochMsgs_0 = runtime.ForwardResponseMessage From 820da8d2e38c51d143c43556df9645498bb37dac Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Wed, 25 Jan 2023 21:03:56 +1100 Subject: [PATCH 29/37] btccheckpoint API: range query for BTC checkpoints (#291) --- client/docs/swagger-ui/swagger.yaml | 262 +++++++--- proto/babylon/btccheckpoint/query.proto | 19 + x/btccheckpoint/keeper/grpc_query.go | 93 +++- x/btccheckpoint/types/query.pb.go | 654 ++++++++++++++++++++++-- x/btccheckpoint/types/query.pb.gw.go | 83 +++ x/epoching/types/query.pb.go | 8 +- 6 files changed, 982 insertions(+), 137 deletions(-) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index e2093965d..18a57df5a 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -4,6 +4,141 @@ info: description: A REST interface for state queries version: 1.0.0 paths: + /babylon/btccheckpoint/v1: + get: + summary: >- + BtcCheckpointsHeightAndHash returns earliest block height and hash for a + range of epochs + operationId: BtcCheckpointsHeightAndHash + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + earliest_btc_block_numbers: + type: array + items: + type: string + format: uint64 + earliest_btc_block_hashes: + type: array + items: + type: string + format: byte + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QueryBtcCheckpointsHeightAndHashResponse is response type for the + Query/BtcCheckpointsHeightAndHash RPC method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query /babylon/btccheckpoint/v1/params: get: summary: Parameters queries the parameters of the module. @@ -1244,7 +1379,14 @@ paths: - Query /babylon/epoching/v1/epochs: get: - summary: EpochsInfo range-queries the information of epochs + summary: >- + EpochsInfo queries the metadata of epochs in a given range, depending on + the + + parameters in the pagination request. The main use case will be querying + the + + latest epochs in time order. operationId: EpochsInfo responses: '200': @@ -4595,81 +4737,6 @@ paths: - CKPT_STATUS_FINALIZED tags: - Query - /babylon/checkpointing/v1/last_raw_checkpoint/{status}: - get: - summary: >- - LastCheckpointWithStatus queries the last checkpoint with a given status - or a more matured status - operationId: LastCheckpointWithStatus - responses: - '200': - description: A successful response.
- schema: - type: object - properties: - raw_checkpoint: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: >- - epoch_num defines the epoch number the raw checkpoint is - for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of - the BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated - from individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - default: - description: An unexpected error response. - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - parameters: - - name: status - in: path - required: true - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - tags: - - Query /babylon/checkpointing/v1/params: get: summary: Parameters queries the parameters of the module. @@ -9716,6 +9783,49 @@ definitions: title: >- QueryBtcCheckpointHeightAndHashResponse is response type for the Query/BtcCheckpointHeightAndHash RPC method + babylon.btccheckpoint.v1.QueryBtcCheckpointsHeightAndHashResponse: + type: object + properties: + earliest_btc_block_numbers: + type: array + items: + type: string + format: uint64 + earliest_btc_block_hashes: + type: array + items: + type: string + format: byte + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. 
+ + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QueryBtcCheckpointsHeightAndHashResponse is response type for the + Query/BtcCheckpointsHeightAndHash RPC method babylon.btccheckpoint.v1.QueryEpochSubmissionsResponse: type: object properties: diff --git a/proto/babylon/btccheckpoint/query.proto b/proto/babylon/btccheckpoint/query.proto index 731aaa42d..f4ac070dd 100644 --- a/proto/babylon/btccheckpoint/query.proto +++ b/proto/babylon/btccheckpoint/query.proto @@ -21,6 +21,11 @@ service Query { option (google.api.http).get = "/babylon/btccheckpoint/v1/{epoch_num}"; } + // BtcCheckpointsHeightAndHash returns earliest block height and hash for a range of epochs + rpc BtcCheckpointsHeightAndHash(QueryBtcCheckpointsHeightAndHashRequest) returns (QueryBtcCheckpointsHeightAndHashResponse) { + option (google.api.http).get = "/babylon/btccheckpoint/v1"; + } + rpc EpochSubmissions(QueryEpochSubmissionsRequest) returns (QueryEpochSubmissionsResponse) { option (google.api.http).get = "/babylon/btccheckpoint/v1/{epoch_num}/submissions"; } @@ -47,6 +52,20 @@ message QueryBtcCheckpointHeightAndHashResponse { bytes earliest_btc_block_hash = 2; } +message QueryBtcCheckpointsHeightAndHashRequest { + // pagination defines whether to have the pagination in the response + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryBtcCheckpointsHeightAndHashResponse is response type for the Query/BtcCheckpointsHeightAndHash RPC method +message QueryBtcCheckpointsHeightAndHashResponse { + repeated uint64 earliest_btc_block_numbers = 1; + repeated bytes earliest_btc_block_hashes = 2; + + // pagination defines the pagination in the response + cosmos.base.query.v1beta1.PageResponse pagination = 3; +} + message QueryEpochSubmissionsRequest { // Number of epoch for which submissions are requested uint64 epoch_num = 1; diff --git a/x/btccheckpoint/keeper/grpc_query.go b/x/btccheckpoint/keeper/grpc_query.go index 5e08324f0..c6ae6fd56 100644 --- a/x/btccheckpoint/keeper/grpc_query.go +++ b/x/btccheckpoint/keeper/grpc_query.go @@ -3,9 +3,11 @@ package keeper import ( "context" "errors" + "fmt" "math" "github.com/babylonchain/babylon/x/btccheckpoint/types" + "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/query" "google.golang.org/grpc/codes" @@ -15,7 +17,7 @@ import ( var _ types.QueryServer = Keeper{} func (k Keeper) lowestBtcHeightAndHash(ctx sdk.Context, subKey *types.SubmissionKey) (uint64, []byte, error) { - // initializing to max, as then every header number will be smaller + // initializing to max, as then every header height will be smaller var lowestHeaderNumber uint64 = math.MaxUint64 var lowestHeaderHash []byte @@ -43,6 +45,35 @@ func (k Keeper) lowestBtcHeightAndHash(ctx sdk.Context, subKey *types.Submission return lowestHeaderNumber, lowestHeaderHash, nil } +func (k Keeper) lowestBtcHeightAndHashInKeys(ctx sdk.Context, subKeys []*types.SubmissionKey) (uint64, []byte, error) { + if len(subKeys) == 0 { + return 0, nil, errors.New("empty subKeys") + } + + // initializing to max, as then every header height will be smaller + var lowestHeaderNumber uint64 = math.MaxUint64 + var lowestHeaderHash []byte + + for _, subKey := range subKeys { + headerNumber, headerHash, err := k.lowestBtcHeightAndHash(ctx, subKey) + if err != nil { + // submission is not valid for some reason, ignore it + continue + } + + if headerNumber < lowestHeaderNumber { + lowestHeaderNumber = headerNumber + 
lowestHeaderHash = headerHash + } + } + + if lowestHeaderNumber == math.MaxUint64 { + return 0, nil, errors.New("there is no valid submission for given raw checkpoint") + } + + return lowestHeaderNumber, lowestHeaderHash, nil +} + func (k Keeper) BtcCheckpointHeightAndHash(c context.Context, req *types.QueryBtcCheckpointHeightAndHashRequest) (*types.QueryBtcCheckpointHeightAndHashResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") @@ -59,32 +90,58 @@ func (k Keeper) BtcCheckpointHeightAndHash(c context.Context, req *types.QueryBt return nil, errors.New("checkpoint for given epoch not yet submitted") } - var lowestHeaderNumber uint64 = math.MaxUint64 - var lowestHeaderHash []byte + lowestHeaderNumber, lowestHeaderHash, err := k.lowestBtcHeightAndHashInKeys(ctx, epochData.Key) + if err != nil { + return nil, fmt.Errorf("failed to get lowest BTC height and hash in keys of epoch %d: %w", req.EpochNum, err) + } - // we need to go for each submission in given epoch - for _, submissionKey := range epochData.Key { + resp := &types.QueryBtcCheckpointHeightAndHashResponse{ + EarliestBtcBlockNumber: lowestHeaderNumber, + EarliestBtcBlockHash: lowestHeaderHash, + } + return resp, nil +} - headerNumber, headerHash, err := k.lowestBtcHeightAndHash(ctx, submissionKey) +func (k Keeper) BtcCheckpointsHeightAndHash(c context.Context, req *types.QueryBtcCheckpointsHeightAndHashRequest) (*types.QueryBtcCheckpointsHeightAndHashResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } - if err != nil { - // submission is not valid for some reason, ignore it - continue + ctx := sdk.UnwrapSDKContext(c) + + store := ctx.KVStore(k.storeKey) + epochDataStore := prefix.NewStore(store, types.EpochDataPrefix) + + btcNumbers := []uint64{} + btcHashes := [][]byte{} + // iterate over epochDataStore, where key is the epoch number and value is the epoch data + pageRes, err := query.Paginate(epochDataStore, req.Pagination, func(key, value []byte) error { + epochNum := sdk.BigEndianToUint64(key) + var epochData types.EpochData + k.cdc.MustUnmarshal(value, &epochData) + + // Check if we have any submission for given epoch + if len(epochData.Key) == 0 { + return errors.New("checkpoint for given epoch not yet submitted") } - if headerNumber < lowestHeaderNumber { - lowestHeaderNumber = headerNumber - lowestHeaderHash = headerHash + lowestHeaderNumber, lowestHeaderHash, err := k.lowestBtcHeightAndHashInKeys(ctx, epochData.Key) + if err != nil { + return fmt.Errorf("failed to get lowest BTC height and hash in keys of epoch %d: %w", epochNum, err) } - } + btcNumbers = append(btcNumbers, lowestHeaderNumber) + btcHashes = append(btcHashes, lowestHeaderHash) - if lowestHeaderNumber == math.MaxUint64 { - return nil, errors.New("there is no valid submission for given raw checkpoint") + return nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) } - resp := &types.QueryBtcCheckpointHeightAndHashResponse{ - EarliestBtcBlockNumber: lowestHeaderNumber, - EarliestBtcBlockHash: lowestHeaderHash, + resp := &types.QueryBtcCheckpointsHeightAndHashResponse{ + EarliestBtcBlockNumbers: btcNumbers, + EarliestBtcBlockHashes: btcHashes, + Pagination: pageRes, } return resp, nil } diff --git a/x/btccheckpoint/types/query.pb.go b/x/btccheckpoint/types/query.pb.go index 4fd32f6e3..cc88e9eb8 100644 --- a/x/btccheckpoint/types/query.pb.go +++ b/x/btccheckpoint/types/query.pb.go @@ -216,6 +216,117 @@ func (m 
*QueryBtcCheckpointHeightAndHashResponse) GetEarliestBtcBlockHash() []by return nil } +type QueryBtcCheckpointsHeightAndHashRequest struct { + // pagination defines whether to have the pagination in the response + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBtcCheckpointsHeightAndHashRequest) Reset() { + *m = QueryBtcCheckpointsHeightAndHashRequest{} +} +func (m *QueryBtcCheckpointsHeightAndHashRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointsHeightAndHashRequest) ProtoMessage() {} +func (*QueryBtcCheckpointsHeightAndHashRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_009c1165ec392ace, []int{4} +} +func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBtcCheckpointsHeightAndHashRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointsHeightAndHashRequest.Merge(m, src) +} +func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointsHeightAndHashRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBtcCheckpointsHeightAndHashRequest proto.InternalMessageInfo + +func (m *QueryBtcCheckpointsHeightAndHashRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryBtcCheckpointsHeightAndHashResponse is response type for the Query/BtcCheckpointsHeightAndHash RPC method +type QueryBtcCheckpointsHeightAndHashResponse struct { + EarliestBtcBlockNumbers []uint64 `protobuf:"varint,1,rep,packed,name=earliest_btc_block_numbers,json=earliestBtcBlockNumbers,proto3" json:"earliest_btc_block_numbers,omitempty"` + EarliestBtcBlockHashes [][]byte `protobuf:"bytes,2,rep,name=earliest_btc_block_hashes,json=earliestBtcBlockHashes,proto3" json:"earliest_btc_block_hashes,omitempty"` + // pagination defines the pagination in the response + Pagination *query.PageResponse `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBtcCheckpointsHeightAndHashResponse) Reset() { + *m = QueryBtcCheckpointsHeightAndHashResponse{} +} +func (m *QueryBtcCheckpointsHeightAndHashResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointsHeightAndHashResponse) ProtoMessage() {} +func (*QueryBtcCheckpointsHeightAndHashResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_009c1165ec392ace, []int{5} +} +func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBtcCheckpointsHeightAndHashResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_QueryBtcCheckpointsHeightAndHashResponse.Merge(m, src) +} +func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointsHeightAndHashResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBtcCheckpointsHeightAndHashResponse proto.InternalMessageInfo + +func (m *QueryBtcCheckpointsHeightAndHashResponse) GetEarliestBtcBlockNumbers() []uint64 { + if m != nil { + return m.EarliestBtcBlockNumbers + } + return nil +} + +func (m *QueryBtcCheckpointsHeightAndHashResponse) GetEarliestBtcBlockHashes() [][]byte { + if m != nil { + return m.EarliestBtcBlockHashes + } + return nil +} + +func (m *QueryBtcCheckpointsHeightAndHashResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + type QueryEpochSubmissionsRequest struct { // Number of epoch for which submissions are requested EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` @@ -226,7 +337,7 @@ func (m *QueryEpochSubmissionsRequest) Reset() { *m = QueryEpochSubmissi func (m *QueryEpochSubmissionsRequest) String() string { return proto.CompactTextString(m) } func (*QueryEpochSubmissionsRequest) ProtoMessage() {} func (*QueryEpochSubmissionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_009c1165ec392ace, []int{4} + return fileDescriptor_009c1165ec392ace, []int{6} } func (m *QueryEpochSubmissionsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -279,7 +390,7 @@ func (m *QueryEpochSubmissionsResponse) Reset() { *m = QueryEpochSubmiss func (m *QueryEpochSubmissionsResponse) String() string { return proto.CompactTextString(m) } func (*QueryEpochSubmissionsResponse) ProtoMessage() {} func (*QueryEpochSubmissionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_009c1165ec392ace, []int{5} + return fileDescriptor_009c1165ec392ace, []int{7} } func (m *QueryEpochSubmissionsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,6 +438,8 @@ func init() { proto.RegisterType((*QueryParamsResponse)(nil), "babylon.btccheckpoint.v1.QueryParamsResponse") proto.RegisterType((*QueryBtcCheckpointHeightAndHashRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightAndHashRequest") proto.RegisterType((*QueryBtcCheckpointHeightAndHashResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightAndHashResponse") + proto.RegisterType((*QueryBtcCheckpointsHeightAndHashRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointsHeightAndHashRequest") + proto.RegisterType((*QueryBtcCheckpointsHeightAndHashResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointsHeightAndHashResponse") proto.RegisterType((*QueryEpochSubmissionsRequest)(nil), "babylon.btccheckpoint.v1.QueryEpochSubmissionsRequest") proto.RegisterType((*QueryEpochSubmissionsResponse)(nil), "babylon.btccheckpoint.v1.QueryEpochSubmissionsResponse") } @@ -334,45 +447,50 @@ func init() { func init() { proto.RegisterFile("babylon/btccheckpoint/query.proto", fileDescriptor_009c1165ec392ace) } var fileDescriptor_009c1165ec392ace = []byte{ - // 595 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcf, 0x8a, 0xd3, 0x40, - 0x18, 0xef, 0xac, 0xb5, 0xe8, 0xd4, 0x83, 0x8c, 0x45, 0x6b, 0x5c, 0x63, 0x0d, 0xb8, 0xad, 0xe2, - 0x26, 0xb4, 0xcb, 0x2a, 0x8b, 0x20, 0x6e, 0x65, 0x75, 0x41, 0x58, 0xd7, 0x88, 0x17, 0x2f, 0x65, - 0x32, 
0x0e, 0x49, 0x68, 0x93, 0xc9, 0x76, 0x26, 0xc5, 0x22, 0x5e, 0xf4, 0x01, 0x14, 0x3c, 0x7a, - 0xf6, 0xe8, 0x2b, 0x78, 0xde, 0x63, 0xc1, 0x8b, 0x27, 0x91, 0xd6, 0x07, 0x91, 0x4c, 0xa6, 0xed, - 0xb6, 0x36, 0xb4, 0xe2, 0xad, 0x64, 0x7e, 0xff, 0xbe, 0xf9, 0x7e, 0x53, 0x78, 0xdd, 0xc1, 0x4e, - 0xbf, 0xc3, 0x42, 0xcb, 0x11, 0x84, 0x78, 0x94, 0xb4, 0x23, 0xe6, 0x87, 0xc2, 0x3a, 0x8a, 0x69, - 0xb7, 0x6f, 0x46, 0x5d, 0x26, 0x18, 0x2a, 0x2b, 0x88, 0x39, 0x03, 0x31, 0x7b, 0x75, 0xad, 0xe4, - 0x32, 0x97, 0x49, 0x90, 0x95, 0xfc, 0x4a, 0xf1, 0xda, 0xba, 0xcb, 0x98, 0xdb, 0xa1, 0x16, 0x8e, - 0x7c, 0x0b, 0x87, 0x21, 0x13, 0x58, 0xf8, 0x2c, 0xe4, 0xea, 0xf4, 0x16, 0x61, 0x3c, 0x60, 0xdc, - 0x72, 0x30, 0xa7, 0xa9, 0x8d, 0xd5, 0xab, 0x3b, 0x54, 0xe0, 0xba, 0x15, 0x61, 0xd7, 0x0f, 0x25, - 0x58, 0x61, 0x8d, 0xc5, 0xe1, 0x22, 0xdc, 0xc5, 0xc1, 0x58, 0xef, 0xe6, 0x62, 0xcc, 0x6c, 0x56, - 0x09, 0x35, 0x4a, 0x10, 0x3d, 0x4b, 0x0c, 0x0f, 0x25, 0xdf, 0xa6, 0x47, 0x31, 0xe5, 0xc2, 0x78, - 0x01, 0x2f, 0xcc, 0x7c, 0xe5, 0x11, 0x0b, 0x39, 0x45, 0xf7, 0x61, 0x21, 0xf5, 0x29, 0x83, 0x0a, - 0xa8, 0x15, 0x1b, 0x15, 0x33, 0xeb, 0x1a, 0xcc, 0x94, 0xd9, 0xcc, 0x1f, 0xff, 0xbc, 0x96, 0xb3, - 0x15, 0xcb, 0xd8, 0x83, 0x1b, 0x52, 0xb6, 0x29, 0xc8, 0xc3, 0x09, 0x7a, 0x9f, 0xfa, 0xae, 0x27, - 0x76, 0xc3, 0x57, 0xfb, 0x98, 0x7b, 0x2a, 0x00, 0xba, 0x02, 0xcf, 0xd2, 0x88, 0x11, 0xaf, 0x15, - 0xc6, 0x81, 0x34, 0xcb, 0xdb, 0x67, 0xe4, 0x87, 0x83, 0x38, 0x30, 0x3e, 0x03, 0x58, 0x5d, 0xaa, - 0xa3, 0x22, 0xef, 0xc0, 0xcb, 0x14, 0x77, 0x3b, 0x3e, 0xe5, 0xa2, 0xe5, 0x08, 0xd2, 0x72, 0x3a, - 0x8c, 0xb4, 0x13, 0x55, 0x87, 0x76, 0x95, 0xf0, 0xc5, 0x31, 0xa0, 0x29, 0x48, 0x33, 0x39, 0x3e, - 0x90, 0xa7, 0x68, 0x1b, 0x5e, 0x5a, 0x40, 0xf5, 0x30, 0xf7, 0xca, 0x6b, 0x15, 0x50, 0x3b, 0x67, - 0x97, 0xe6, 0x89, 0x89, 0xb3, 0xf1, 0x1e, 0xc0, 0x75, 0x99, 0x6e, 0x2f, 0xc9, 0xfb, 0x3c, 0x76, - 0x02, 0x9f, 0xf3, 0x64, 0xd9, 0xab, 0xcc, 0x86, 0x1e, 0x41, 0x38, 0x5d, 0xb9, 0xf4, 0x29, 0x36, - 0x36, 0xcc, 0xb4, 0x1f, 0x66, 0xd2, 0x0f, 0x33, 0xad, 0xa1, 0xea, 0x87, 0x79, 0x88, 0x5d, 0xaa, - 0x84, 0xed, 0x13, 0x4c, 0xe3, 0x0b, 0x80, 0x57, 0x33, 0x52, 0xa8, 0x9b, 0xb9, 0x07, 0xf3, 0x6d, - 0xda, 0x4f, 0x56, 0x79, 0xaa, 0x56, 0x6c, 0x54, 0xb3, 0x57, 0x39, 0x25, 0x3f, 0xa1, 0x7d, 0x5b, - 0x92, 0xd0, 0xe3, 0x05, 0x31, 0xab, 0x4b, 0x63, 0xa6, 0xce, 0x27, 0x73, 0x36, 0xbe, 0xe6, 0xe1, - 0x69, 0x99, 0x13, 0x7d, 0x00, 0xb0, 0x90, 0xb6, 0x06, 0xdd, 0xce, 0x0e, 0xf3, 0x77, 0x59, 0xb5, - 0xcd, 0x15, 0xd1, 0xa9, 0xbb, 0x51, 0x7b, 0xf7, 0xfd, 0xf7, 0xa7, 0x35, 0x03, 0x55, 0xac, 0xc5, - 0xaf, 0xa4, 0x57, 0x57, 0x8f, 0x09, 0x0d, 0x00, 0xd4, 0xb2, 0x2b, 0x86, 0x1e, 0x2c, 0xf1, 0x5d, - 0xda, 0x72, 0x6d, 0xf7, 0x3f, 0x14, 0xd4, 0x34, 0x9b, 0x72, 0x9a, 0x2a, 0xba, 0x91, 0x3d, 0xcd, - 0x9b, 0x49, 0xdb, 0xde, 0xa2, 0x6f, 0x00, 0x9e, 0x9f, 0x6f, 0x04, 0xba, 0xb3, 0x24, 0x46, 0x46, - 0x91, 0xb5, 0xbb, 0xff, 0xcc, 0x53, 0xa1, 0x77, 0x64, 0xe8, 0x2d, 0x54, 0x5f, 0x29, 0xb4, 0xc5, - 0xa7, 0x12, 0xcd, 0xa7, 0xc7, 0x43, 0x1d, 0x0c, 0x86, 0x3a, 0xf8, 0x35, 0xd4, 0xc1, 0xc7, 0x91, - 0x9e, 0x1b, 0x8c, 0xf4, 0xdc, 0x8f, 0x91, 0x9e, 0x7b, 0xb9, 0xed, 0xfa, 0xc2, 0x8b, 0x1d, 0x93, - 0xb0, 0x60, 0x2c, 0x4b, 0x3c, 0xec, 0x87, 0x13, 0x8f, 0xd7, 0x73, 0x2e, 0xa2, 0x1f, 0x51, 0xee, - 0x14, 0xe4, 0xff, 0xe0, 0xd6, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xac, 0x9c, 0xf6, 0x14, 0xf5, - 0x05, 0x00, 0x00, + // 683 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x4f, 0xd4, 0x4c, + 0x18, 0xdf, 0x59, 0x16, 0xf2, 0xbe, 0x23, 0x07, 0x33, 0x12, 
0x59, 0x0a, 0xd6, 0xa5, 0x89, 0x50, + 0x8d, 0xb4, 0x59, 0x08, 0x1a, 0x62, 0x62, 0xa4, 0x06, 0x25, 0x31, 0x41, 0xac, 0xf1, 0xe2, 0x85, + 0x4c, 0xeb, 0xa4, 0x6d, 0xd8, 0x76, 0xca, 0xce, 0x94, 0xb8, 0x31, 0x5e, 0xf4, 0x03, 0x68, 0xe2, + 0xd1, 0xb3, 0x5f, 0xc3, 0xab, 0x1c, 0x49, 0xbc, 0x78, 0x32, 0x06, 0xbc, 0xf9, 0x01, 0xbc, 0x9a, + 0x4e, 0x67, 0x77, 0xdd, 0xa5, 0xb5, 0x0b, 0xdc, 0x36, 0x9d, 0xdf, 0xf3, 0xfc, 0xfe, 0xcc, 0xf3, + 0xcc, 0xc2, 0x79, 0x07, 0x3b, 0x9d, 0x16, 0x8d, 0x4c, 0x87, 0xbb, 0xae, 0x4f, 0xdc, 0xdd, 0x98, + 0x06, 0x11, 0x37, 0xf7, 0x12, 0xd2, 0xee, 0x18, 0x71, 0x9b, 0x72, 0x8a, 0xea, 0x12, 0x62, 0x0c, + 0x40, 0x8c, 0xfd, 0xa6, 0x32, 0xe5, 0x51, 0x8f, 0x0a, 0x90, 0x99, 0xfe, 0xca, 0xf0, 0xca, 0x9c, + 0x47, 0xa9, 0xd7, 0x22, 0x26, 0x8e, 0x03, 0x13, 0x47, 0x11, 0xe5, 0x98, 0x07, 0x34, 0x62, 0xf2, + 0xf4, 0x86, 0x4b, 0x59, 0x48, 0x99, 0xe9, 0x60, 0x46, 0x32, 0x1a, 0x73, 0xbf, 0xe9, 0x10, 0x8e, + 0x9b, 0x66, 0x8c, 0xbd, 0x20, 0x12, 0x60, 0x89, 0xd5, 0xf2, 0xc5, 0xc5, 0xb8, 0x8d, 0xc3, 0x6e, + 0xbf, 0xeb, 0xf9, 0x98, 0x41, 0xad, 0x02, 0xaa, 0x4d, 0x41, 0xf4, 0x24, 0x25, 0xdc, 0x16, 0xf5, + 0x36, 0xd9, 0x4b, 0x08, 0xe3, 0xda, 0x33, 0x78, 0x69, 0xe0, 0x2b, 0x8b, 0x69, 0xc4, 0x08, 0xba, + 0x0b, 0x27, 0x32, 0x9e, 0x3a, 0x68, 0x00, 0xfd, 0xc2, 0x72, 0xc3, 0x28, 0x8a, 0xc1, 0xc8, 0x2a, + 0xad, 0xda, 0xc1, 0xf7, 0xab, 0x15, 0x5b, 0x56, 0x69, 0x1b, 0x70, 0x41, 0xb4, 0xb5, 0xb8, 0x7b, + 0xbf, 0x87, 0xde, 0x24, 0x81, 0xe7, 0xf3, 0xf5, 0xe8, 0xc5, 0x26, 0x66, 0xbe, 0x14, 0x80, 0x66, + 0xe1, 0xff, 0x24, 0xa6, 0xae, 0xbf, 0x13, 0x25, 0xa1, 0x20, 0xab, 0xd9, 0xff, 0x89, 0x0f, 0x5b, + 0x49, 0xa8, 0x7d, 0x04, 0x70, 0xb1, 0xb4, 0x8f, 0x94, 0xbc, 0x06, 0x67, 0x08, 0x6e, 0xb7, 0x02, + 0xc2, 0xf8, 0x8e, 0xc3, 0xdd, 0x1d, 0xa7, 0x45, 0xdd, 0xdd, 0xb4, 0xab, 0x43, 0xda, 0xb2, 0xf1, + 0xe5, 0x2e, 0xc0, 0xe2, 0xae, 0x95, 0x1e, 0x6f, 0x89, 0x53, 0xb4, 0x0a, 0xa7, 0x73, 0x4a, 0x7d, + 0xcc, 0xfc, 0x7a, 0xb5, 0x01, 0xf4, 0x49, 0x7b, 0x6a, 0xb8, 0x30, 0x65, 0xd6, 0xf6, 0xf2, 0xc4, + 0xb1, 0x5c, 0x97, 0x0f, 0x20, 0xec, 0xdf, 0xaf, 0xcc, 0x74, 0xc1, 0xc8, 0x86, 0xc1, 0x48, 0x87, + 0xc1, 0xc8, 0x66, 0x4e, 0x0e, 0x83, 0xb1, 0x8d, 0x3d, 0x22, 0x6b, 0xed, 0xbf, 0x2a, 0xb5, 0x5f, + 0x00, 0xea, 0xe5, 0x9c, 0x32, 0x91, 0x3b, 0x50, 0x29, 0x4c, 0x24, 0xbd, 0xd8, 0x31, 0xbd, 0x66, + 0x4f, 0xe7, 0x47, 0xc2, 0x0a, 0xe2, 0x4c, 0x33, 0x21, 0xac, 0x5e, 0x6d, 0x8c, 0xe9, 0x93, 0x27, + 0xe3, 0xdc, 0x14, 0xa7, 0xe8, 0xe1, 0x80, 0xd9, 0x31, 0x61, 0x76, 0xb1, 0xd4, 0x6c, 0x26, 0x7a, + 0xc0, 0xed, 0x5b, 0x00, 0xe7, 0x84, 0xdb, 0x8d, 0x74, 0x20, 0x9e, 0x26, 0x4e, 0x18, 0x30, 0x96, + 0x6e, 0xd3, 0x28, 0xc3, 0x33, 0x94, 0x79, 0xf5, 0xcc, 0x99, 0x7f, 0x02, 0xf0, 0x4a, 0x81, 0x8a, + 0x5e, 0xd0, 0xb5, 0x5d, 0xd2, 0xc9, 0x22, 0x4d, 0xad, 0x16, 0xee, 0x4a, 0xbf, 0xf8, 0x11, 0xe9, + 0xd8, 0xa2, 0x68, 0x28, 0xad, 0xea, 0x99, 0xd3, 0x5a, 0xfe, 0x3d, 0x0e, 0xc7, 0x85, 0x4e, 0xf4, + 0x0e, 0xc0, 0x89, 0x6c, 0x2d, 0xd1, 0xcd, 0x62, 0x31, 0x27, 0x5f, 0x03, 0x65, 0x69, 0x44, 0x74, + 0xc6, 0xae, 0xe9, 0x6f, 0xbe, 0xfe, 0xfc, 0x50, 0xd5, 0x50, 0xc3, 0xcc, 0x7f, 0x86, 0xf6, 0x9b, + 0xf2, 0xb5, 0x42, 0x87, 0x00, 0x2a, 0xc5, 0x3b, 0x8c, 0xee, 0x95, 0xf0, 0x96, 0x3e, 0x23, 0xca, + 0xfa, 0x39, 0x3a, 0x48, 0x37, 0x4b, 0xc2, 0xcd, 0x22, 0xba, 0x56, 0xec, 0xe6, 0x55, 0x6f, 0xda, + 0x5e, 0xa3, 0x2f, 0x00, 0xce, 0xfe, 0x63, 0x0b, 0xd1, 0xa9, 0x14, 0xe5, 0xbe, 0x1a, 0x8a, 0x75, + 0x9e, 0x16, 0xd2, 0xd5, 0xbc, 0x70, 0x35, 0x8b, 0x66, 0x0a, 0x5d, 0xa1, 0xcf, 0x00, 0x5e, 0x1c, + 0x9e, 0x6d, 0x74, 0xab, 0x84, 0xbb, 0x60, 0x25, 0x95, 0xdb, 0xa7, 0xae, 0x93, 0x42, 
0xd7, 0x84, + 0xd0, 0x15, 0xd4, 0x1c, 0x29, 0x7e, 0x93, 0xf5, 0x5b, 0x58, 0x8f, 0x0f, 0x8e, 0x54, 0x70, 0x78, + 0xa4, 0x82, 0x1f, 0x47, 0x2a, 0x78, 0x7f, 0xac, 0x56, 0x0e, 0x8f, 0xd5, 0xca, 0xb7, 0x63, 0xb5, + 0xf2, 0x7c, 0xd5, 0x0b, 0xb8, 0x9f, 0x38, 0x86, 0x4b, 0xc3, 0x6e, 0x5b, 0xd7, 0xc7, 0x41, 0xd4, + 0xe3, 0x78, 0x39, 0xc4, 0xc2, 0x3b, 0x31, 0x61, 0xce, 0x84, 0xf8, 0xcb, 0x5c, 0xf9, 0x13, 0x00, + 0x00, 0xff, 0xff, 0xf3, 0x16, 0xad, 0x29, 0x20, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -391,6 +509,8 @@ type QueryClient interface { Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) // BtcCheckpointHeightAndHash returns earliest block height and hash for given rawcheckpoint BtcCheckpointHeightAndHash(ctx context.Context, in *QueryBtcCheckpointHeightAndHashRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointHeightAndHashResponse, error) + // BtcCheckpointsHeightAndHash returns earliest block height and hash for a range of epochs + BtcCheckpointsHeightAndHash(ctx context.Context, in *QueryBtcCheckpointsHeightAndHashRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointsHeightAndHashResponse, error) EpochSubmissions(ctx context.Context, in *QueryEpochSubmissionsRequest, opts ...grpc.CallOption) (*QueryEpochSubmissionsResponse, error) } @@ -420,6 +540,15 @@ func (c *queryClient) BtcCheckpointHeightAndHash(ctx context.Context, in *QueryB return out, nil } +func (c *queryClient) BtcCheckpointsHeightAndHash(ctx context.Context, in *QueryBtcCheckpointsHeightAndHashRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointsHeightAndHashResponse, error) { + out := new(QueryBtcCheckpointsHeightAndHashResponse) + err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointsHeightAndHash", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *queryClient) EpochSubmissions(ctx context.Context, in *QueryEpochSubmissionsRequest, opts ...grpc.CallOption) (*QueryEpochSubmissionsResponse, error) { out := new(QueryEpochSubmissionsResponse) err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/EpochSubmissions", in, out, opts...) 
@@ -435,6 +564,8 @@ type QueryServer interface { Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) // BtcCheckpointHeightAndHash returns earliest block height and hash for given rawcheckpoint BtcCheckpointHeightAndHash(context.Context, *QueryBtcCheckpointHeightAndHashRequest) (*QueryBtcCheckpointHeightAndHashResponse, error) + // BtcCheckpointsHeightAndHash returns earliest block height and hash for a range of epochs + BtcCheckpointsHeightAndHash(context.Context, *QueryBtcCheckpointsHeightAndHashRequest) (*QueryBtcCheckpointsHeightAndHashResponse, error) EpochSubmissions(context.Context, *QueryEpochSubmissionsRequest) (*QueryEpochSubmissionsResponse, error) } @@ -448,6 +579,9 @@ func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsReq func (*UnimplementedQueryServer) BtcCheckpointHeightAndHash(ctx context.Context, req *QueryBtcCheckpointHeightAndHashRequest) (*QueryBtcCheckpointHeightAndHashResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointHeightAndHash not implemented") } +func (*UnimplementedQueryServer) BtcCheckpointsHeightAndHash(ctx context.Context, req *QueryBtcCheckpointsHeightAndHashRequest) (*QueryBtcCheckpointsHeightAndHashResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointsHeightAndHash not implemented") +} func (*UnimplementedQueryServer) EpochSubmissions(ctx context.Context, req *QueryEpochSubmissionsRequest) (*QueryEpochSubmissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method EpochSubmissions not implemented") } @@ -492,6 +626,24 @@ func _Query_BtcCheckpointHeightAndHash_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } +func _Query_BtcCheckpointsHeightAndHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBtcCheckpointsHeightAndHashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).BtcCheckpointsHeightAndHash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointsHeightAndHash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).BtcCheckpointsHeightAndHash(ctx, req.(*QueryBtcCheckpointsHeightAndHashRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Query_EpochSubmissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryEpochSubmissionsRequest) if err := dec(in); err != nil { @@ -522,6 +674,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "BtcCheckpointHeightAndHash", Handler: _Query_BtcCheckpointHeightAndHash_Handler, }, + { + MethodName: "BtcCheckpointsHeightAndHash", + Handler: _Query_BtcCheckpointsHeightAndHash_Handler, + }, { MethodName: "EpochSubmissions", Handler: _Query_EpochSubmissions_Handler, @@ -650,6 +806,103 @@ func (m *QueryBtcCheckpointHeightAndHashResponse) MarshalToSizedBuffer(dAtA []by return len(dAtA) - i, nil } +func (m *QueryBtcCheckpointsHeightAndHashRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBtcCheckpointsHeightAndHashRequest) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBtcCheckpointsHeightAndHashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryBtcCheckpointsHeightAndHashResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBtcCheckpointsHeightAndHashResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBtcCheckpointsHeightAndHashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.EarliestBtcBlockHashes) > 0 { + for iNdEx := len(m.EarliestBtcBlockHashes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EarliestBtcBlockHashes[iNdEx]) + copy(dAtA[i:], m.EarliestBtcBlockHashes[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.EarliestBtcBlockHashes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.EarliestBtcBlockNumbers) > 0 { + dAtA5 := make([]byte, len(m.EarliestBtcBlockNumbers)*10) + var j4 int + for _, num := range m.EarliestBtcBlockNumbers { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + i -= j4 + copy(dAtA[i:], dAtA5[:j4]) + i = encodeVarintQuery(dAtA, i, uint64(j4)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *QueryEpochSubmissionsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -798,6 +1051,45 @@ func (m *QueryBtcCheckpointHeightAndHashResponse) Size() (n int) { return n } +func (m *QueryBtcCheckpointsHeightAndHashRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBtcCheckpointsHeightAndHashResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.EarliestBtcBlockNumbers) > 0 { + l = 0 + for _, e := range m.EarliestBtcBlockNumbers { + l += sovQuery(uint64(e)) + } + n += 1 + sovQuery(uint64(l)) + l + } + if len(m.EarliestBtcBlockHashes) > 0 { + for _, b := range m.EarliestBtcBlockHashes { + l = len(b) + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + func (m *QueryEpochSubmissionsRequest) Size() (n int) { if m == nil { return 0 @@ -1144,6 +1436,286 @@ func (m *QueryBtcCheckpointHeightAndHashResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryBtcCheckpointsHeightAndHashRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBtcCheckpointsHeightAndHashRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBtcCheckpointsHeightAndHashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBtcCheckpointsHeightAndHashResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBtcCheckpointsHeightAndHashResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBtcCheckpointsHeightAndHashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EarliestBtcBlockNumbers = append(m.EarliestBtcBlockNumbers, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.EarliestBtcBlockNumbers) == 0 { + m.EarliestBtcBlockNumbers = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EarliestBtcBlockNumbers = append(m.EarliestBtcBlockNumbers, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockNumbers", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockHashes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EarliestBtcBlockHashes = append(m.EarliestBtcBlockHashes, make([]byte, postIndex-iNdEx)) + copy(m.EarliestBtcBlockHashes[len(m.EarliestBtcBlockHashes)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryEpochSubmissionsRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/btccheckpoint/types/query.pb.gw.go b/x/btccheckpoint/types/query.pb.gw.go index 272c4dbeb..2cd7c74c1 100644 --- a/x/btccheckpoint/types/query.pb.gw.go +++ b/x/btccheckpoint/types/query.pb.gw.go @@ -105,6 +105,42 @@ func local_request_Query_BtcCheckpointHeightAndHash_0(ctx context.Context, marsh } +var ( + filter_Query_BtcCheckpointsHeightAndHash_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_BtcCheckpointsHeightAndHash_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointsHeightAndHashRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BtcCheckpointsHeightAndHash_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BtcCheckpointsHeightAndHash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_BtcCheckpointsHeightAndHash_0(ctx context.Context, marshaler 
runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointsHeightAndHashRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BtcCheckpointsHeightAndHash_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BtcCheckpointsHeightAndHash(ctx, &protoReq) + return msg, metadata, err + +} + var ( filter_Query_EpochSubmissions_0 = &utilities.DoubleArray{Encoding: map[string]int{"epoch_num": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) @@ -229,6 +265,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_BtcCheckpointsHeightAndHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_BtcCheckpointsHeightAndHash_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_BtcCheckpointsHeightAndHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_EpochSubmissions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -333,6 +392,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_BtcCheckpointsHeightAndHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_BtcCheckpointsHeightAndHash_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_BtcCheckpointsHeightAndHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+
+	})
+
 	mux.Handle("GET", pattern_Query_EpochSubmissions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
@@ -361,6 +440,8 @@ var (
 
 	pattern_Query_BtcCheckpointHeightAndHash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"babylon", "btccheckpoint", "v1", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false)))
 
+	pattern_Query_BtcCheckpointsHeightAndHash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"babylon", "btccheckpoint", "v1"}, "", runtime.AssumeColonVerbOpt(false)))
+
 	pattern_Query_EpochSubmissions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"babylon", "btccheckpoint", "v1", "epoch_num", "submissions"}, "", runtime.AssumeColonVerbOpt(false)))
 )
 
@@ -369,5 +450,7 @@ var (
 
 	forward_Query_BtcCheckpointHeightAndHash_0 = runtime.ForwardResponseMessage
 
+	forward_Query_BtcCheckpointsHeightAndHash_0 = runtime.ForwardResponseMessage
+
 	forward_Query_EpochSubmissions_0 = runtime.ForwardResponseMessage
 )
diff --git a/x/epoching/types/query.pb.go b/x/epoching/types/query.pb.go
index b75c30e0a..21ce48ecd 100644
--- a/x/epoching/types/query.pb.go
+++ b/x/epoching/types/query.pb.go
@@ -1018,7 +1018,9 @@ type QueryClient interface {
 	Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error)
 	// EpochInfo queries the information of a given epoch
 	EpochInfo(ctx context.Context, in *QueryEpochInfoRequest, opts ...grpc.CallOption) (*QueryEpochInfoResponse, error)
-	// EpochsInfo range-queries the information of epochs
+	// EpochsInfo queries the metadata of epochs in a given range, depending on the
+	// parameters in the pagination request. The main use case will be querying the
+	// latest epochs in time order.
 	EpochsInfo(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error)
 	// CurrentEpoch queries the current epoch
 	CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error)
@@ -1129,7 +1131,9 @@ type QueryServer interface {
 	Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error)
 	// EpochInfo queries the information of a given epoch
 	EpochInfo(context.Context, *QueryEpochInfoRequest) (*QueryEpochInfoResponse, error)
-	// EpochsInfo range-queries the information of epochs
+	// EpochsInfo queries the metadata of epochs in a given range, depending on the
+	// parameters in the pagination request. The main use case will be querying the
+	// latest epochs in time order.
EpochsInfo(context.Context, *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) // CurrentEpoch queries the current epoch CurrentEpoch(context.Context, *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) From cad147d0976779e6b13f10c467ecb954d23b9452 Mon Sep 17 00:00:00 2001 From: Cirrus Gai Date: Fri, 27 Jan 2023 12:10:43 +0800 Subject: [PATCH 30/37] chore: Remove checkpointing spec (#293) --- x/checkpointing/spec/01_state.md | 0 x/checkpointing/spec/02_keepers.md | 0 x/checkpointing/spec/03_messages.md | 0 x/checkpointing/spec/04_events.md | 0 x/checkpointing/spec/05_params.md | 0 x/checkpointing/spec/README.md | 0 x/checkpointing/spec/registration.md | 43 ---------------------------- 7 files changed, 43 deletions(-) delete mode 100644 x/checkpointing/spec/01_state.md delete mode 100644 x/checkpointing/spec/02_keepers.md delete mode 100644 x/checkpointing/spec/03_messages.md delete mode 100644 x/checkpointing/spec/04_events.md delete mode 100644 x/checkpointing/spec/05_params.md delete mode 100644 x/checkpointing/spec/README.md delete mode 100644 x/checkpointing/spec/registration.md diff --git a/x/checkpointing/spec/01_state.md b/x/checkpointing/spec/01_state.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/02_keepers.md b/x/checkpointing/spec/02_keepers.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/03_messages.md b/x/checkpointing/spec/03_messages.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/04_events.md b/x/checkpointing/spec/04_events.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/05_params.md b/x/checkpointing/spec/05_params.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/README.md b/x/checkpointing/spec/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/registration.md b/x/checkpointing/spec/registration.md deleted file mode 100644 index ad49cee97..000000000 --- a/x/checkpointing/spec/registration.md +++ /dev/null @@ -1,43 +0,0 @@ -# Registration - -To participate in the checkpointing, a validator needs to also register its BLS public key. - -## Register a Validator - -The original registration is done via a transaction that carries a `MsgCreateValidator` message. -To register a BLS public key, we need a wrapper message called `MsgWrappedCreateValidator` processed by the `Checkpointing` module. -This message wraps the original `MsgCreateValidator` message as well as a BLS public key and a `Proof-of-Possession` (PoP) for registering BLS public key. -The execution of `MsgWrappedCreateValidator` is as follows. - -1. The `Checkpointing` module first processes `MsgWrappedCreateValidator` to register the validator's BLS key. If success, then -2. extract `MsgCreateValidator` and deliver `MsgCreateValidator` to the epoching module's message queue, which will be processed at the end of this epoch. If success, the registration is succeeded. -3. Otherwise, the registration fails and the validator should register again with the same keys. - -## Genesis - -Genesis validators are registered via the legacy `genutil` module from the Cosmos-SDK, which processes `MsgCreateValidator` messages contained in genesis transactions. -The BLS keys are registered as `GenesisState` in the checkpointing module. 
-The checkpointing module's `ValidateGenesis` should ensure that each genesis validator has both an Ed25519 key and BLS key which are bonded by PoP. - -## Proof of Possession - -The purpose of PoP is to prove that one validator owns: -1. the corresponding BLS private key; -2. the corresponding Ed25519 private key associated with the public key in the `MsgCreateValidator` message. - -To achieve that, PoP is calculated as follows. - -`PoP = sign(key = BLS_sk, data = sign(key = Ed25519_sk, data = BLS_pk)]` - -Since the delegator already relates its account with the validator's Ed25519 key through the signatures in `MsgCreateValidator`, the adversary cannot do registration with the same PoP. - -## Verification - -To verify PoP, first we need to ensure that the BLS public key has never been registered by a different validator, -and that the current validator hasn't already registered a different BLS public key. Then, verify - -``` -MsgWrappedCreateValidator.BLS_pk ?= decrypt(key = Ed25519_pk, data = decrypt(key = BLS_pk, data = PoP)) -``` - -If verification passes, the `Checkpointing` module stores the BLS public key and associates it with the validator. From 59b79790e2a0fb035c472f8eca7cb5bee981c2a2 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Fri, 27 Jan 2023 18:10:58 +1100 Subject: [PATCH 31/37] API: add parameters to range queries and improve outputs (#292) --- client/docs/swagger-ui/swagger.yaml | 30 +++ proto/babylon/btccheckpoint/query.proto | 13 +- proto/babylon/epoching/v1/query.proto | 5 +- x/btccheckpoint/keeper/grpc_query.go | 18 ++ x/btccheckpoint/types/query.pb.go | 289 +++++++++++++++++++----- x/epoching/keeper/grpc_query.go | 14 ++ x/epoching/keeper/grpc_query_test.go | 37 +++ x/epoching/types/query.pb.go | 211 +++++++++++------ 8 files changed, 489 insertions(+), 128 deletions(-) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index 18a57df5a..ecf1d71fa 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -16,6 +16,11 @@ paths: schema: type: object properties: + epoch_numbers: + type: array + items: + type: string + format: uint64 earliest_btc_block_numbers: type: array items: @@ -81,6 +86,16 @@ paths: type: string format: byte parameters: + - name: start_epoch + in: query + required: false + type: string + format: uint64 + - name: end_epoch + in: query + required: false + type: string + format: uint64 - name: pagination.key description: |- key is a value returned in PageResponse.next_key to begin @@ -1808,6 +1823,16 @@ paths: "value": "1.212s" } parameters: + - name: start_epoch + in: query + required: false + type: string + format: uint64 + - name: end_epoch + in: query + required: false + type: string + format: uint64 - name: pagination.key description: |- key is a value returned in PageResponse.next_key to begin @@ -9786,6 +9811,11 @@ definitions: babylon.btccheckpoint.v1.QueryBtcCheckpointsHeightAndHashResponse: type: object properties: + epoch_numbers: + type: array + items: + type: string + format: uint64 earliest_btc_block_numbers: type: array items: diff --git a/proto/babylon/btccheckpoint/query.proto b/proto/babylon/btccheckpoint/query.proto index f4ac070dd..33b1502fa 100644 --- a/proto/babylon/btccheckpoint/query.proto +++ b/proto/babylon/btccheckpoint/query.proto @@ -52,18 +52,23 @@ message QueryBtcCheckpointHeightAndHashResponse { bytes earliest_btc_block_hash = 2; } +// QueryBtcCheckpointsHeightAndHashRequest is request type for the Query/BtcCheckpointsHeightAndHash RPC method 
message QueryBtcCheckpointsHeightAndHashRequest { + uint64 start_epoch = 1; + uint64 end_epoch = 2; + // pagination defines whether to have the pagination in the response - cosmos.base.query.v1beta1.PageRequest pagination = 1; + cosmos.base.query.v1beta1.PageRequest pagination = 3; } // QueryBtcCheckpointsHeightAndHashResponse is response type for the Query/BtcCheckpointsHeightAndHash RPC method message QueryBtcCheckpointsHeightAndHashResponse { - repeated uint64 earliest_btc_block_numbers = 1; - repeated bytes earliest_btc_block_hashes = 2; + repeated uint64 epoch_numbers = 1; + repeated uint64 earliest_btc_block_numbers = 2; + repeated bytes earliest_btc_block_hashes = 3; // pagination defines the pagination in the response - cosmos.base.query.v1beta1.PageResponse pagination = 3; + cosmos.base.query.v1beta1.PageResponse pagination = 4; } message QueryEpochSubmissionsRequest { diff --git a/proto/babylon/epoching/v1/query.proto b/proto/babylon/epoching/v1/query.proto index 956f7a028..e28bf5d01 100644 --- a/proto/babylon/epoching/v1/query.proto +++ b/proto/babylon/epoching/v1/query.proto @@ -77,8 +77,11 @@ message QueryEpochInfoResponse { } message QueryEpochsInfoRequest { + uint64 start_epoch = 1; + uint64 end_epoch = 2; + // pagination defines whether to have the pagination in the response - cosmos.base.query.v1beta1.PageRequest pagination = 1; + cosmos.base.query.v1beta1.PageRequest pagination = 3; } message QueryEpochsInfoResponse { diff --git a/x/btccheckpoint/keeper/grpc_query.go b/x/btccheckpoint/keeper/grpc_query.go index c6ae6fd56..caf409287 100644 --- a/x/btccheckpoint/keeper/grpc_query.go +++ b/x/btccheckpoint/keeper/grpc_query.go @@ -109,9 +109,23 @@ func (k Keeper) BtcCheckpointsHeightAndHash(c context.Context, req *types.QueryB ctx := sdk.UnwrapSDKContext(c) + // parse start_epoch and end_epoch and forward to the pagination request + if req.EndEpoch > 0 { + // this query uses start_epoch and end_epoch to specify range + if req.StartEpoch > req.EndEpoch { + return nil, fmt.Errorf("StartEpoch (%d) should not be larger than EndEpoch (%d)", req.StartEpoch, req.EndEpoch) + } + req.Pagination = &query.PageRequest{ + Key: sdk.Uint64ToBigEndian(req.StartEpoch), + Limit: req.EndEpoch - req.StartEpoch + 1, + Reverse: false, + } + } + store := ctx.KVStore(k.storeKey) epochDataStore := prefix.NewStore(store, types.EpochDataPrefix) + epochNumbers := []uint64{} btcNumbers := []uint64{} btcHashes := [][]byte{} // iterate over epochDataStore, where key is the epoch number and value is the epoch data @@ -129,8 +143,11 @@ func (k Keeper) BtcCheckpointsHeightAndHash(c context.Context, req *types.QueryB if err != nil { return fmt.Errorf("failed to get lowest BTC height and hash in keys of epoch %d: %w", epochNum, err) } + + // append all lists btcNumbers = append(btcNumbers, lowestHeaderNumber) btcHashes = append(btcHashes, lowestHeaderHash) + epochNumbers = append(epochNumbers, epochNum) return nil }) @@ -139,6 +156,7 @@ func (k Keeper) BtcCheckpointsHeightAndHash(c context.Context, req *types.QueryB } resp := &types.QueryBtcCheckpointsHeightAndHashResponse{ + EpochNumbers: epochNumbers, EarliestBtcBlockNumbers: btcNumbers, EarliestBtcBlockHashes: btcHashes, Pagination: pageRes, diff --git a/x/btccheckpoint/types/query.pb.go b/x/btccheckpoint/types/query.pb.go index cc88e9eb8..859f6aa50 100644 --- a/x/btccheckpoint/types/query.pb.go +++ b/x/btccheckpoint/types/query.pb.go @@ -216,9 +216,12 @@ func (m *QueryBtcCheckpointHeightAndHashResponse) GetEarliestBtcBlockHash() []by return nil } +// 
QueryBtcCheckpointsHeightAndHashRequest is request type for the Query/BtcCheckpointsHeightAndHash RPC method type QueryBtcCheckpointsHeightAndHashRequest struct { + StartEpoch uint64 `protobuf:"varint,1,opt,name=start_epoch,json=startEpoch,proto3" json:"start_epoch,omitempty"` + EndEpoch uint64 `protobuf:"varint,2,opt,name=end_epoch,json=endEpoch,proto3" json:"end_epoch,omitempty"` // pagination defines whether to have the pagination in the response - Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` } func (m *QueryBtcCheckpointsHeightAndHashRequest) Reset() { @@ -256,6 +259,20 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_DiscardUnknown() { var xxx_messageInfo_QueryBtcCheckpointsHeightAndHashRequest proto.InternalMessageInfo +func (m *QueryBtcCheckpointsHeightAndHashRequest) GetStartEpoch() uint64 { + if m != nil { + return m.StartEpoch + } + return 0 +} + +func (m *QueryBtcCheckpointsHeightAndHashRequest) GetEndEpoch() uint64 { + if m != nil { + return m.EndEpoch + } + return 0 +} + func (m *QueryBtcCheckpointsHeightAndHashRequest) GetPagination() *query.PageRequest { if m != nil { return m.Pagination @@ -265,10 +282,11 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) GetPagination() *query.PageReq // QueryBtcCheckpointsHeightAndHashResponse is response type for the Query/BtcCheckpointsHeightAndHash RPC method type QueryBtcCheckpointsHeightAndHashResponse struct { - EarliestBtcBlockNumbers []uint64 `protobuf:"varint,1,rep,packed,name=earliest_btc_block_numbers,json=earliestBtcBlockNumbers,proto3" json:"earliest_btc_block_numbers,omitempty"` - EarliestBtcBlockHashes [][]byte `protobuf:"bytes,2,rep,name=earliest_btc_block_hashes,json=earliestBtcBlockHashes,proto3" json:"earliest_btc_block_hashes,omitempty"` + EpochNumbers []uint64 `protobuf:"varint,1,rep,packed,name=epoch_numbers,json=epochNumbers,proto3" json:"epoch_numbers,omitempty"` + EarliestBtcBlockNumbers []uint64 `protobuf:"varint,2,rep,packed,name=earliest_btc_block_numbers,json=earliestBtcBlockNumbers,proto3" json:"earliest_btc_block_numbers,omitempty"` + EarliestBtcBlockHashes [][]byte `protobuf:"bytes,3,rep,name=earliest_btc_block_hashes,json=earliestBtcBlockHashes,proto3" json:"earliest_btc_block_hashes,omitempty"` // pagination defines the pagination in the response - Pagination *query.PageResponse `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` + Pagination *query.PageResponse `protobuf:"bytes,4,opt,name=pagination,proto3" json:"pagination,omitempty"` } func (m *QueryBtcCheckpointsHeightAndHashResponse) Reset() { @@ -306,6 +324,13 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_DiscardUnknown() { var xxx_messageInfo_QueryBtcCheckpointsHeightAndHashResponse proto.InternalMessageInfo +func (m *QueryBtcCheckpointsHeightAndHashResponse) GetEpochNumbers() []uint64 { + if m != nil { + return m.EpochNumbers + } + return nil +} + func (m *QueryBtcCheckpointsHeightAndHashResponse) GetEarliestBtcBlockNumbers() []uint64 { if m != nil { return m.EarliestBtcBlockNumbers @@ -447,50 +472,53 @@ func init() { func init() { proto.RegisterFile("babylon/btccheckpoint/query.proto", fileDescriptor_009c1165ec392ace) } var fileDescriptor_009c1165ec392ace = []byte{ - // 683 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x4f, 0xd4, 0x4c, - 0x18, 0xdf, 0x59, 0x16, 
0xf2, 0xbe, 0x23, 0x07, 0x33, 0x12, 0x59, 0x0a, 0xd6, 0xa5, 0x89, 0x50, - 0x8d, 0xb4, 0x59, 0x08, 0x1a, 0x62, 0x62, 0xa4, 0x06, 0x25, 0x31, 0x41, 0xac, 0xf1, 0xe2, 0x85, - 0x4c, 0xeb, 0xa4, 0x6d, 0xd8, 0x76, 0xca, 0xce, 0x94, 0xb8, 0x31, 0x5e, 0xf4, 0x03, 0x68, 0xe2, - 0xd1, 0xb3, 0x5f, 0xc3, 0xab, 0x1c, 0x49, 0xbc, 0x78, 0x32, 0x06, 0xbc, 0xf9, 0x01, 0xbc, 0x9a, - 0x4e, 0x67, 0x77, 0xdd, 0xa5, 0xb5, 0x0b, 0xdc, 0x36, 0x9d, 0xdf, 0xf3, 0xfc, 0xfe, 0xcc, 0xf3, - 0xcc, 0xc2, 0x79, 0x07, 0x3b, 0x9d, 0x16, 0x8d, 0x4c, 0x87, 0xbb, 0xae, 0x4f, 0xdc, 0xdd, 0x98, - 0x06, 0x11, 0x37, 0xf7, 0x12, 0xd2, 0xee, 0x18, 0x71, 0x9b, 0x72, 0x8a, 0xea, 0x12, 0x62, 0x0c, - 0x40, 0x8c, 0xfd, 0xa6, 0x32, 0xe5, 0x51, 0x8f, 0x0a, 0x90, 0x99, 0xfe, 0xca, 0xf0, 0xca, 0x9c, - 0x47, 0xa9, 0xd7, 0x22, 0x26, 0x8e, 0x03, 0x13, 0x47, 0x11, 0xe5, 0x98, 0x07, 0x34, 0x62, 0xf2, - 0xf4, 0x86, 0x4b, 0x59, 0x48, 0x99, 0xe9, 0x60, 0x46, 0x32, 0x1a, 0x73, 0xbf, 0xe9, 0x10, 0x8e, - 0x9b, 0x66, 0x8c, 0xbd, 0x20, 0x12, 0x60, 0x89, 0xd5, 0xf2, 0xc5, 0xc5, 0xb8, 0x8d, 0xc3, 0x6e, - 0xbf, 0xeb, 0xf9, 0x98, 0x41, 0xad, 0x02, 0xaa, 0x4d, 0x41, 0xf4, 0x24, 0x25, 0xdc, 0x16, 0xf5, - 0x36, 0xd9, 0x4b, 0x08, 0xe3, 0xda, 0x33, 0x78, 0x69, 0xe0, 0x2b, 0x8b, 0x69, 0xc4, 0x08, 0xba, - 0x0b, 0x27, 0x32, 0x9e, 0x3a, 0x68, 0x00, 0xfd, 0xc2, 0x72, 0xc3, 0x28, 0x8a, 0xc1, 0xc8, 0x2a, - 0xad, 0xda, 0xc1, 0xf7, 0xab, 0x15, 0x5b, 0x56, 0x69, 0x1b, 0x70, 0x41, 0xb4, 0xb5, 0xb8, 0x7b, - 0xbf, 0x87, 0xde, 0x24, 0x81, 0xe7, 0xf3, 0xf5, 0xe8, 0xc5, 0x26, 0x66, 0xbe, 0x14, 0x80, 0x66, - 0xe1, 0xff, 0x24, 0xa6, 0xae, 0xbf, 0x13, 0x25, 0xa1, 0x20, 0xab, 0xd9, 0xff, 0x89, 0x0f, 0x5b, - 0x49, 0xa8, 0x7d, 0x04, 0x70, 0xb1, 0xb4, 0x8f, 0x94, 0xbc, 0x06, 0x67, 0x08, 0x6e, 0xb7, 0x02, - 0xc2, 0xf8, 0x8e, 0xc3, 0xdd, 0x1d, 0xa7, 0x45, 0xdd, 0xdd, 0xb4, 0xab, 0x43, 0xda, 0xb2, 0xf1, - 0xe5, 0x2e, 0xc0, 0xe2, 0xae, 0x95, 0x1e, 0x6f, 0x89, 0x53, 0xb4, 0x0a, 0xa7, 0x73, 0x4a, 0x7d, - 0xcc, 0xfc, 0x7a, 0xb5, 0x01, 0xf4, 0x49, 0x7b, 0x6a, 0xb8, 0x30, 0x65, 0xd6, 0xf6, 0xf2, 0xc4, - 0xb1, 0x5c, 0x97, 0x0f, 0x20, 0xec, 0xdf, 0xaf, 0xcc, 0x74, 0xc1, 0xc8, 0x86, 0xc1, 0x48, 0x87, - 0xc1, 0xc8, 0x66, 0x4e, 0x0e, 0x83, 0xb1, 0x8d, 0x3d, 0x22, 0x6b, 0xed, 0xbf, 0x2a, 0xb5, 0x5f, - 0x00, 0xea, 0xe5, 0x9c, 0x32, 0x91, 0x3b, 0x50, 0x29, 0x4c, 0x24, 0xbd, 0xd8, 0x31, 0xbd, 0x66, - 0x4f, 0xe7, 0x47, 0xc2, 0x0a, 0xe2, 0x4c, 0x33, 0x21, 0xac, 0x5e, 0x6d, 0x8c, 0xe9, 0x93, 0x27, - 0xe3, 0xdc, 0x14, 0xa7, 0xe8, 0xe1, 0x80, 0xd9, 0x31, 0x61, 0x76, 0xb1, 0xd4, 0x6c, 0x26, 0x7a, - 0xc0, 0xed, 0x5b, 0x00, 0xe7, 0x84, 0xdb, 0x8d, 0x74, 0x20, 0x9e, 0x26, 0x4e, 0x18, 0x30, 0x96, - 0x6e, 0xd3, 0x28, 0xc3, 0x33, 0x94, 0x79, 0xf5, 0xcc, 0x99, 0x7f, 0x02, 0xf0, 0x4a, 0x81, 0x8a, - 0x5e, 0xd0, 0xb5, 0x5d, 0xd2, 0xc9, 0x22, 0x4d, 0xad, 0x16, 0xee, 0x4a, 0xbf, 0xf8, 0x11, 0xe9, - 0xd8, 0xa2, 0x68, 0x28, 0xad, 0xea, 0x99, 0xd3, 0x5a, 0xfe, 0x3d, 0x0e, 0xc7, 0x85, 0x4e, 0xf4, - 0x0e, 0xc0, 0x89, 0x6c, 0x2d, 0xd1, 0xcd, 0x62, 0x31, 0x27, 0x5f, 0x03, 0x65, 0x69, 0x44, 0x74, - 0xc6, 0xae, 0xe9, 0x6f, 0xbe, 0xfe, 0xfc, 0x50, 0xd5, 0x50, 0xc3, 0xcc, 0x7f, 0x86, 0xf6, 0x9b, - 0xf2, 0xb5, 0x42, 0x87, 0x00, 0x2a, 0xc5, 0x3b, 0x8c, 0xee, 0x95, 0xf0, 0x96, 0x3e, 0x23, 0xca, - 0xfa, 0x39, 0x3a, 0x48, 0x37, 0x4b, 0xc2, 0xcd, 0x22, 0xba, 0x56, 0xec, 0xe6, 0x55, 0x6f, 0xda, - 0x5e, 0xa3, 0x2f, 0x00, 0xce, 0xfe, 0x63, 0x0b, 0xd1, 0xa9, 0x14, 0xe5, 0xbe, 0x1a, 0x8a, 0x75, - 0x9e, 0x16, 0xd2, 0xd5, 0xbc, 0x70, 0x35, 0x8b, 0x66, 0x0a, 0x5d, 0xa1, 0xcf, 0x00, 0x5e, 0x1c, - 0x9e, 0x6d, 0x74, 0xab, 0x84, 0xbb, 0x60, 0x25, 
0x95, 0xdb, 0xa7, 0xae, 0x93, 0x42, 0xd7, 0x84, - 0xd0, 0x15, 0xd4, 0x1c, 0x29, 0x7e, 0x93, 0xf5, 0x5b, 0x58, 0x8f, 0x0f, 0x8e, 0x54, 0x70, 0x78, - 0xa4, 0x82, 0x1f, 0x47, 0x2a, 0x78, 0x7f, 0xac, 0x56, 0x0e, 0x8f, 0xd5, 0xca, 0xb7, 0x63, 0xb5, - 0xf2, 0x7c, 0xd5, 0x0b, 0xb8, 0x9f, 0x38, 0x86, 0x4b, 0xc3, 0x6e, 0x5b, 0xd7, 0xc7, 0x41, 0xd4, - 0xe3, 0x78, 0x39, 0xc4, 0xc2, 0x3b, 0x31, 0x61, 0xce, 0x84, 0xf8, 0xcb, 0x5c, 0xf9, 0x13, 0x00, - 0x00, 0xff, 0xff, 0xf3, 0x16, 0xad, 0x29, 0x20, 0x08, 0x00, 0x00, + // 733 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x4f, 0x13, 0x41, + 0x14, 0xee, 0x6c, 0x0b, 0xd1, 0x01, 0x13, 0x33, 0x12, 0x29, 0x0b, 0x96, 0xb2, 0x46, 0xa8, 0x46, + 0x76, 0x53, 0x08, 0x1a, 0x42, 0x62, 0xa4, 0x06, 0x25, 0x31, 0x41, 0x5c, 0xe3, 0xc5, 0x4b, 0x33, + 0xbb, 0x4c, 0x76, 0x37, 0xb4, 0x3b, 0xcb, 0xce, 0x94, 0xd8, 0x18, 0x2f, 0xfa, 0x07, 0x48, 0xe2, + 0xd1, 0xb3, 0x67, 0xff, 0x03, 0xaf, 0x72, 0x24, 0xf1, 0xe2, 0xc9, 0x18, 0xf0, 0x7f, 0xf0, 0x6a, + 0x66, 0x76, 0x5a, 0x6c, 0xd9, 0xb5, 0xfc, 0xb8, 0x6d, 0x66, 0xbe, 0xf7, 0xbe, 0xef, 0x7d, 0xef, + 0xcd, 0x5b, 0x38, 0xe3, 0x60, 0xa7, 0xdd, 0xa0, 0xa1, 0xe5, 0x70, 0xd7, 0xf5, 0x89, 0xbb, 0x1d, + 0xd1, 0x20, 0xe4, 0xd6, 0x4e, 0x8b, 0xc4, 0x6d, 0x33, 0x8a, 0x29, 0xa7, 0xa8, 0xa8, 0x20, 0x66, + 0x0f, 0xc4, 0xdc, 0xad, 0xea, 0x63, 0x1e, 0xf5, 0xa8, 0x04, 0x59, 0xe2, 0x2b, 0xc1, 0xeb, 0x53, + 0x1e, 0xa5, 0x5e, 0x83, 0x58, 0x38, 0x0a, 0x2c, 0x1c, 0x86, 0x94, 0x63, 0x1e, 0xd0, 0x90, 0xa9, + 0xdb, 0x3b, 0x2e, 0x65, 0x4d, 0xca, 0x2c, 0x07, 0x33, 0x92, 0xd0, 0x58, 0xbb, 0x55, 0x87, 0x70, + 0x5c, 0xb5, 0x22, 0xec, 0x05, 0xa1, 0x04, 0x2b, 0xac, 0x91, 0x2e, 0x2e, 0xc2, 0x31, 0x6e, 0x76, + 0xf2, 0xdd, 0x4e, 0xc7, 0xf4, 0x6a, 0x95, 0x50, 0x63, 0x0c, 0xa2, 0xe7, 0x82, 0x70, 0x53, 0xc6, + 0xdb, 0x64, 0xa7, 0x45, 0x18, 0x37, 0x5e, 0xc2, 0x6b, 0x3d, 0xa7, 0x2c, 0xa2, 0x21, 0x23, 0xe8, + 0x01, 0x1c, 0x4e, 0x78, 0x8a, 0xa0, 0x0c, 0x2a, 0x23, 0x0b, 0x65, 0x33, 0xcb, 0x06, 0x33, 0x89, + 0xac, 0x15, 0xf6, 0x7f, 0x4e, 0xe7, 0x6c, 0x15, 0x65, 0xac, 0xc1, 0x59, 0x99, 0xb6, 0xc6, 0xdd, + 0x47, 0x5d, 0xf4, 0x3a, 0x09, 0x3c, 0x9f, 0xaf, 0x86, 0x5b, 0xeb, 0x98, 0xf9, 0x4a, 0x00, 0x9a, + 0x84, 0x97, 0x49, 0x44, 0x5d, 0xbf, 0x1e, 0xb6, 0x9a, 0x92, 0xac, 0x60, 0x5f, 0x92, 0x07, 0x1b, + 0xad, 0xa6, 0xf1, 0x09, 0xc0, 0xb9, 0x81, 0x79, 0x94, 0xe4, 0x65, 0x38, 0x41, 0x70, 0xdc, 0x08, + 0x08, 0xe3, 0x75, 0x87, 0xbb, 0x75, 0xa7, 0x41, 0xdd, 0x6d, 0x91, 0xd5, 0x21, 0xb1, 0x4a, 0x7c, + 0xbd, 0x03, 0xa8, 0x71, 0xb7, 0x26, 0xae, 0x37, 0xe4, 0x2d, 0x5a, 0x82, 0xe3, 0x29, 0xa1, 0x3e, + 0x66, 0x7e, 0x51, 0x2b, 0x83, 0xca, 0xa8, 0x3d, 0xd6, 0x1f, 0x28, 0x98, 0x8d, 0x2f, 0xa9, 0xea, + 0x58, 0x6a, 0x99, 0xd3, 0x70, 0x84, 0x71, 0x1c, 0xf3, 0xba, 0xac, 0x4d, 0xe9, 0x81, 0xf2, 0x68, + 0x4d, 0x9c, 0x48, 0x1f, 0xc2, 0x2d, 0x75, 0xad, 0x29, 0x1f, 0xc2, 0xad, 0xe4, 0xf2, 0x31, 0x84, + 0xc7, 0xe3, 0x51, 0xcc, 0xcb, 0x96, 0xcc, 0x9a, 0xc9, 0x2c, 0x99, 0x62, 0x96, 0xcc, 0x64, 0x64, + 0xd5, 0x2c, 0x99, 0x9b, 0xd8, 0x23, 0x8a, 0xd9, 0xfe, 0x27, 0xd2, 0xd8, 0xd3, 0x60, 0x65, 0xb0, + 0x62, 0x65, 0xe8, 0x4d, 0x78, 0xa5, 0xdb, 0x19, 0x87, 0xc4, 0x62, 0x14, 0xf2, 0x95, 0x82, 0x3d, + 0xda, 0xe9, 0x8e, 0x38, 0x43, 0x2b, 0x50, 0xcf, 0x74, 0x9d, 0x15, 0x35, 0x19, 0x31, 0x9e, 0x6e, + 0x3b, 0xcb, 0x68, 0x99, 0xf0, 0x9d, 0xb0, 0x62, 0xbe, 0x9c, 0xaf, 0x8c, 0x9e, 0x6c, 0xd9, 0xba, + 0xbc, 0x45, 0x4f, 0x7a, 0x1c, 0x29, 0x48, 0x47, 0xe6, 0x06, 0x3a, 0x92, 0x54, 0xd6, 0x63, 0xc9, + 0x7b, 0x00, 0xa7, 0xa4, 0x25, 0xd2, 0xe9, 0x17, 0x2d, 
0xa7, 0x19, 0x30, 0x26, 0x5e, 0xec, 0x69, + 0x06, 0xb4, 0xaf, 0x31, 0xda, 0xb9, 0x1b, 0xf3, 0x19, 0xc0, 0x1b, 0x19, 0x2a, 0x54, 0x37, 0x56, + 0x60, 0x61, 0x9b, 0xb4, 0x93, 0x26, 0x88, 0x52, 0x33, 0xdf, 0xe3, 0x71, 0xf0, 0x53, 0xd2, 0xb6, + 0x65, 0x50, 0x9f, 0x5b, 0xda, 0xb9, 0xdd, 0x5a, 0xf8, 0x33, 0x04, 0x87, 0xa4, 0x4e, 0xf4, 0x01, + 0xc0, 0xe1, 0xe4, 0xe9, 0xa3, 0xbb, 0xd9, 0x62, 0x4e, 0x6e, 0x1c, 0x7d, 0xfe, 0x94, 0xe8, 0x84, + 0xdd, 0xa8, 0xbc, 0xfb, 0xfe, 0xfb, 0xa3, 0x66, 0xa0, 0xb2, 0x95, 0xbe, 0xea, 0x76, 0xab, 0x6a, + 0x23, 0xa2, 0x03, 0x00, 0xf5, 0xec, 0x3d, 0x81, 0x1e, 0x0e, 0xe0, 0x1d, 0xb8, 0xaa, 0xf4, 0xd5, + 0x0b, 0x64, 0x50, 0xd5, 0xcc, 0xcb, 0x6a, 0xe6, 0xd0, 0xad, 0xec, 0x6a, 0xde, 0x74, 0xa7, 0xed, + 0x2d, 0xfa, 0x06, 0xe0, 0xe4, 0x7f, 0x9e, 0x2a, 0x3a, 0x93, 0xa2, 0xd4, 0xc5, 0xa4, 0xd7, 0x2e, + 0x92, 0x42, 0x55, 0x35, 0x23, 0xab, 0x9a, 0x44, 0x13, 0x99, 0x55, 0xa1, 0xaf, 0x00, 0x5e, 0xed, + 0x9f, 0x6d, 0x74, 0x6f, 0x00, 0x77, 0xc6, 0x93, 0xd4, 0xef, 0x9f, 0x39, 0x4e, 0x09, 0x5d, 0x96, + 0x42, 0x17, 0x51, 0xf5, 0x54, 0xf6, 0x5b, 0xec, 0x38, 0x45, 0xed, 0xd9, 0xfe, 0x61, 0x09, 0x1c, + 0x1c, 0x96, 0xc0, 0xaf, 0xc3, 0x12, 0xd8, 0x3b, 0x2a, 0xe5, 0x0e, 0x8e, 0x4a, 0xb9, 0x1f, 0x47, + 0xa5, 0xdc, 0xab, 0x25, 0x2f, 0xe0, 0x7e, 0xcb, 0x31, 0x5d, 0xda, 0xec, 0xa4, 0x75, 0x7d, 0x1c, + 0x84, 0x5d, 0x8e, 0xd7, 0x7d, 0x2c, 0xbc, 0x1d, 0x11, 0xe6, 0x0c, 0xcb, 0xdf, 0xf2, 0xe2, 0xdf, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0xe7, 0xea, 0xb4, 0x84, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -836,7 +864,17 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) MarshalToSizedBuffer(dAtA []by i = encodeVarintQuery(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a + } + if m.EndEpoch != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EndEpoch)) + i-- + dAtA[i] = 0x10 + } + if m.StartEpoch != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.StartEpoch)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -871,7 +909,7 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) MarshalToSizedBuffer(dAtA []b i = encodeVarintQuery(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } if len(m.EarliestBtcBlockHashes) > 0 { for iNdEx := len(m.EarliestBtcBlockHashes) - 1; iNdEx >= 0; iNdEx-- { @@ -879,7 +917,7 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) MarshalToSizedBuffer(dAtA []b copy(dAtA[i:], m.EarliestBtcBlockHashes[iNdEx]) i = encodeVarintQuery(dAtA, i, uint64(len(m.EarliestBtcBlockHashes[iNdEx]))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } } if len(m.EarliestBtcBlockNumbers) > 0 { @@ -898,6 +936,24 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) MarshalToSizedBuffer(dAtA []b copy(dAtA[i:], dAtA5[:j4]) i = encodeVarintQuery(dAtA, i, uint64(j4)) i-- + dAtA[i] = 0x12 + } + if len(m.EpochNumbers) > 0 { + dAtA7 := make([]byte, len(m.EpochNumbers)*10) + var j6 int + for _, num := range m.EpochNumbers { + for num >= 1<<7 { + dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j6++ + } + dAtA7[j6] = uint8(num) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA7[:j6]) + i = encodeVarintQuery(dAtA, i, uint64(j6)) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil @@ -1057,6 +1113,12 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) Size() (n int) { } var l int _ = l + if m.StartEpoch != 0 { + n += 1 + sovQuery(uint64(m.StartEpoch)) + } + if m.EndEpoch != 0 { + n += 1 + sovQuery(uint64(m.EndEpoch)) + } if m.Pagination != nil { l = m.Pagination.Size() n += 1 + l + sovQuery(uint64(l)) @@ -1070,6 +1132,13 @@ func (m 
*QueryBtcCheckpointsHeightAndHashResponse) Size() (n int) { } var l int _ = l + if len(m.EpochNumbers) > 0 { + l = 0 + for _, e := range m.EpochNumbers { + l += sovQuery(uint64(e)) + } + n += 1 + sovQuery(uint64(l)) + l + } if len(m.EarliestBtcBlockNumbers) > 0 { l = 0 for _, e := range m.EarliestBtcBlockNumbers { @@ -1466,6 +1535,44 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartEpoch", wireType) + } + m.StartEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartEpoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndEpoch", wireType) + } + m.EndEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndEpoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) } @@ -1552,6 +1659,82 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) Unmarshal(dAtA []byte) error } switch fieldNum { case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EpochNumbers = append(m.EpochNumbers, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.EpochNumbers) == 0 { + m.EpochNumbers = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EpochNumbers = append(m.EpochNumbers, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNumbers", wireType) + } + case 2: if wireType == 0 { var v uint64 for shift := uint(0); ; shift += 7 { @@ -1627,7 +1810,7 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) Unmarshal(dAtA []byte) error } else { return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockNumbers", wireType) } - case 2: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockHashes", wireType) } @@ -1659,7 +1842,7 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) Unmarshal(dAtA []byte) error m.EarliestBtcBlockHashes = append(m.EarliestBtcBlockHashes, make([]byte, postIndex-iNdEx)) 
 			copy(m.EarliestBtcBlockHashes[len(m.EarliestBtcBlockHashes)-1], dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
-		case 3:
+		case 4:
 			if wireType != 2 {
 				return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
 			}
diff --git a/x/epoching/keeper/grpc_query.go b/x/epoching/keeper/grpc_query.go
index 270018ebc..f76c5f81a 100644
--- a/x/epoching/keeper/grpc_query.go
+++ b/x/epoching/keeper/grpc_query.go
@@ -3,6 +3,7 @@ package keeper
 import (
 	"context"
 	"errors"
+	"fmt"
 
 	"cosmossdk.io/math"
 
@@ -58,6 +59,19 @@ func (k Keeper) EpochInfo(c context.Context, req *types.QueryEpochInfoRequest) (
 func (k Keeper) EpochsInfo(c context.Context, req *types.QueryEpochsInfoRequest) (*types.QueryEpochsInfoResponse, error) {
 	ctx := sdk.UnwrapSDKContext(c)
 
+	// parse start_epoch and end_epoch and forward to the pagination request
+	if req.EndEpoch > 0 {
+		// this query uses start_epoch and end_epoch to specify range
+		if req.StartEpoch > req.EndEpoch {
+			return nil, fmt.Errorf("StartEpoch (%d) should not be larger than EndEpoch (%d)", req.StartEpoch, req.EndEpoch)
+		}
+		req.Pagination = &query.PageRequest{
+			Key:     sdk.Uint64ToBigEndian(req.StartEpoch),
+			Limit:   req.EndEpoch - req.StartEpoch + 1,
+			Reverse: false,
+		}
+	}
+
 	epochInfoStore := k.epochInfoStore(ctx)
 	epochs := []*types.Epoch{}
 	pageRes, err := query.Paginate(epochInfoStore, req.Pagination, func(key, value []byte) error {
diff --git a/x/epoching/keeper/grpc_query_test.go b/x/epoching/keeper/grpc_query_test.go
index 663b0aeea..2f3f2a10e 100644
--- a/x/epoching/keeper/grpc_query_test.go
+++ b/x/epoching/keeper/grpc_query_test.go
@@ -124,6 +124,43 @@ func FuzzEpochsInfo(f *testing.F) {
 	})
 }
 
+func FuzzEpochsInfo_QueryParams(f *testing.F) {
+	datagen.AddRandomSeedsToFuzzer(f, 10)
+
+	f.Fuzz(func(t *testing.T, seed int64) {
+		rand.Seed(seed)
+		numEpochs := datagen.RandomInt(10) + 2
+
+		endEpoch := rand.Uint64()%(numEpochs-1) + 1
+		startEpoch := rand.Uint64() % endEpoch
+
+		helper := testepoching.NewHelper(t)
+		ctx, keeper, queryClient := helper.Ctx, helper.EpochingKeeper, helper.QueryClient
+		wctx := sdk.WrapSDKContext(ctx)
+
+		// enter the first block of the numEpochs'th epoch
+		epochInterval := keeper.GetParams(ctx).EpochInterval
+		for i := uint64(0); i < numEpochs-1; i++ {
+			for j := uint64(0); j < epochInterval; j++ {
+				helper.GenAndApplyEmptyBlock()
+			}
+		}
+
+		// query epoch infos in the range [startEpoch, endEpoch]
+		req := types.QueryEpochsInfoRequest{
+			StartEpoch: startEpoch,
+			EndEpoch:   endEpoch,
+		}
+		resp, err := queryClient.EpochsInfo(wctx, &req)
+		require.NoError(t, err)
+
+		require.Equal(t, endEpoch-startEpoch+1, uint64(len(resp.Epochs)))
+		for i, epoch := range resp.Epochs {
+			require.Equal(t, uint64(i)+startEpoch, epoch.EpochNumber)
+		}
+	})
+}
+
 // FuzzEpochMsgsQuery fuzzes queryClient.EpochMsgs
 // 1. randomly generate msgs and limit in pagination
 // 2. 
check the returned msg was previously enqueued diff --git a/x/epoching/types/query.pb.go b/x/epoching/types/query.pb.go index 21ce48ecd..4285d57a6 100644 --- a/x/epoching/types/query.pb.go +++ b/x/epoching/types/query.pb.go @@ -202,8 +202,10 @@ func (m *QueryEpochInfoResponse) GetEpoch() *Epoch { } type QueryEpochsInfoRequest struct { + StartEpoch uint64 `protobuf:"varint,1,opt,name=start_epoch,json=startEpoch,proto3" json:"start_epoch,omitempty"` + EndEpoch uint64 `protobuf:"varint,2,opt,name=end_epoch,json=endEpoch,proto3" json:"end_epoch,omitempty"` // pagination defines whether to have the pagination in the response - Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` } func (m *QueryEpochsInfoRequest) Reset() { *m = QueryEpochsInfoRequest{} } @@ -239,6 +241,20 @@ func (m *QueryEpochsInfoRequest) XXX_DiscardUnknown() { var xxx_messageInfo_QueryEpochsInfoRequest proto.InternalMessageInfo +func (m *QueryEpochsInfoRequest) GetStartEpoch() uint64 { + if m != nil { + return m.StartEpoch + } + return 0 +} + +func (m *QueryEpochsInfoRequest) GetEndEpoch() uint64 { + if m != nil { + return m.EndEpoch + } + return 0 +} + func (m *QueryEpochsInfoRequest) GetPagination() *query.PageRequest { if m != nil { return m.Pagination @@ -932,74 +948,75 @@ func init() { func init() { proto.RegisterFile("babylon/epoching/v1/query.proto", fileDescriptor_1821b530f2ec2711) } var fileDescriptor_1821b530f2ec2711 = []byte{ - // 1071 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x97, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0xb3, 0x89, 0x1b, 0x92, 0x97, 0x96, 0xc2, 0xa4, 0x40, 0xbb, 0x29, 0x4e, 0xb4, 0x85, - 0x26, 0x24, 0xcd, 0x6e, 0x9c, 0xa4, 0x45, 0xfd, 0x01, 0x88, 0x84, 0x1f, 0xa2, 0x4a, 0x51, 0xba, - 0x48, 0x39, 0x70, 0x31, 0x63, 0xef, 0x64, 0xb3, 0xd2, 0x7a, 0xc7, 0xdd, 0x1f, 0x06, 0xab, 0x04, - 0x21, 0xce, 0x1c, 0x90, 0x90, 0x40, 0xbd, 0x21, 0x71, 0xe4, 0x4f, 0x80, 0x03, 0xc7, 0x1e, 0x83, - 0xb8, 0x70, 0x42, 0x28, 0xe1, 0x0f, 0x41, 0xfb, 0x66, 0xd6, 0x5e, 0xbb, 0xb3, 0xb1, 0x13, 0x45, - 0xdc, 0xda, 0x99, 0xf7, 0xe3, 0xf3, 0xde, 0x9b, 0x7d, 0x5f, 0x07, 0x66, 0x6b, 0xb4, 0xd6, 0xf6, - 0x79, 0x60, 0xb1, 0x26, 0xaf, 0xef, 0x79, 0x81, 0x6b, 0xb5, 0x2a, 0xd6, 0xa3, 0x84, 0x85, 0x6d, - 0xb3, 0x19, 0xf2, 0x98, 0x93, 0x69, 0x69, 0x60, 0x66, 0x06, 0x66, 0xab, 0xa2, 0x5f, 0x72, 0xb9, - 0xcb, 0xf1, 0xde, 0x4a, 0xff, 0x25, 0x4c, 0xf5, 0xab, 0x2e, 0xe7, 0xae, 0xcf, 0x2c, 0xda, 0xf4, - 0x2c, 0x1a, 0x04, 0x3c, 0xa6, 0xb1, 0xc7, 0x83, 0x48, 0xde, 0x2e, 0xd6, 0x79, 0xd4, 0xe0, 0x91, - 0x55, 0xa3, 0x11, 0x13, 0x19, 0xac, 0x56, 0xa5, 0xc6, 0x62, 0x5a, 0xb1, 0x9a, 0xd4, 0xf5, 0x02, - 0x34, 0x96, 0xb6, 0x73, 0x2a, 0xaa, 0x26, 0x0d, 0x69, 0x23, 0x8b, 0x66, 0xa8, 0x2c, 0x3a, 0x88, - 0x68, 0x63, 0x5c, 0x02, 0xf2, 0x30, 0xcd, 0xb3, 0x8d, 0x8e, 0x36, 0x7b, 0x94, 0xb0, 0x28, 0x36, - 0xb6, 0x61, 0xba, 0xe7, 0x34, 0x6a, 0xf2, 0x20, 0x62, 0xe4, 0x36, 0x8c, 0x8b, 0x04, 0x97, 0xb5, - 0x39, 0x6d, 0x61, 0x6a, 0x75, 0xc6, 0x54, 0x14, 0x6e, 0x0a, 0xa7, 0x8d, 0xd2, 0xd3, 0xbf, 0x67, - 0x47, 0x6c, 0xe9, 0x60, 0xac, 0xc3, 0x4b, 0x18, 0xf1, 0xfd, 0xd4, 0xf0, 0xa3, 0x60, 0x97, 0xcb, - 0x54, 0x64, 0x06, 0x26, 0xd1, 0xb9, 0x1a, 0x24, 0x0d, 0x0c, 0x5b, 0xb2, 0x27, 0xf0, 0xe0, 0xe3, - 0xa4, 0x61, 0xdc, 0x87, 0x97, 0xfb, 0xbd, 0x24, 0xca, 0x0a, 0x9c, 0x43, 0x2b, 0x49, 0xa2, 0x2b, - 0x49, 0xd0, 0xcd, 0x16, 0x86, 0xc6, 0x67, 0xf9, 0x58, 
0x51, 0x1e, 0xe1, 0x03, 0x80, 0x6e, 0x77, - 0x65, 0xc0, 0xeb, 0xa6, 0x18, 0x85, 0x99, 0x8e, 0xc2, 0x14, 0xc3, 0x96, 0xa3, 0x30, 0xb7, 0xa9, - 0xcb, 0xa4, 0xaf, 0x9d, 0xf3, 0x34, 0x7e, 0xd0, 0xe0, 0x95, 0x67, 0x52, 0x48, 0xde, 0x55, 0x18, - 0x47, 0x8c, 0xb4, 0x75, 0x63, 0x03, 0x80, 0xa5, 0x25, 0xf9, 0xb0, 0x87, 0x6b, 0x14, 0xb9, 0xe6, - 0x07, 0x72, 0x89, 0x84, 0x3d, 0x60, 0x3a, 0x5c, 0x46, 0xae, 0xcd, 0x24, 0x0c, 0x59, 0x10, 0x8b, - 0x2c, 0x72, 0xd4, 0x2e, 0x5c, 0x51, 0xdc, 0x49, 0xea, 0x6b, 0x70, 0xa1, 0x2e, 0xce, 0xab, 0xdd, - 0x6e, 0x97, 0xec, 0xf3, 0xf5, 0x9c, 0x31, 0x79, 0x1d, 0x9e, 0x17, 0x13, 0xac, 0xf1, 0x24, 0x70, - 0x68, 0xd8, 0x46, 0xd4, 0x92, 0x7d, 0x01, 0x4f, 0x37, 0xe4, 0xa1, 0xf1, 0x65, 0xfe, 0x05, 0x3c, - 0x88, 0xdc, 0x68, 0x98, 0x17, 0xd0, 0x37, 0x9b, 0xd1, 0x53, 0xcf, 0xe6, 0x89, 0x96, 0x1f, 0xbf, - 0x48, 0x2f, 0x8b, 0xbc, 0x05, 0xa5, 0x46, 0xe4, 0x66, 0x83, 0x31, 0x94, 0x83, 0x79, 0x98, 0xb0, - 0x84, 0x39, 0x0f, 0x58, 0x14, 0xa5, 0xf1, 0xd1, 0xfe, 0xec, 0xc6, 0xf3, 0xb3, 0x06, 0x33, 0xc8, - 0xb6, 0x45, 0x63, 0x16, 0xc5, 0xca, 0x06, 0x05, 0x4e, 0xcf, 0x04, 0x26, 0x58, 0xe0, 0x88, 0xee, - 0xcf, 0xc2, 0x94, 0xe8, 0x5e, 0x9d, 0x27, 0x41, 0x2c, 0x5b, 0x0f, 0x78, 0xb4, 0x99, 0x9e, 0xf4, - 0x75, 0x70, 0xec, 0xd4, 0x1d, 0xfc, 0x55, 0x83, 0xab, 0x6a, 0x4a, 0xd9, 0x47, 0x1b, 0x5e, 0xf4, - 0xf1, 0x4a, 0x90, 0x56, 0x73, 0x4d, 0xbd, 0x3e, 0xb8, 0xa9, 0x5b, 0x5e, 0x14, 0xdb, 0x17, 0xfd, - 0xde, 0xd8, 0x67, 0xd7, 0xe3, 0xbb, 0x50, 0x46, 0xf8, 0x1d, 0xea, 0x7b, 0x0e, 0x8d, 0x79, 0xb8, - 0xe5, 0xed, 0xb2, 0x7a, 0xbb, 0xee, 0x67, 0xb5, 0x92, 0x2b, 0x30, 0xd1, 0xa2, 0x7e, 0x95, 0x3a, - 0x4e, 0x88, 0x4d, 0x9e, 0xb4, 0x9f, 0x6b, 0x51, 0xff, 0x5d, 0xc7, 0x09, 0x0d, 0x06, 0xb3, 0x85, - 0xce, 0xb2, 0xf8, 0x0d, 0xe1, 0xed, 0x7b, 0xbb, 0x4c, 0x6e, 0x90, 0x79, 0x65, 0xcd, 0x8a, 0x10, - 0x69, 0x9a, 0xf4, 0x7f, 0xc6, 0x3d, 0x99, 0xe6, 0x3d, 0xe6, 0x33, 0x17, 0xb1, 0x55, 0x90, 0x0e, - 0xeb, 0x85, 0x74, 0x98, 0x80, 0x74, 0x61, 0xae, 0xd8, 0x5b, 0x52, 0x6e, 0x0a, 0xf7, 0x1c, 0xe5, - 0x82, 0x92, 0x52, 0x15, 0x23, 0x4d, 0x84, 0x98, 0x5f, 0xe5, 0xb7, 0xdc, 0x0e, 0xf5, 0x3f, 0x61, - 0xf1, 0xff, 0xfa, 0x29, 0xff, 0xa1, 0xc9, 0x75, 0xd6, 0x03, 0x20, 0x2b, 0x7c, 0x1b, 0xa0, 0x95, - 0xb5, 0x38, 0x7b, 0x7d, 0xe5, 0xe3, 0x27, 0x61, 0xe7, 0x3c, 0xc8, 0x0d, 0x20, 0x31, 0x8f, 0xa9, - 0x5f, 0x6d, 0xf1, 0xd8, 0x0b, 0xdc, 0x6a, 0x93, 0x7f, 0xce, 0x42, 0x84, 0x1d, 0xb3, 0x5f, 0xc0, - 0x9b, 0x1d, 0xbc, 0xd8, 0x4e, 0xcf, 0xfb, 0x9e, 0xe7, 0xd8, 0xa9, 0x9f, 0xe7, 0xea, 0xc1, 0x14, - 0x9c, 0xc3, 0x9a, 0xc8, 0xd7, 0x1a, 0x8c, 0x0b, 0x05, 0x25, 0xf3, 0x45, 0x5f, 0x4d, 0x9f, 0x5c, - 0xeb, 0x0b, 0x83, 0x0d, 0x45, 0x4e, 0xe3, 0xda, 0x37, 0x7f, 0xfe, 0xfb, 0xfd, 0xe8, 0xab, 0x64, - 0xc6, 0x2a, 0xfe, 0xf5, 0x40, 0x7e, 0xd4, 0x60, 0xb2, 0xa3, 0xb8, 0x64, 0xb1, 0x38, 0x78, 0xbf, - 0x98, 0xeb, 0x4b, 0x43, 0xd9, 0x4a, 0x96, 0x0a, 0xb2, 0x2c, 0x91, 0x37, 0xac, 0xc2, 0xdf, 0x29, - 0x91, 0xf5, 0xb8, 0xf3, 0x9e, 0xde, 0x5a, 0xdc, 0x27, 0xdf, 0x6a, 0x00, 0x5d, 0x71, 0x25, 0x83, - 0xd2, 0xe5, 0x55, 0x5e, 0xbf, 0x31, 0x9c, 0xf1, 0x50, 0x8d, 0x92, 0x02, 0xfd, 0x44, 0x83, 0xf3, - 0x79, 0xdd, 0x24, 0xcb, 0xc5, 0x39, 0x14, 0xda, 0xab, 0x9b, 0xc3, 0x9a, 0x4b, 0xa8, 0x45, 0x84, - 0x7a, 0x8d, 0x18, 0x4a, 0xa8, 0x1e, 0xa5, 0x26, 0x3f, 0x65, 0x43, 0xc4, 0x3d, 0x3a, 0x68, 0x88, - 0x39, 0xb9, 0x19, 0x38, 0xc4, 0xfc, 0xd2, 0x37, 0xee, 0x20, 0xd2, 0x3a, 0x59, 0x1d, 0x7a, 0x88, - 0x56, 0x43, 0x2c, 0xfc, 0x88, 0xfc, 0xa2, 0xc1, 0xc5, 0x3e, 0x31, 0x21, 0x2b, 0xc5, 0xc9, 0xd5, - 0xea, 0xa8, 0x57, 0x4e, 0xe0, 0x21, 0xa1, 0xd7, 0x10, 0x7a, 0x99, 0x2c, 0x1d, 
0x03, 0x7d, 0x47, - 0x48, 0x51, 0x97, 0xf6, 0x37, 0x0d, 0xc8, 0xb3, 0xdb, 0x9b, 0xac, 0x15, 0xa7, 0x2f, 0xd4, 0x1a, - 0x7d, 0xfd, 0x64, 0x4e, 0x12, 0xfb, 0x2e, 0x62, 0xdf, 0x24, 0x6b, 0x4a, 0xec, 0xce, 0x12, 0xc3, - 0xf5, 0x8e, 0x9e, 0xd6, 0xe3, 0x4c, 0xd1, 0xf6, 0xc9, 0xef, 0x1a, 0x4c, 0x2b, 0xd6, 0x3a, 0x39, - 0x06, 0xa5, 0x58, 0x87, 0xf4, 0x9b, 0x27, 0xf4, 0x92, 0x15, 0xdc, 0xc3, 0x0a, 0x6e, 0x91, 0x75, - 0x65, 0x05, 0x4e, 0xc7, 0x33, 0x5f, 0x42, 0xa6, 0x77, 0xfb, 0xe9, 0x7b, 0x99, 0xca, 0xed, 0x7c, - 0x32, 0xe8, 0x8b, 0xee, 0xd1, 0x26, 0x7d, 0x79, 0x48, 0x6b, 0x89, 0xfa, 0x0e, 0xa2, 0xde, 0x26, - 0x6f, 0x0e, 0xff, 0xb0, 0xbb, 0x13, 0x88, 0x58, 0xbc, 0x71, 0xff, 0xe9, 0x61, 0x59, 0x3b, 0x38, - 0x2c, 0x6b, 0xff, 0x1c, 0x96, 0xb5, 0xef, 0x8e, 0xca, 0x23, 0x07, 0x47, 0xe5, 0x91, 0xbf, 0x8e, - 0xca, 0x23, 0x9f, 0xae, 0xb8, 0x5e, 0xbc, 0x97, 0xd4, 0xcc, 0x3a, 0x6f, 0x64, 0xc1, 0xeb, 0x7b, - 0xd4, 0x0b, 0x3a, 0x99, 0xbe, 0xe8, 0xe6, 0x8a, 0xdb, 0x4d, 0x16, 0xd5, 0xc6, 0xf1, 0x8f, 0xb5, - 0xb5, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x76, 0x01, 0x0c, 0x73, 0x8a, 0x0e, 0x00, 0x00, + // 1088 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x97, 0xcd, 0x6f, 0xdc, 0x44, + 0x14, 0xc0, 0xe3, 0x24, 0x0d, 0xed, 0x4b, 0x4b, 0x61, 0x52, 0x20, 0x75, 0xca, 0x26, 0x72, 0xa1, + 0x09, 0x49, 0x63, 0x67, 0x93, 0xb4, 0xa8, 0x1f, 0x80, 0x48, 0xf8, 0x10, 0x55, 0x8a, 0x52, 0x23, + 0xe5, 0xc0, 0x65, 0x35, 0xbb, 0x9e, 0x38, 0x96, 0xbc, 0x9e, 0xad, 0xc7, 0x5e, 0x58, 0x95, 0x20, + 0xc4, 0x99, 0x03, 0x12, 0x12, 0xa8, 0x17, 0x84, 0xc4, 0x91, 0x3f, 0x01, 0x0e, 0x1c, 0x7b, 0x0c, + 0xe2, 0xc2, 0x09, 0xa1, 0x84, 0x3f, 0x04, 0xf9, 0xcd, 0x78, 0xd7, 0xde, 0xda, 0xd9, 0x4d, 0x15, + 0x71, 0x4b, 0x66, 0xde, 0xc7, 0xef, 0x7d, 0xf8, 0xbd, 0x59, 0x98, 0xad, 0xd3, 0x7a, 0xc7, 0xe7, + 0x81, 0xc5, 0x5a, 0xbc, 0xb1, 0xe7, 0x05, 0xae, 0xd5, 0xae, 0x5a, 0x0f, 0x63, 0x16, 0x76, 0xcc, + 0x56, 0xc8, 0x23, 0x4e, 0xa6, 0x94, 0x80, 0x99, 0x0a, 0x98, 0xed, 0xaa, 0x7e, 0xc9, 0xe5, 0x2e, + 0xc7, 0x7b, 0x2b, 0xf9, 0x4b, 0x8a, 0xea, 0x57, 0x5c, 0xce, 0x5d, 0x9f, 0x59, 0xb4, 0xe5, 0x59, + 0x34, 0x08, 0x78, 0x44, 0x23, 0x8f, 0x07, 0x42, 0xdd, 0x2e, 0x36, 0xb8, 0x68, 0x72, 0x61, 0xd5, + 0xa9, 0x60, 0xd2, 0x83, 0xd5, 0xae, 0xd6, 0x59, 0x44, 0xab, 0x56, 0x8b, 0xba, 0x5e, 0x80, 0xc2, + 0x4a, 0x76, 0xae, 0x88, 0xaa, 0x45, 0x43, 0xda, 0x4c, 0xad, 0x19, 0x45, 0x12, 0x5d, 0x44, 0x94, + 0x31, 0x2e, 0x01, 0x79, 0x90, 0xf8, 0xd9, 0x46, 0x45, 0x9b, 0x3d, 0x8c, 0x99, 0x88, 0x8c, 0x6d, + 0x98, 0xca, 0x9d, 0x8a, 0x16, 0x0f, 0x04, 0x23, 0xb7, 0x60, 0x42, 0x3a, 0x98, 0xd6, 0xe6, 0xb4, + 0x85, 0xc9, 0xd5, 0x19, 0xb3, 0x20, 0x70, 0x53, 0x2a, 0x6d, 0x8c, 0x3f, 0xf9, 0x7b, 0x76, 0xc4, + 0x56, 0x0a, 0xc6, 0x3a, 0xbc, 0x84, 0x16, 0xdf, 0x4f, 0x04, 0x3f, 0x0a, 0x76, 0xb9, 0x72, 0x45, + 0x66, 0xe0, 0x1c, 0x2a, 0xd7, 0x82, 0xb8, 0x89, 0x66, 0xc7, 0xed, 0xb3, 0x78, 0xf0, 0x71, 0xdc, + 0x34, 0xee, 0xc1, 0xcb, 0xfd, 0x5a, 0x0a, 0x65, 0x05, 0xce, 0xa0, 0x94, 0x22, 0xd1, 0x0b, 0x49, + 0x50, 0xcd, 0x96, 0x82, 0xc6, 0x8f, 0x5a, 0xd6, 0x98, 0xc8, 0x32, 0xcc, 0xc2, 0xa4, 0x88, 0x68, + 0x18, 0xd5, 0x7a, 0x26, 0xc7, 0x6d, 0xc0, 0x23, 0x14, 0x46, 0xc8, 0xc0, 0x51, 0xd7, 0xa3, 0x0a, + 0x32, 0x70, 0xe4, 0xe5, 0x07, 0x00, 0xbd, 0xe2, 0x4c, 0x8f, 0x21, 0xcf, 0x35, 0x53, 0x56, 0xd2, + 0x4c, 0x2a, 0x69, 0xca, 0x5e, 0x51, 0x95, 0x34, 0xb7, 0xa9, 0xcb, 0x94, 0x67, 0x3b, 0xa3, 0x69, + 0x7c, 0xaf, 0xc1, 0x2b, 0x4f, 0x01, 0xaa, 0x70, 0x57, 0x61, 0x02, 0x9d, 0x27, 0x99, 0x1f, 0x1b, + 0x10, 0xaf, 0x92, 0x24, 0x1f, 0xe6, 0xb8, 0x46, 0x91, 0x6b, 
0x7e, 0x20, 0x97, 0x74, 0x98, 0x03, + 0xd3, 0x61, 0x1a, 0xb9, 0x36, 0xe3, 0x30, 0x64, 0x81, 0x4c, 0x49, 0xda, 0x29, 0x2e, 0x5c, 0x2e, + 0xb8, 0x53, 0xd4, 0x57, 0xe1, 0x42, 0x43, 0x9e, 0xe7, 0x32, 0x7b, 0xbe, 0x91, 0x11, 0x26, 0xaf, + 0xc3, 0xf3, 0xb2, 0x01, 0xea, 0x3c, 0x0e, 0x1c, 0x1a, 0x76, 0x54, 0x82, 0x2f, 0xe0, 0xe9, 0x86, + 0x3a, 0x34, 0xbe, 0xc8, 0x36, 0xd0, 0x7d, 0xe1, 0x8a, 0x61, 0x1a, 0xa8, 0xaf, 0x36, 0xa3, 0xcf, + 0x5c, 0x9b, 0xc7, 0xb9, 0xe6, 0x91, 0xee, 0x55, 0x90, 0x37, 0x61, 0xbc, 0x29, 0xdc, 0xb4, 0x30, + 0x46, 0x61, 0x61, 0x1e, 0xc4, 0x2c, 0x66, 0xce, 0x7d, 0x26, 0x44, 0x62, 0x1f, 0xe5, 0x4f, 0xaf, + 0x3c, 0x3f, 0x6b, 0x30, 0x83, 0x6c, 0x5b, 0x34, 0x62, 0x22, 0x2a, 0x4c, 0x50, 0xb7, 0x79, 0xb5, + 0xbe, 0xe6, 0x9d, 0x85, 0x49, 0x99, 0xbd, 0x06, 0x8f, 0x83, 0x48, 0xa5, 0x1e, 0xf0, 0x68, 0x33, + 0x39, 0x39, 0xb5, 0xee, 0xfe, 0x55, 0x83, 0x2b, 0xc5, 0x94, 0x2a, 0x8f, 0x36, 0xbc, 0xe8, 0xe3, + 0x95, 0x24, 0xad, 0x65, 0x92, 0x7a, 0x6d, 0x70, 0x52, 0xb7, 0x3c, 0x11, 0xd9, 0x17, 0xfd, 0xbc, + 0xed, 0xd3, 0xcb, 0xf1, 0x1d, 0xa8, 0x20, 0xfc, 0x0e, 0xf5, 0x3d, 0x87, 0x46, 0x3c, 0xdc, 0xf2, + 0x76, 0x59, 0xa3, 0xd3, 0xf0, 0xd3, 0x58, 0xc9, 0x65, 0x38, 0xdb, 0xa6, 0x7e, 0x8d, 0x3a, 0x4e, + 0x88, 0x49, 0x3e, 0x67, 0x3f, 0xd7, 0xa6, 0xfe, 0xbb, 0x8e, 0x13, 0x1a, 0x0c, 0x66, 0x4b, 0x95, + 0x55, 0xf0, 0x1b, 0x52, 0xdb, 0xf7, 0x76, 0x99, 0x9a, 0x68, 0xf3, 0x85, 0x31, 0x17, 0x98, 0x48, + 0xdc, 0x24, 0xff, 0x19, 0x77, 0x95, 0x9b, 0xf7, 0x98, 0xcf, 0x5c, 0xc4, 0x2e, 0x82, 0x74, 0x58, + 0x1e, 0xd2, 0x61, 0x12, 0xd2, 0x85, 0xb9, 0x72, 0x6d, 0x45, 0xb9, 0x29, 0xd5, 0x33, 0x94, 0x0b, + 0x85, 0x94, 0x45, 0x36, 0x12, 0x47, 0x88, 0xf9, 0x65, 0x76, 0xca, 0xed, 0x50, 0xff, 0x13, 0x16, + 0xfd, 0xaf, 0x9f, 0xf2, 0x1f, 0x9a, 0x1a, 0x67, 0x39, 0x00, 0x15, 0xe1, 0xdb, 0x00, 0xed, 0x34, + 0xc5, 0x69, 0xf7, 0x55, 0x8e, 0xaf, 0x84, 0x9d, 0xd1, 0x20, 0xd7, 0x81, 0x44, 0x3c, 0xa2, 0x7e, + 0xad, 0xcd, 0x23, 0x2f, 0x70, 0x6b, 0x2d, 0xfe, 0x19, 0x0b, 0x11, 0x76, 0xcc, 0x7e, 0x01, 0x6f, + 0x76, 0xf0, 0x62, 0x3b, 0x39, 0xef, 0x6b, 0xcf, 0xb1, 0x67, 0x6e, 0xcf, 0xd5, 0x83, 0x49, 0x38, + 0x83, 0x31, 0x91, 0xaf, 0x34, 0x98, 0x90, 0x0b, 0x98, 0xcc, 0x97, 0x7d, 0x35, 0x7d, 0xdb, 0x5e, + 0x5f, 0x18, 0x2c, 0x28, 0x7d, 0x1a, 0x57, 0xbf, 0xfe, 0xf3, 0xdf, 0xef, 0x46, 0x5f, 0x25, 0x33, + 0x56, 0xf9, 0xe3, 0x83, 0xfc, 0xa0, 0xc1, 0xb9, 0xee, 0xc2, 0x26, 0x8b, 0xe5, 0xc6, 0xfb, 0xdf, + 0x02, 0xfa, 0xd2, 0x50, 0xb2, 0x8a, 0xa5, 0x8a, 0x2c, 0x4b, 0xe4, 0x0d, 0xab, 0xf4, 0x99, 0x23, + 0xac, 0x47, 0xdd, 0x7e, 0x7a, 0x6b, 0x71, 0x9f, 0x7c, 0xa3, 0x01, 0xf4, 0x96, 0x2b, 0x19, 0xe4, + 0x2e, 0xfb, 0x46, 0xd0, 0xaf, 0x0f, 0x27, 0x3c, 0x54, 0xa2, 0xd4, 0x82, 0x7e, 0xac, 0xc1, 0xf9, + 0xec, 0xde, 0x24, 0xcb, 0xe5, 0x3e, 0x0a, 0x76, 0xaf, 0x6e, 0x0e, 0x2b, 0xae, 0xa0, 0x16, 0x11, + 0xea, 0x35, 0x62, 0x14, 0x42, 0xe5, 0x36, 0x35, 0xf9, 0x29, 0x2d, 0x22, 0xce, 0xd1, 0x41, 0x45, + 0xcc, 0xac, 0x9b, 0x81, 0x45, 0xcc, 0x0e, 0x7d, 0xe3, 0x36, 0x22, 0xad, 0x93, 0xd5, 0xa1, 0x8b, + 0x68, 0x35, 0xe5, 0xc0, 0x17, 0xe4, 0x17, 0x0d, 0x2e, 0xf6, 0x2d, 0x13, 0xb2, 0x52, 0xee, 0xbc, + 0x78, 0x3b, 0xea, 0xd5, 0x13, 0x68, 0x28, 0xe8, 0x35, 0x84, 0x5e, 0x26, 0x4b, 0xc7, 0x40, 0xdf, + 0x96, 0xab, 0xa8, 0x47, 0xfb, 0x9b, 0x06, 0xe4, 0xe9, 0xe9, 0x4d, 0xd6, 0xca, 0xdd, 0x97, 0xee, + 0x1a, 0x7d, 0xfd, 0x64, 0x4a, 0x0a, 0xfb, 0x0e, 0x62, 0xdf, 0x20, 0x6b, 0x85, 0xd8, 0xdd, 0x21, + 0x86, 0xe3, 0x1d, 0x35, 0xad, 0x47, 0xe9, 0x46, 0xdb, 0x27, 0xbf, 0x6b, 0x30, 0x55, 0x30, 0xd6, + 0xc9, 0x31, 0x28, 0xe5, 0x7b, 0x48, 0xbf, 0x71, 0x42, 0x2d, 0x15, 0xc1, 0x5d, 0x8c, 
0xe0, 0x26, + 0x59, 0x2f, 0x8c, 0xc0, 0xe9, 0x6a, 0x66, 0x43, 0x48, 0xf7, 0xdd, 0x7e, 0xd2, 0x2f, 0x93, 0x99, + 0x99, 0x4f, 0x06, 0x7d, 0xd1, 0xb9, 0xdd, 0xa4, 0x2f, 0x0f, 0x29, 0xad, 0x50, 0xdf, 0x41, 0xd4, + 0x5b, 0xe4, 0xcd, 0xe1, 0x1b, 0xbb, 0x57, 0x01, 0xc1, 0xa2, 0x8d, 0x7b, 0x4f, 0x0e, 0x2b, 0xda, + 0xc1, 0x61, 0x45, 0xfb, 0xe7, 0xb0, 0xa2, 0x7d, 0x7b, 0x54, 0x19, 0x39, 0x38, 0xaa, 0x8c, 0xfc, + 0x75, 0x54, 0x19, 0xf9, 0x74, 0xc5, 0xf5, 0xa2, 0xbd, 0xb8, 0x6e, 0x36, 0x78, 0x33, 0x35, 0xde, + 0xd8, 0xa3, 0x5e, 0xd0, 0xf5, 0xf4, 0x79, 0xcf, 0x57, 0xd4, 0x69, 0x31, 0x51, 0x9f, 0xc0, 0xdf, + 0x7a, 0x6b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x45, 0xf9, 0x53, 0x14, 0xc9, 0x0e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1541,7 +1558,17 @@ func (m *QueryEpochsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintQuery(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a + } + if m.EndEpoch != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EndEpoch)) + i-- + dAtA[i] = 0x10 + } + if m.StartEpoch != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.StartEpoch)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -2120,6 +2147,12 @@ func (m *QueryEpochsInfoRequest) Size() (n int) { } var l int _ = l + if m.StartEpoch != 0 { + n += 1 + sovQuery(uint64(m.StartEpoch)) + } + if m.EndEpoch != 0 { + n += 1 + sovQuery(uint64(m.EndEpoch)) + } if m.Pagination != nil { l = m.Pagination.Size() n += 1 + l + sovQuery(uint64(l)) @@ -2657,6 +2690,44 @@ func (m *QueryEpochsInfoRequest) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartEpoch", wireType) + } + m.StartEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartEpoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndEpoch", wireType) + } + m.EndEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndEpoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) } From 04d3dbb375265e7b75fdfe820e8464cd4db5dba7 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Tue, 31 Jan 2023 20:37:54 +1100 Subject: [PATCH 32/37] btccheckpoint API: enriching existing APIs with `BTCCheckpointInfo` (#294) --- client/docs/swagger-ui/swagger.yaml | 278 +++++-- .../babylon/btccheckpoint/btccheckpoint.proto | 32 +- proto/babylon/btccheckpoint/query.proto | 33 +- x/btccheckpoint/client/cli/query.go | 12 +- x/btccheckpoint/keeper/grpc_query.go | 66 +- x/btccheckpoint/keeper/msg_server_test.go | 2 +- x/btccheckpoint/types/btccheckpoint.pb.go | 724 +++++++++++++++--- x/btccheckpoint/types/query.pb.go | 701 ++++++----------- x/btccheckpoint/types/query.pb.gw.go | 62 +- x/btccheckpoint/types/types.go | 14 +- 10 files changed, 1173 insertions(+), 751 deletions(-) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index ecf1d71fa..df090159d 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -6,31 +6,55 @@ info: paths: /babylon/btccheckpoint/v1: get: - summary: >- - 
BtcCheckpointsHeightAndHash returns earliest block height and hash for a - range of epochs - operationId: BtcCheckpointsHeightAndHash + summary: BtcCheckpointsInfo returns checkpoint info for a range of epochs + operationId: BtcCheckpointsInfo responses: '200': description: A successful response. schema: type: object properties: - epoch_numbers: - type: array - items: - type: string - format: uint64 - earliest_btc_block_numbers: + info_list: type: array items: - type: string - format: uint64 - earliest_btc_block_hashes: - type: array - items: - type: string - format: byte + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: >- + height of earliest BTC block that includes this + checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted + from the checkpoint itself. + reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the + submissions, calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses pagination: title: pagination defines the pagination in the response type: object @@ -61,8 +85,8 @@ paths: PageResponse page = 2; } title: >- - QueryBtcCheckpointsHeightAndHashResponse is response type for the - Query/BtcCheckpointsHeightAndHash RPC method + QueryBtcCheckpointsInfoResponse is response type for the + Query/BtcCheckpointsInfo RPC method default: description: An unexpected error response. schema: @@ -223,26 +247,54 @@ paths: - Query /babylon/btccheckpoint/v1/{epoch_num}: get: - summary: >- - BtcCheckpointHeightAndHash returns earliest block height and hash for - given rawcheckpoint - operationId: BtcCheckpointHeightAndHash + summary: BtcCheckpointInfo returns checkpoint info for a given epoch + operationId: BtcCheckpointInfo responses: '200': description: A successful response. schema: type: object properties: - earliest_btc_block_number: - type: string - format: uint64 - title: Earliest btc block number containing given raw checkpoint - earliest_btc_block_hash: - type: string - format: byte + info: + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: height of earliest BTC block that includes this checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from + the checkpoint itself. 
+ reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the + submissions, calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses title: >- - QueryBtcCheckpointHeightAndHashResponse is response type for the - Query/BtcCheckpointHeightAndHash RPC method + QueryBtcCheckpointInfoResponse is response type for the + Query/BtcCheckpointInfo RPC method default: description: An unexpected error response. schema: @@ -9766,6 +9818,62 @@ paths: tags: - Query definitions: + babylon.btccheckpoint.v1.BTCCheckpointInfo: + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: height of earliest BTC block that includes this checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from the + checkpoint itself. + reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the submissions, + calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses + babylon.btccheckpoint.v1.CheckpointAddresses: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from the checkpoint + itself. + reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the submissions, calculated + from + + submission message MsgInsertBTCSpvProof itself babylon.btccheckpoint.v1.Params: type: object properties: @@ -9795,37 +9903,91 @@ definitions: (w in research paper) description: Params defines the parameters for the module. - babylon.btccheckpoint.v1.QueryBtcCheckpointHeightAndHashResponse: + babylon.btccheckpoint.v1.QueryBtcCheckpointInfoResponse: type: object properties: - earliest_btc_block_number: - type: string - format: uint64 - title: Earliest btc block number containing given raw checkpoint - earliest_btc_block_hash: - type: string - format: byte + info: + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: height of earliest BTC block that includes this checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from the + checkpoint itself. 
+ reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the submissions, + calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses title: >- - QueryBtcCheckpointHeightAndHashResponse is response type for the - Query/BtcCheckpointHeightAndHash RPC method - babylon.btccheckpoint.v1.QueryBtcCheckpointsHeightAndHashResponse: + QueryBtcCheckpointInfoResponse is response type for the + Query/BtcCheckpointInfo RPC method + babylon.btccheckpoint.v1.QueryBtcCheckpointsInfoResponse: type: object properties: - epoch_numbers: - type: array - items: - type: string - format: uint64 - earliest_btc_block_numbers: + info_list: type: array items: - type: string - format: uint64 - earliest_btc_block_hashes: - type: array - items: - type: string - format: byte + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: height of earliest BTC block that includes this checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from the + checkpoint itself. + reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the submissions, + calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses pagination: title: pagination defines the pagination in the response type: object @@ -9854,8 +10016,8 @@ definitions: PageResponse page = 2; } title: >- - QueryBtcCheckpointsHeightAndHashResponse is response type for the - Query/BtcCheckpointsHeightAndHash RPC method + QueryBtcCheckpointsInfoResponse is response type for the + Query/BtcCheckpointsInfo RPC method babylon.btccheckpoint.v1.QueryEpochSubmissionsResponse: type: object properties: diff --git a/proto/babylon/btccheckpoint/btccheckpoint.proto b/proto/babylon/btccheckpoint/btccheckpoint.proto index 27b60a588..84067295c 100644 --- a/proto/babylon/btccheckpoint/btccheckpoint.proto +++ b/proto/babylon/btccheckpoint/btccheckpoint.proto @@ -98,19 +98,14 @@ message TransactionInfo { // depth/block number info, without context (i.e info about chain) is pretty useless // and blockshash in enough to retrieve is from lightclient message SubmissionData { - // TODO: this could probably be better typed - // Address of the vigiliatne which submitted the submissions, calculated from - // submission message itself - bytes vigilante_address = 1; - - // Address of the checkpoint submitter, extracted from the checkpoint itself. - bytes submitter_address = 2; + // address of the submitter and reporter + CheckpointAddresses vigilante_addresses = 1; // txs_info is the two `TransactionInfo`s corresponding to the submission // It is used for // - recovering address of sender of btc transction to payup the reward. 
// - allowing the ZoneConcierge module to prove the checkpoint is submitted to BTC - repeated TransactionInfo txs_info = 3; - uint64 epoch = 4; + repeated TransactionInfo txs_info = 2; + uint64 epoch = 3; } // Data stored in db and indexed by epoch number @@ -124,3 +119,22 @@ message EpochData { BtcStatus status = 2; } +message CheckpointAddresses { + // TODO: this could probably be better typed + // Address of the checkpoint submitter, extracted from the checkpoint itself. + bytes submitter = 1; + // Address of the reporter which reported the submissions, calculated from + // submission message MsgInsertBTCSpvProof itself + bytes reporter = 2; +} + +message BTCCheckpointInfo { + // epoch number of this checkpoint + uint64 epoch_number = 1; + // height of earliest BTC block that includes this checkpoint + uint64 earliest_btc_block_number = 2; + // hash of earliest BTC block that includes this checkpoint + bytes earliest_btc_block_hash = 3; + // list of vigilantes' addresses + repeated CheckpointAddresses vigilante_address_list = 4; +} diff --git a/proto/babylon/btccheckpoint/query.proto b/proto/babylon/btccheckpoint/query.proto index 33b1502fa..763f6640c 100644 --- a/proto/babylon/btccheckpoint/query.proto +++ b/proto/babylon/btccheckpoint/query.proto @@ -16,13 +16,13 @@ service Query { option (google.api.http).get = "/babylon/btccheckpoint/v1/params"; } - // BtcCheckpointHeightAndHash returns earliest block height and hash for given rawcheckpoint - rpc BtcCheckpointHeightAndHash(QueryBtcCheckpointHeightAndHashRequest) returns (QueryBtcCheckpointHeightAndHashResponse) { + // BtcCheckpointInfo returns checkpoint info for a given epoch + rpc BtcCheckpointInfo(QueryBtcCheckpointInfoRequest) returns (QueryBtcCheckpointInfoResponse) { option (google.api.http).get = "/babylon/btccheckpoint/v1/{epoch_num}"; } - // BtcCheckpointsHeightAndHash returns earliest block height and hash for a range of epochs - rpc BtcCheckpointsHeightAndHash(QueryBtcCheckpointsHeightAndHashRequest) returns (QueryBtcCheckpointsHeightAndHashResponse) { + // BtcCheckpointsInfo returns checkpoint info for a range of epochs + rpc BtcCheckpointsInfo(QueryBtcCheckpointsInfoRequest) returns (QueryBtcCheckpointsInfoResponse) { option (google.api.http).get = "/babylon/btccheckpoint/v1"; } @@ -40,20 +40,18 @@ message QueryParamsResponse { Params params = 1 [ (gogoproto.nullable) = false ]; } -message QueryBtcCheckpointHeightAndHashRequest { +message QueryBtcCheckpointInfoRequest { // Number of epoch for which the earliest checkpointing btc height is requested uint64 epoch_num = 1; } -// QueryBtcCheckpointHeightAndHashResponse is response type for the Query/BtcCheckpointHeightAndHash RPC method -message QueryBtcCheckpointHeightAndHashResponse { - // Earliest btc block number containing given raw checkpoint - uint64 earliest_btc_block_number = 1; - bytes earliest_btc_block_hash = 2; +// QueryBtcCheckpointInfoResponse is response type for the Query/BtcCheckpointInfo RPC method +message QueryBtcCheckpointInfoResponse { + BTCCheckpointInfo info = 1; } -// QueryBtcCheckpointsHeightAndHashRequest is request type for the Query/BtcCheckpointsHeightAndHash RPC method -message QueryBtcCheckpointsHeightAndHashRequest { +// QueryBtcCheckpointsInfoRequest is request type for the Query/BtcCheckpointsInfo RPC method +message QueryBtcCheckpointsInfoRequest { uint64 start_epoch = 1; uint64 end_epoch = 2; @@ -61,14 +59,11 @@ message QueryBtcCheckpointsHeightAndHashRequest { cosmos.base.query.v1beta1.PageRequest pagination = 3; } -// 
QueryBtcCheckpointsHeightAndHashResponse is response type for the Query/BtcCheckpointsHeightAndHash RPC method -message QueryBtcCheckpointsHeightAndHashResponse { - repeated uint64 epoch_numbers = 1; - repeated uint64 earliest_btc_block_numbers = 2; - repeated bytes earliest_btc_block_hashes = 3; - +// QueryBtcCheckpointsInfoResponse is response type for the Query/BtcCheckpointsInfo RPC method +message QueryBtcCheckpointsInfoResponse { + repeated BTCCheckpointInfo info_list = 1; // pagination defines the pagination in the response - cosmos.base.query.v1beta1.PageResponse pagination = 4; + cosmos.base.query.v1beta1.PageResponse pagination = 2; } message QueryEpochSubmissionsRequest { diff --git a/x/btccheckpoint/client/cli/query.go b/x/btccheckpoint/client/cli/query.go index de284a7cc..73696536e 100644 --- a/x/btccheckpoint/client/cli/query.go +++ b/x/btccheckpoint/client/cli/query.go @@ -41,15 +41,15 @@ func CmdBtcCheckpointHeightAndHash() *cobra.Command { queryClient := types.NewQueryClient(clientCtx) - epoch_num, err := strconv.ParseUint(args[0], 10, 64) + epochNum, err := strconv.ParseUint(args[0], 10, 64) if err != nil { return err } - req := types.QueryBtcCheckpointHeightAndHashRequest{EpochNum: epoch_num} + req := types.QueryBtcCheckpointInfoRequest{EpochNum: epochNum} - resp, err := queryClient.BtcCheckpointHeightAndHash(context.Background(), &req) + resp, err := queryClient.BtcCheckpointInfo(context.Background(), &req) if err != nil { return err @@ -65,7 +65,7 @@ func CmdBtcCheckpointHeightAndHash() *cobra.Command { func CmdEpochSubmissions() *cobra.Command { cmd := &cobra.Command{ - Use: "epoch-submissions ", + Use: "epoch-submissions ", Short: "all checkpoint submissions for given epoch", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -73,7 +73,7 @@ func CmdEpochSubmissions() *cobra.Command { queryClient := types.NewQueryClient(clientCtx) - epoch_num, err := strconv.ParseUint(args[0], 10, 64) + epochNum, err := strconv.ParseUint(args[0], 10, 64) if err != nil { return err @@ -84,7 +84,7 @@ func CmdEpochSubmissions() *cobra.Command { return err } - params := types.QueryEpochSubmissionsRequest{EpochNum: epoch_num, Pagination: pageReq} + params := types.QueryEpochSubmissionsRequest{EpochNum: epochNum, Pagination: pageReq} res, err := queryClient.EpochSubmissions(context.Background(), ¶ms) diff --git a/x/btccheckpoint/keeper/grpc_query.go b/x/btccheckpoint/keeper/grpc_query.go index caf409287..3b1834d51 100644 --- a/x/btccheckpoint/keeper/grpc_query.go +++ b/x/btccheckpoint/keeper/grpc_query.go @@ -45,14 +45,16 @@ func (k Keeper) lowestBtcHeightAndHash(ctx sdk.Context, subKey *types.Submission return lowestHeaderNumber, lowestHeaderHash, nil } -func (k Keeper) lowestBtcHeightAndHashInKeys(ctx sdk.Context, subKeys []*types.SubmissionKey) (uint64, []byte, error) { +func (k Keeper) getCheckpointInfo(ctx sdk.Context, epochNum uint64, subKeys []*types.SubmissionKey) (*types.BTCCheckpointInfo, error) { if len(subKeys) == 0 { - return 0, nil, errors.New("empty subKeys") + return nil, errors.New("empty subKeys") } - // initializing to max, as then every header height will be smaller - var lowestHeaderNumber uint64 = math.MaxUint64 - var lowestHeaderHash []byte + info := types.BTCCheckpointInfo{ + EpochNumber: epochNum, + EarliestBtcBlockNumber: math.MaxUint64, // initializing to max, as then every header height will be smaller + VigilanteAddressList: []*types.CheckpointAddresses{}, + } for _, subKey := range subKeys { headerNumber, headerHash, err := 
k.lowestBtcHeightAndHash(ctx, subKey) @@ -61,20 +63,31 @@ func (k Keeper) lowestBtcHeightAndHashInKeys(ctx sdk.Context, subKeys []*types.S continue } - if headerNumber < lowestHeaderNumber { - lowestHeaderNumber = headerNumber - lowestHeaderHash = headerHash + // get vigilante address + sd := k.GetSubmissionData(ctx, *subKey) + if sd == nil { + // submission is not valid for some reason, ignore it + continue } + + // ensure lowest header number and hash + if headerNumber < info.EarliestBtcBlockNumber { + info.EarliestBtcBlockNumber = headerNumber + info.EarliestBtcBlockHash = headerHash + } + // append vigilante addresses + vAddrs := *sd.VigilanteAddresses // make a new copy + info.VigilanteAddressList = append(info.VigilanteAddressList, &vAddrs) } - if lowestHeaderNumber == math.MaxUint64 { - return 0, nil, errors.New("there is no valid submission for given raw checkpoint") + if info.EarliestBtcBlockNumber == math.MaxUint64 { + return nil, errors.New("there is no valid submission for given raw checkpoint") } - return lowestHeaderNumber, lowestHeaderHash, nil + return &info, nil } -func (k Keeper) BtcCheckpointHeightAndHash(c context.Context, req *types.QueryBtcCheckpointHeightAndHashRequest) (*types.QueryBtcCheckpointHeightAndHashResponse, error) { +func (k Keeper) BtcCheckpointInfo(c context.Context, req *types.QueryBtcCheckpointInfoRequest) (*types.QueryBtcCheckpointInfoResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") } @@ -90,19 +103,18 @@ func (k Keeper) BtcCheckpointHeightAndHash(c context.Context, req *types.QueryBt return nil, errors.New("checkpoint for given epoch not yet submitted") } - lowestHeaderNumber, lowestHeaderHash, err := k.lowestBtcHeightAndHashInKeys(ctx, epochData.Key) + ckptInfo, err := k.getCheckpointInfo(ctx, checkpointEpoch, epochData.Key) if err != nil { return nil, fmt.Errorf("failed to get lowest BTC height and hash in keys of epoch %d: %w", req.EpochNum, err) } - resp := &types.QueryBtcCheckpointHeightAndHashResponse{ - EarliestBtcBlockNumber: lowestHeaderNumber, - EarliestBtcBlockHash: lowestHeaderHash, + resp := &types.QueryBtcCheckpointInfoResponse{ + Info: ckptInfo, } return resp, nil } -func (k Keeper) BtcCheckpointsHeightAndHash(c context.Context, req *types.QueryBtcCheckpointsHeightAndHashRequest) (*types.QueryBtcCheckpointsHeightAndHashResponse, error) { +func (k Keeper) BtcCheckpointsInfo(c context.Context, req *types.QueryBtcCheckpointsInfoRequest) (*types.QueryBtcCheckpointsInfoResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") } @@ -125,9 +137,7 @@ func (k Keeper) BtcCheckpointsHeightAndHash(c context.Context, req *types.QueryB store := ctx.KVStore(k.storeKey) epochDataStore := prefix.NewStore(store, types.EpochDataPrefix) - epochNumbers := []uint64{} - btcNumbers := []uint64{} - btcHashes := [][]byte{} + ckptInfoList := []*types.BTCCheckpointInfo{} // iterate over epochDataStore, where key is the epoch number and value is the epoch data pageRes, err := query.Paginate(epochDataStore, req.Pagination, func(key, value []byte) error { epochNum := sdk.BigEndianToUint64(key) @@ -139,15 +149,13 @@ func (k Keeper) BtcCheckpointsHeightAndHash(c context.Context, req *types.QueryB return errors.New("checkpoint for given epoch not yet submitted") } - lowestHeaderNumber, lowestHeaderHash, err := k.lowestBtcHeightAndHashInKeys(ctx, epochData.Key) + ckptInfo, err := k.getCheckpointInfo(ctx, epochNum, epochData.Key) if err != nil { return fmt.Errorf("failed to 
get lowest BTC height and hash in keys of epoch %d: %w", epochNum, err) } - // append all lists - btcNumbers = append(btcNumbers, lowestHeaderNumber) - btcHashes = append(btcHashes, lowestHeaderHash) - epochNumbers = append(epochNumbers, epochNum) + // append ckpt info + ckptInfoList = append(ckptInfoList, ckptInfo) return nil }) @@ -155,11 +163,9 @@ func (k Keeper) BtcCheckpointsHeightAndHash(c context.Context, req *types.QueryB return nil, status.Error(codes.Internal, err.Error()) } - resp := &types.QueryBtcCheckpointsHeightAndHashResponse{ - EpochNumbers: epochNumbers, - EarliestBtcBlockNumbers: btcNumbers, - EarliestBtcBlockHashes: btcHashes, - Pagination: pageRes, + resp := &types.QueryBtcCheckpointsInfoResponse{ + InfoList: ckptInfoList, + Pagination: pageRes, } return resp, nil } diff --git a/x/btccheckpoint/keeper/msg_server_test.go b/x/btccheckpoint/keeper/msg_server_test.go index e6983452e..d473af083 100644 --- a/x/btccheckpoint/keeper/msg_server_test.go +++ b/x/btccheckpoint/keeper/msg_server_test.go @@ -222,7 +222,7 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { t.Errorf("Submission data with invalid TransactionInfo") } - if !bytes.Equal(rawBtcCheckpoint.SubmitterAddress, submissionData.SubmitterAddress) { + if !bytes.Equal(rawBtcCheckpoint.SubmitterAddress, submissionData.VigilanteAddresses.Submitter) { t.Errorf("Submission data does not contain expected submitter address") } diff --git a/x/btccheckpoint/types/btccheckpoint.pb.go b/x/btccheckpoint/types/btccheckpoint.pb.go index 43054ceb5..f85ff42ef 100644 --- a/x/btccheckpoint/types/btccheckpoint.pb.go +++ b/x/btccheckpoint/types/btccheckpoint.pb.go @@ -322,18 +322,14 @@ func (m *TransactionInfo) GetProof() []byte { // depth/block number info, without context (i.e info about chain) is pretty useless // and blockshash in enough to retrieve is from lightclient type SubmissionData struct { - // TODO: this could probably be better typed - // Address of the vigiliatne which submitted the submissions, calculated from - // submission message itself - VigilanteAddress []byte `protobuf:"bytes,1,opt,name=vigilante_address,json=vigilanteAddress,proto3" json:"vigilante_address,omitempty"` - // Address of the checkpoint submitter, extracted from the checkpoint itself. - SubmitterAddress []byte `protobuf:"bytes,2,opt,name=submitter_address,json=submitterAddress,proto3" json:"submitter_address,omitempty"` + // address of the submitter and reporter + VigilanteAddresses *CheckpointAddresses `protobuf:"bytes,1,opt,name=vigilante_addresses,json=vigilanteAddresses,proto3" json:"vigilante_addresses,omitempty"` // txs_info is the two `TransactionInfo`s corresponding to the submission // It is used for // - recovering address of sender of btc transction to payup the reward. 
// - allowing the ZoneConcierge module to prove the checkpoint is submitted to BTC - TxsInfo []*TransactionInfo `protobuf:"bytes,3,rep,name=txs_info,json=txsInfo,proto3" json:"txs_info,omitempty"` - Epoch uint64 `protobuf:"varint,4,opt,name=epoch,proto3" json:"epoch,omitempty"` + TxsInfo []*TransactionInfo `protobuf:"bytes,2,rep,name=txs_info,json=txsInfo,proto3" json:"txs_info,omitempty"` + Epoch uint64 `protobuf:"varint,3,opt,name=epoch,proto3" json:"epoch,omitempty"` } func (m *SubmissionData) Reset() { *m = SubmissionData{} } @@ -369,16 +365,9 @@ func (m *SubmissionData) XXX_DiscardUnknown() { var xxx_messageInfo_SubmissionData proto.InternalMessageInfo -func (m *SubmissionData) GetVigilanteAddress() []byte { - if m != nil { - return m.VigilanteAddress - } - return nil -} - -func (m *SubmissionData) GetSubmitterAddress() []byte { +func (m *SubmissionData) GetVigilanteAddresses() *CheckpointAddresses { if m != nil { - return m.SubmitterAddress + return m.VigilanteAddresses } return nil } @@ -454,6 +443,134 @@ func (m *EpochData) GetStatus() BtcStatus { return Submitted } +type CheckpointAddresses struct { + // TODO: this could probably be better typed + // Address of the checkpoint submitter, extracted from the checkpoint itself. + Submitter []byte `protobuf:"bytes,1,opt,name=submitter,proto3" json:"submitter,omitempty"` + // Address of the reporter which reported the submissions, calculated from + // submission message MsgInsertBTCSpvProof itself + Reporter []byte `protobuf:"bytes,2,opt,name=reporter,proto3" json:"reporter,omitempty"` +} + +func (m *CheckpointAddresses) Reset() { *m = CheckpointAddresses{} } +func (m *CheckpointAddresses) String() string { return proto.CompactTextString(m) } +func (*CheckpointAddresses) ProtoMessage() {} +func (*CheckpointAddresses) Descriptor() ([]byte, []int) { + return fileDescriptor_da8b9af3dbd18a36, []int{6} +} +func (m *CheckpointAddresses) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckpointAddresses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckpointAddresses.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckpointAddresses) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckpointAddresses.Merge(m, src) +} +func (m *CheckpointAddresses) XXX_Size() int { + return m.Size() +} +func (m *CheckpointAddresses) XXX_DiscardUnknown() { + xxx_messageInfo_CheckpointAddresses.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckpointAddresses proto.InternalMessageInfo + +func (m *CheckpointAddresses) GetSubmitter() []byte { + if m != nil { + return m.Submitter + } + return nil +} + +func (m *CheckpointAddresses) GetReporter() []byte { + if m != nil { + return m.Reporter + } + return nil +} + +type BTCCheckpointInfo struct { + // epoch number of this checkpoint + EpochNumber uint64 `protobuf:"varint,1,opt,name=epoch_number,json=epochNumber,proto3" json:"epoch_number,omitempty"` + // height of earliest BTC block that includes this checkpoint + EarliestBtcBlockNumber uint64 `protobuf:"varint,2,opt,name=earliest_btc_block_number,json=earliestBtcBlockNumber,proto3" json:"earliest_btc_block_number,omitempty"` + // hash of earliest BTC block that includes this checkpoint + EarliestBtcBlockHash []byte `protobuf:"bytes,3,opt,name=earliest_btc_block_hash,json=earliestBtcBlockHash,proto3" json:"earliest_btc_block_hash,omitempty"` + // list of 
vigilantes' addresses + VigilanteAddressList []*CheckpointAddresses `protobuf:"bytes,4,rep,name=vigilante_address_list,json=vigilanteAddressList,proto3" json:"vigilante_address_list,omitempty"` +} + +func (m *BTCCheckpointInfo) Reset() { *m = BTCCheckpointInfo{} } +func (m *BTCCheckpointInfo) String() string { return proto.CompactTextString(m) } +func (*BTCCheckpointInfo) ProtoMessage() {} +func (*BTCCheckpointInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_da8b9af3dbd18a36, []int{7} +} +func (m *BTCCheckpointInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BTCCheckpointInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BTCCheckpointInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BTCCheckpointInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_BTCCheckpointInfo.Merge(m, src) +} +func (m *BTCCheckpointInfo) XXX_Size() int { + return m.Size() +} +func (m *BTCCheckpointInfo) XXX_DiscardUnknown() { + xxx_messageInfo_BTCCheckpointInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_BTCCheckpointInfo proto.InternalMessageInfo + +func (m *BTCCheckpointInfo) GetEpochNumber() uint64 { + if m != nil { + return m.EpochNumber + } + return 0 +} + +func (m *BTCCheckpointInfo) GetEarliestBtcBlockNumber() uint64 { + if m != nil { + return m.EarliestBtcBlockNumber + } + return 0 +} + +func (m *BTCCheckpointInfo) GetEarliestBtcBlockHash() []byte { + if m != nil { + return m.EarliestBtcBlockHash + } + return nil +} + +func (m *BTCCheckpointInfo) GetVigilanteAddressList() []*CheckpointAddresses { + if m != nil { + return m.VigilanteAddressList + } + return nil +} + func init() { proto.RegisterEnum("babylon.btccheckpoint.v1.BtcStatus", BtcStatus_name, BtcStatus_value) proto.RegisterType((*BTCSpvProof)(nil), "babylon.btccheckpoint.v1.BTCSpvProof") @@ -462,6 +579,8 @@ func init() { proto.RegisterType((*TransactionInfo)(nil), "babylon.btccheckpoint.v1.TransactionInfo") proto.RegisterType((*SubmissionData)(nil), "babylon.btccheckpoint.v1.SubmissionData") proto.RegisterType((*EpochData)(nil), "babylon.btccheckpoint.v1.EpochData") + proto.RegisterType((*CheckpointAddresses)(nil), "babylon.btccheckpoint.v1.CheckpointAddresses") + proto.RegisterType((*BTCCheckpointInfo)(nil), "babylon.btccheckpoint.v1.BTCCheckpointInfo") } func init() { @@ -469,47 +588,55 @@ func init() { } var fileDescriptor_da8b9af3dbd18a36 = []byte{ - // 640 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x4f, 0x13, 0x41, - 0x14, 0xef, 0xd0, 0x82, 0x32, 0x85, 0x52, 0x17, 0x30, 0x1b, 0x0e, 0x4b, 0xad, 0x07, 0x8a, 0x26, - 0x6d, 0x44, 0x4d, 0xf0, 0xe3, 0xc2, 0xb6, 0x25, 0x34, 0xc8, 0x47, 0x76, 0x97, 0x0b, 0x97, 0xcd, - 0xec, 0xee, 0xb4, 0x3b, 0xa1, 0xdd, 0x69, 0x76, 0x06, 0xd2, 0x7a, 0x35, 0x26, 0xc6, 0x93, 0xf1, - 0xee, 0xc9, 0xff, 0xc5, 0x78, 0xe4, 0x68, 0x38, 0x10, 0x03, 0x7f, 0x86, 0x17, 0x33, 0x33, 0x4b, - 0x3f, 0x50, 0xa2, 0xdc, 0xf6, 0xbd, 0xf7, 0x7b, 0x1f, 0xbf, 0xdf, 0x7b, 0xb3, 0x70, 0xd5, 0x43, - 0x5e, 0xbf, 0x4d, 0xa3, 0x8a, 0xc7, 0x7d, 0x3f, 0xc4, 0xfe, 0x51, 0x97, 0x92, 0x88, 0x8f, 0x5b, - 0xe5, 0x6e, 0x4c, 0x39, 0xd5, 0xf4, 0x04, 0x5a, 0x1e, 0x0f, 0x9e, 0x3c, 0x59, 0x5a, 0x68, 0xd1, - 0x16, 0x95, 0xa0, 0x8a, 0xf8, 0x52, 0xf8, 0xe2, 0x2f, 0x00, 0xb3, 0xa6, 0x53, 0xb5, 0xbb, 0x27, - 0xfb, 0x31, 0xa5, 0x4d, 0x6d, 0x05, 0xce, 0x79, 0xdc, 0x77, 
0x79, 0x8c, 0x22, 0x86, 0x7c, 0x4e, - 0x68, 0xa4, 0x83, 0x02, 0x28, 0xcd, 0x58, 0x39, 0x8f, 0xfb, 0xce, 0xd0, 0xab, 0xad, 0xc1, 0xc5, - 0x6b, 0x40, 0x97, 0x44, 0x01, 0xee, 0xe9, 0x13, 0x05, 0x50, 0x9a, 0xb5, 0xe6, 0xc7, 0xe1, 0x0d, - 0x11, 0xd2, 0x1e, 0xc0, 0x99, 0x0e, 0x8e, 0x8f, 0xda, 0xd8, 0x8d, 0x68, 0x80, 0x99, 0x9e, 0x96, - 0x95, 0xb3, 0xca, 0xb7, 0x2b, 0x5c, 0x5a, 0x1b, 0x2e, 0xfa, 0x34, 0x6a, 0x92, 0xb8, 0x43, 0xa2, - 0x96, 0x2b, 0x3a, 0x84, 0x18, 0x05, 0x38, 0xd6, 0x33, 0x02, 0x6b, 0xae, 0x9f, 0x9d, 0x2f, 0x3f, - 0x6b, 0x11, 0x1e, 0x1e, 0x7b, 0x65, 0x9f, 0x76, 0x2a, 0x09, 0x5b, 0x3f, 0x44, 0x24, 0xba, 0x32, - 0x2a, 0xbc, 0xdf, 0xc5, 0xac, 0x6c, 0x3a, 0xd5, 0x2d, 0x99, 0x6a, 0xf6, 0x39, 0x66, 0xd6, 0xfc, - 0xb0, 0xac, 0xc9, 0x7d, 0x15, 0x29, 0xf6, 0x60, 0x6e, 0x64, 0xc8, 0x6d, 0xdc, 0xd7, 0x16, 0xe0, - 0xa4, 0xa2, 0x01, 0x24, 0x0d, 0x65, 0x68, 0xfb, 0x30, 0x13, 0x22, 0x16, 0x4a, 0x6e, 0x33, 0xe6, - 0xeb, 0xb3, 0xf3, 0xe5, 0xf5, 0x5b, 0x0e, 0xb1, 0x85, 0x58, 0xa8, 0x06, 0x91, 0x95, 0x8a, 0xdb, - 0x70, 0xd6, 0x3e, 0xf6, 0x3a, 0x84, 0xb1, 0xa4, 0xf1, 0x4b, 0x98, 0x3e, 0xc2, 0x7d, 0x1d, 0x14, - 0xd2, 0xa5, 0xec, 0x5a, 0xa9, 0x7c, 0xd3, 0x1a, 0xcb, 0xe3, 0xf3, 0x5a, 0x22, 0xa9, 0xf8, 0x1e, - 0xc0, 0xb9, 0x31, 0xb1, 0x9b, 0x74, 0x58, 0x0f, 0xdc, 0xba, 0x9e, 0x56, 0x80, 0xd9, 0xd1, 0x03, - 0x98, 0x50, 0x6b, 0x1a, 0x71, 0x09, 0x99, 0xba, 0xe2, 0x5e, 0x92, 0x15, 0x2a, 0xa3, 0xf8, 0x0d, - 0xc0, 0xdc, 0x90, 0x55, 0x0d, 0x71, 0xa4, 0x3d, 0x86, 0xf7, 0x4e, 0x48, 0x8b, 0xb4, 0x51, 0xc4, - 0xb1, 0x8b, 0x82, 0x20, 0xc6, 0x8c, 0x25, 0x17, 0x95, 0x1f, 0x04, 0x36, 0x94, 0x5f, 0x80, 0x99, - 0x48, 0xe7, 0x1c, 0xc7, 0x03, 0xb0, 0xea, 0x9e, 0x1f, 0x04, 0xae, 0xc0, 0x35, 0x78, 0x97, 0xf7, - 0x98, 0x4b, 0xa2, 0x26, 0xd5, 0xd3, 0x52, 0xb5, 0xd5, 0xff, 0x62, 0x29, 0xd4, 0xb1, 0xee, 0xf0, - 0x1e, 0x93, 0x32, 0x2d, 0xc0, 0x49, 0xdc, 0xa5, 0x7e, 0x28, 0xef, 0x2b, 0x63, 0x29, 0xa3, 0xf8, - 0x0e, 0xc0, 0xe9, 0xba, 0xf8, 0x92, 0x1c, 0x5e, 0x8c, 0xae, 0x66, 0xe5, 0xe6, 0x26, 0x63, 0x0b, - 0x55, 0x4a, 0xbe, 0x82, 0x53, 0x8c, 0x23, 0x7e, 0xac, 0x68, 0xe4, 0xd6, 0x1e, 0xde, 0x9c, 0x6d, - 0x72, 0xdf, 0x96, 0x50, 0x2b, 0x49, 0x79, 0xf4, 0x19, 0xc0, 0xe9, 0x81, 0x57, 0x5b, 0x85, 0xf7, - 0xeb, 0xfb, 0x7b, 0xd5, 0x2d, 0xd7, 0x76, 0x36, 0x9c, 0x03, 0xdb, 0xb5, 0x0f, 0xcc, 0x9d, 0x86, - 0xe3, 0xd4, 0x6b, 0xf9, 0xd4, 0xd2, 0xec, 0xc7, 0x2f, 0x85, 0x69, 0x3b, 0x51, 0x28, 0xf8, 0x03, - 0x5a, 0xdd, 0xdb, 0xdd, 0x6c, 0x58, 0x3b, 0xf5, 0x5a, 0x1e, 0x28, 0x68, 0x55, 0xbd, 0x85, 0xbf, - 0x40, 0x37, 0x1b, 0xbb, 0x1b, 0x6f, 0x1a, 0x87, 0xf5, 0x5a, 0x7e, 0x42, 0x41, 0x37, 0x49, 0x84, - 0xda, 0xe4, 0x2d, 0x0e, 0x96, 0x32, 0x1f, 0xbe, 0x1a, 0x29, 0x73, 0xef, 0xfb, 0x85, 0x01, 0x4e, - 0x2f, 0x0c, 0xf0, 0xf3, 0xc2, 0x00, 0x9f, 0x2e, 0x8d, 0xd4, 0xe9, 0xa5, 0x91, 0xfa, 0x71, 0x69, - 0xa4, 0x0e, 0x9f, 0xff, 0xeb, 0x49, 0xf4, 0xae, 0xfd, 0xbf, 0xe4, 0x13, 0xf1, 0xa6, 0xe4, 0x8f, - 0xe8, 0xe9, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x52, 0x2f, 0x67, 0xe5, 0x04, 0x00, 0x00, + // 768 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcf, 0x4f, 0xdb, 0x48, + 0x14, 0x8e, 0x43, 0x60, 0xc9, 0x24, 0x04, 0x76, 0x12, 0x58, 0x6f, 0xb4, 0x0a, 0xc1, 0x7b, 0x20, + 0xac, 0xb4, 0x89, 0x96, 0x5d, 0x24, 0xd8, 0xdd, 0x0b, 0x4e, 0x82, 0x88, 0x80, 0x04, 0x39, 0xe6, + 0xc2, 0xa1, 0x96, 0xed, 0x4c, 0xe2, 0x51, 0x1c, 0x4f, 0xe4, 0x99, 0xa0, 0xa4, 0xd7, 0xaa, 0x52, + 0x55, 0xa9, 0x52, 0xd5, 0x7b, 0x4f, 0xfd, 0x67, 0x7a, 0xe8, 0x81, 0x63, 0xc5, 0x01, 0x55, 0xf0, + 0x67, 0xf4, 0x52, 0x79, 0xec, 0xfc, 
0x84, 0xa8, 0xe5, 0xe6, 0xf7, 0xde, 0xf7, 0xde, 0xbc, 0xef, + 0x7b, 0x6f, 0xc6, 0x60, 0xc7, 0xd0, 0x8d, 0x81, 0x4d, 0x9c, 0x82, 0xc1, 0x4c, 0xd3, 0x42, 0x66, + 0xbb, 0x4b, 0xb0, 0xc3, 0xa6, 0xad, 0x7c, 0xd7, 0x25, 0x8c, 0x40, 0x31, 0x80, 0xe6, 0xa7, 0x83, + 0x57, 0x7f, 0xa5, 0x53, 0x2d, 0xd2, 0x22, 0x1c, 0x54, 0xf0, 0xbe, 0x7c, 0xbc, 0xf4, 0x55, 0x00, + 0x31, 0x59, 0x2d, 0xd6, 0xbb, 0x57, 0xe7, 0x2e, 0x21, 0x4d, 0xb8, 0x0d, 0x56, 0x0d, 0x66, 0x6a, + 0xcc, 0xd5, 0x1d, 0xaa, 0x9b, 0x0c, 0x13, 0x47, 0x14, 0xb2, 0x42, 0x2e, 0xae, 0x24, 0x0c, 0x66, + 0xaa, 0x63, 0x2f, 0xdc, 0x05, 0xeb, 0x33, 0x40, 0x0d, 0x3b, 0x0d, 0xd4, 0x17, 0xc3, 0x59, 0x21, + 0xb7, 0xa2, 0x24, 0xa7, 0xe1, 0x15, 0x2f, 0x04, 0xb7, 0x40, 0xbc, 0x83, 0xdc, 0xb6, 0x8d, 0x34, + 0x87, 0x34, 0x10, 0x15, 0x17, 0x78, 0xe5, 0x98, 0xef, 0xab, 0x7a, 0x2e, 0x68, 0x83, 0x75, 0x93, + 0x38, 0x4d, 0xec, 0x76, 0xb0, 0xd3, 0xd2, 0xbc, 0x13, 0x2c, 0xa4, 0x37, 0x90, 0x2b, 0x46, 0x3c, + 0xac, 0xbc, 0x7f, 0x73, 0xbb, 0xf9, 0x4f, 0x0b, 0x33, 0xab, 0x67, 0xe4, 0x4d, 0xd2, 0x29, 0x04, + 0x6c, 0x4d, 0x4b, 0xc7, 0xce, 0xd0, 0x28, 0xb0, 0x41, 0x17, 0xd1, 0xbc, 0xac, 0x16, 0x8f, 0x79, + 0xaa, 0x3c, 0x60, 0x88, 0x2a, 0xc9, 0x71, 0x59, 0x99, 0x99, 0x7e, 0x44, 0xea, 0x83, 0xc4, 0x44, + 0x93, 0x27, 0x68, 0x00, 0x53, 0x60, 0xd1, 0xa7, 0x21, 0x70, 0x1a, 0xbe, 0x01, 0xcf, 0x41, 0xc4, + 0xd2, 0xa9, 0xc5, 0xb9, 0xc5, 0xe5, 0xff, 0x6f, 0x6e, 0x37, 0xf7, 0x9f, 0xd8, 0xc4, 0xb1, 0x4e, + 0x2d, 0xbf, 0x11, 0x5e, 0x49, 0x3a, 0x01, 0x2b, 0xf5, 0x9e, 0xd1, 0xc1, 0x94, 0x06, 0x07, 0xff, + 0x0b, 0x16, 0xda, 0x68, 0x20, 0x0a, 0xd9, 0x85, 0x5c, 0x6c, 0x37, 0x97, 0x9f, 0x37, 0xc6, 0xfc, + 0x74, 0xbf, 0x8a, 0x97, 0x24, 0xbd, 0x14, 0xc0, 0xea, 0x94, 0xd8, 0x4d, 0x32, 0xae, 0x27, 0x3c, + 0xb9, 0x1e, 0xcc, 0x82, 0xd8, 0xe4, 0x02, 0x84, 0xfd, 0x31, 0x4d, 0xb8, 0x3c, 0x99, 0xba, 0xde, + 0xbe, 0x04, 0x23, 0xf4, 0x0d, 0xe9, 0x93, 0x00, 0x12, 0x63, 0x56, 0x25, 0x9d, 0xe9, 0xf0, 0x19, + 0x48, 0x5e, 0xe1, 0x16, 0xb6, 0x75, 0x87, 0x21, 0x4d, 0x6f, 0x34, 0x5c, 0x44, 0x29, 0xa2, 0x41, + 0x5b, 0x7f, 0xce, 0x6f, 0xab, 0x38, 0xb2, 0x0e, 0x87, 0x49, 0x0a, 0x1c, 0x55, 0x1a, 0xf9, 0x60, + 0x09, 0x2c, 0xb3, 0x3e, 0xd5, 0xb0, 0xd3, 0x24, 0x62, 0x98, 0x6b, 0xb7, 0xf3, 0x43, 0x5c, 0x3d, + 0x8d, 0x94, 0x9f, 0x58, 0x9f, 0x72, 0xb1, 0x52, 0x60, 0x11, 0x75, 0x89, 0x69, 0x71, 0x3a, 0x11, + 0xc5, 0x37, 0xa4, 0x17, 0x02, 0x88, 0x96, 0xbd, 0x2f, 0xce, 0xe4, 0x60, 0x72, 0x40, 0xdb, 0xf3, + 0x0f, 0x99, 0x1a, 0xab, 0xaf, 0xe7, 0x7f, 0x60, 0x89, 0x32, 0x9d, 0xf5, 0x28, 0x97, 0x32, 0xb1, + 0xfb, 0xfb, 0xfc, 0x6c, 0x99, 0x99, 0x75, 0x0e, 0x55, 0x82, 0x14, 0xa9, 0x06, 0x92, 0x8f, 0x88, + 0x01, 0x7f, 0x03, 0x51, 0xea, 0x9d, 0xc4, 0x18, 0x72, 0x83, 0x2b, 0x3a, 0x76, 0xc0, 0x34, 0x58, + 0x76, 0x51, 0x97, 0xb8, 0x5e, 0xd0, 0x1f, 0xdf, 0xc8, 0x96, 0xde, 0x84, 0xc1, 0xcf, 0xb2, 0x5a, + 0x1c, 0x17, 0xe5, 0x12, 0x6c, 0x81, 0x38, 0x67, 0xad, 0x39, 0xbd, 0x8e, 0x11, 0x94, 0x8c, 0x28, + 0x31, 0xee, 0xab, 0x72, 0x17, 0x3c, 0x00, 0xbf, 0x22, 0xdd, 0xb5, 0x31, 0xa2, 0x8c, 0xdf, 0x4c, + 0xc3, 0x26, 0x66, 0x7b, 0x88, 0x0f, 0x73, 0xfc, 0xc6, 0x10, 0x20, 0x33, 0x53, 0xf6, 0xc2, 0x41, + 0xea, 0x1e, 0xf8, 0xe5, 0x91, 0x54, 0x7e, 0xa7, 0xfc, 0x0d, 0x4a, 0xcd, 0x26, 0x7a, 0x17, 0x06, + 0x9a, 0x60, 0xe3, 0xc1, 0xf6, 0x68, 0x36, 0xa6, 0x4c, 0x8c, 0xf0, 0x31, 0x3c, 0x71, 0x81, 0x52, + 0xb3, 0x0b, 0x74, 0x8a, 0x29, 0xfb, 0xe3, 0x9d, 0x00, 0xa2, 0x23, 0xd9, 0xe1, 0x0e, 0xd8, 0x28, + 0x9f, 0xd7, 0x8a, 0xc7, 0x5a, 0x5d, 0x3d, 0x54, 0x2f, 0xea, 0x5a, 0xfd, 0x42, 0x3e, 0xab, 0xa8, + 0x6a, 0xb9, 0xb4, 0x16, 0x4a, 0xaf, 0xbc, 0x7e, 0x9f, 0x8d, 
0xd6, 0x03, 0x91, 0x1b, 0x0f, 0xa0, + 0xc5, 0x5a, 0xf5, 0xa8, 0xa2, 0x9c, 0x95, 0x4b, 0x6b, 0x82, 0x0f, 0x2d, 0xfa, 0x4f, 0xce, 0x23, + 0xd0, 0xa3, 0x4a, 0xf5, 0xf0, 0xb4, 0x72, 0x59, 0x2e, 0xad, 0x85, 0x7d, 0xe8, 0x11, 0x76, 0x74, + 0x1b, 0x3f, 0x47, 0x8d, 0x74, 0xe4, 0xd5, 0x87, 0x4c, 0x48, 0xae, 0x7d, 0xbc, 0xcb, 0x08, 0xd7, + 0x77, 0x19, 0xe1, 0xcb, 0x5d, 0x46, 0x78, 0x7b, 0x9f, 0x09, 0x5d, 0xdf, 0x67, 0x42, 0x9f, 0xef, + 0x33, 0xa1, 0xcb, 0xbd, 0xef, 0xbd, 0x3c, 0xfd, 0x99, 0xdf, 0x04, 0x7f, 0x89, 0x8c, 0x25, 0xfe, + 0xde, 0xff, 0xfd, 0x2d, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x76, 0xd4, 0x3c, 0x4c, 0x06, 0x00, 0x00, } func (m *BTCSpvProof) Marshal() (dAtA []byte, err error) { @@ -715,7 +842,7 @@ func (m *SubmissionData) MarshalToSizedBuffer(dAtA []byte) (int, error) { if m.Epoch != 0 { i = encodeVarintBtccheckpoint(dAtA, i, uint64(m.Epoch)) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x18 } if len(m.TxsInfo) > 0 { for iNdEx := len(m.TxsInfo) - 1; iNdEx >= 0; iNdEx-- { @@ -728,20 +855,18 @@ func (m *SubmissionData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintBtccheckpoint(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } } - if len(m.SubmitterAddress) > 0 { - i -= len(m.SubmitterAddress) - copy(dAtA[i:], m.SubmitterAddress) - i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.SubmitterAddress))) - i-- - dAtA[i] = 0x12 - } - if len(m.VigilanteAddress) > 0 { - i -= len(m.VigilanteAddress) - copy(dAtA[i:], m.VigilanteAddress) - i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.VigilanteAddress))) + if m.VigilanteAddresses != nil { + { + size, err := m.VigilanteAddresses.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBtccheckpoint(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa } @@ -790,6 +915,97 @@ func (m *EpochData) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *CheckpointAddresses) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointAddresses) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckpointAddresses) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Reporter) > 0 { + i -= len(m.Reporter) + copy(dAtA[i:], m.Reporter) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.Reporter))) + i-- + dAtA[i] = 0x12 + } + if len(m.Submitter) > 0 { + i -= len(m.Submitter) + copy(dAtA[i:], m.Submitter) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.Submitter))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BTCCheckpointInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BTCCheckpointInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BTCCheckpointInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VigilanteAddressList) > 0 { + for iNdEx := len(m.VigilanteAddressList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VigilanteAddressList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBtccheckpoint(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0x22 + } + } + if len(m.EarliestBtcBlockHash) > 0 { + i -= len(m.EarliestBtcBlockHash) + copy(dAtA[i:], m.EarliestBtcBlockHash) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.EarliestBtcBlockHash))) + i-- + dAtA[i] = 0x1a + } + if m.EarliestBtcBlockNumber != 0 { + i = encodeVarintBtccheckpoint(dAtA, i, uint64(m.EarliestBtcBlockNumber)) + i-- + dAtA[i] = 0x10 + } + if m.EpochNumber != 0 { + i = encodeVarintBtccheckpoint(dAtA, i, uint64(m.EpochNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintBtccheckpoint(dAtA []byte, offset int, v uint64) int { offset -= sovBtccheckpoint(v) base := offset @@ -883,12 +1099,8 @@ func (m *SubmissionData) Size() (n int) { } var l int _ = l - l = len(m.VigilanteAddress) - if l > 0 { - n += 1 + l + sovBtccheckpoint(uint64(l)) - } - l = len(m.SubmitterAddress) - if l > 0 { + if m.VigilanteAddresses != nil { + l = m.VigilanteAddresses.Size() n += 1 + l + sovBtccheckpoint(uint64(l)) } if len(m.TxsInfo) > 0 { @@ -921,6 +1133,48 @@ func (m *EpochData) Size() (n int) { return n } +func (m *CheckpointAddresses) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Submitter) + if l > 0 { + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + l = len(m.Reporter) + if l > 0 { + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + return n +} + +func (m *BTCCheckpointInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNumber != 0 { + n += 1 + sovBtccheckpoint(uint64(m.EpochNumber)) + } + if m.EarliestBtcBlockNumber != 0 { + n += 1 + sovBtccheckpoint(uint64(m.EarliestBtcBlockNumber)) + } + l = len(m.EarliestBtcBlockHash) + if l > 0 { + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + if len(m.VigilanteAddressList) > 0 { + for _, e := range m.VigilanteAddressList { + l = e.Size() + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + } + return n +} + func sovBtccheckpoint(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1472,9 +1726,9 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VigilanteAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VigilanteAddresses", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowBtccheckpoint @@ -1484,31 +1738,33 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthBtccheckpoint } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthBtccheckpoint } if postIndex > l { return io.ErrUnexpectedEOF } - m.VigilanteAddress = append(m.VigilanteAddress[:0], dAtA[iNdEx:postIndex]...) 
- if m.VigilanteAddress == nil { - m.VigilanteAddress = []byte{} + if m.VigilanteAddresses == nil { + m.VigilanteAddresses = &CheckpointAddresses{} + } + if err := m.VigilanteAddresses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubmitterAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TxsInfo", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowBtccheckpoint @@ -1518,29 +1774,98 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthBtccheckpoint } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthBtccheckpoint } if postIndex > l { return io.ErrUnexpectedEOF } - m.SubmitterAddress = append(m.SubmitterAddress[:0], dAtA[iNdEx:postIndex]...) - if m.SubmitterAddress == nil { - m.SubmitterAddress = []byte{} + m.TxsInfo = append(m.TxsInfo, &TransactionInfo{}) + if err := m.TxsInfo[len(m.TxsInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) + } + m.Epoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Epoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBtccheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EpochData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TxsInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1567,16 +1892,16 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TxsInfo = append(m.TxsInfo, &TransactionInfo{}) - if err := m.TxsInfo[len(m.TxsInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Key = append(m.Key, &SubmissionKey{}) + if err := m.Key[len(m.Key)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) + return fmt.Errorf("proto: wrong wireType = 
%d for field Status", wireType) } - m.Epoch = 0 + m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowBtccheckpoint @@ -1586,7 +1911,7 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Epoch |= uint64(b&0x7F) << shift + m.Status |= BtcStatus(b&0x7F) << shift if b < 0x80 { break } @@ -1612,7 +1937,7 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { } return nil } -func (m *EpochData) Unmarshal(dAtA []byte) error { +func (m *CheckpointAddresses) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1635,17 +1960,17 @@ func (m *EpochData) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EpochData: wiretype end group for non-group") + return fmt.Errorf("proto: CheckpointAddresses: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EpochData: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CheckpointAddresses: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Submitter", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowBtccheckpoint @@ -1655,31 +1980,134 @@ func (m *EpochData) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthBtccheckpoint } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthBtccheckpoint } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = append(m.Key, &SubmissionKey{}) - if err := m.Key[len(m.Key)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Submitter = append(m.Submitter[:0], dAtA[iNdEx:postIndex]...) + if m.Submitter == nil { + m.Submitter = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reporter", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reporter = append(m.Reporter[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Reporter == nil { + m.Reporter = []byte{} } iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBtccheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BTCCheckpointInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BTCCheckpointInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BTCCheckpointInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNumber", wireType) + } + m.EpochNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNumber |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockNumber", wireType) } - m.Status = 0 + m.EarliestBtcBlockNumber = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowBtccheckpoint @@ -1689,11 +2117,79 @@ func (m *EpochData) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Status |= BtcStatus(b&0x7F) << shift + m.EarliestBtcBlockNumber |= uint64(b&0x7F) << shift if b < 0x80 { break } } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EarliestBtcBlockHash = append(m.EarliestBtcBlockHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.EarliestBtcBlockHash == nil { + m.EarliestBtcBlockHash = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VigilanteAddressList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VigilanteAddressList = append(m.VigilanteAddressList, &CheckpointAddresses{}) + if err := m.VigilanteAddressList[len(m.VigilanteAddressList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) diff --git a/x/btccheckpoint/types/query.pb.go b/x/btccheckpoint/types/query.pb.go index 859f6aa50..d7ccced04 100644 --- a/x/btccheckpoint/types/query.pb.go +++ b/x/btccheckpoint/types/query.pb.go @@ -113,25 +113,23 @@ func (m *QueryParamsResponse) GetParams() Params { return Params{} } -type QueryBtcCheckpointHeightAndHashRequest struct { +type QueryBtcCheckpointInfoRequest struct { // Number of epoch for which the earliest checkpointing btc height is requested EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` } -func (m *QueryBtcCheckpointHeightAndHashRequest) Reset() { - *m = QueryBtcCheckpointHeightAndHashRequest{} -} -func (m *QueryBtcCheckpointHeightAndHashRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBtcCheckpointHeightAndHashRequest) ProtoMessage() {} -func (*QueryBtcCheckpointHeightAndHashRequest) Descriptor() ([]byte, []int) { +func (m *QueryBtcCheckpointInfoRequest) Reset() { *m = QueryBtcCheckpointInfoRequest{} } +func (m *QueryBtcCheckpointInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointInfoRequest) ProtoMessage() {} +func (*QueryBtcCheckpointInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor_009c1165ec392ace, []int{2} } -func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_Unmarshal(b []byte) error { +func (m *QueryBtcCheckpointInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryBtcCheckpointInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryBtcCheckpointHeightAndHashRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryBtcCheckpointInfoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -141,46 +139,42 @@ func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_Marshal(b []byte, determini return b[:n], nil } } -func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBtcCheckpointHeightAndHashRequest.Merge(m, src) +func (m *QueryBtcCheckpointInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointInfoRequest.Merge(m, src) } -func (m *QueryBtcCheckpointHeightAndHashRequest) XXX_Size() int { +func (m *QueryBtcCheckpointInfoRequest) XXX_Size() int { return m.Size() } -func (m *QueryBtcCheckpointHeightAndHashRequest) 
XXX_DiscardUnknown() { - xxx_messageInfo_QueryBtcCheckpointHeightAndHashRequest.DiscardUnknown(m) +func (m *QueryBtcCheckpointInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointInfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_QueryBtcCheckpointHeightAndHashRequest proto.InternalMessageInfo +var xxx_messageInfo_QueryBtcCheckpointInfoRequest proto.InternalMessageInfo -func (m *QueryBtcCheckpointHeightAndHashRequest) GetEpochNum() uint64 { +func (m *QueryBtcCheckpointInfoRequest) GetEpochNum() uint64 { if m != nil { return m.EpochNum } return 0 } -// QueryBtcCheckpointHeightAndHashResponse is response type for the Query/BtcCheckpointHeightAndHash RPC method -type QueryBtcCheckpointHeightAndHashResponse struct { - // Earliest btc block number containing given raw checkpoint - EarliestBtcBlockNumber uint64 `protobuf:"varint,1,opt,name=earliest_btc_block_number,json=earliestBtcBlockNumber,proto3" json:"earliest_btc_block_number,omitempty"` - EarliestBtcBlockHash []byte `protobuf:"bytes,2,opt,name=earliest_btc_block_hash,json=earliestBtcBlockHash,proto3" json:"earliest_btc_block_hash,omitempty"` +// QueryBtcCheckpointInfoResponse is response type for the Query/BtcCheckpointInfo RPC method +type QueryBtcCheckpointInfoResponse struct { + Info *BTCCheckpointInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -func (m *QueryBtcCheckpointHeightAndHashResponse) Reset() { - *m = QueryBtcCheckpointHeightAndHashResponse{} -} -func (m *QueryBtcCheckpointHeightAndHashResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBtcCheckpointHeightAndHashResponse) ProtoMessage() {} -func (*QueryBtcCheckpointHeightAndHashResponse) Descriptor() ([]byte, []int) { +func (m *QueryBtcCheckpointInfoResponse) Reset() { *m = QueryBtcCheckpointInfoResponse{} } +func (m *QueryBtcCheckpointInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointInfoResponse) ProtoMessage() {} +func (*QueryBtcCheckpointInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor_009c1165ec392ace, []int{3} } -func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_Unmarshal(b []byte) error { +func (m *QueryBtcCheckpointInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryBtcCheckpointInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryBtcCheckpointHeightAndHashResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryBtcCheckpointInfoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -190,54 +184,45 @@ func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_Marshal(b []byte, determin return b[:n], nil } } -func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBtcCheckpointHeightAndHashResponse.Merge(m, src) +func (m *QueryBtcCheckpointInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointInfoResponse.Merge(m, src) } -func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_Size() int { +func (m *QueryBtcCheckpointInfoResponse) XXX_Size() int { return m.Size() } -func (m *QueryBtcCheckpointHeightAndHashResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBtcCheckpointHeightAndHashResponse.DiscardUnknown(m) +func (m *QueryBtcCheckpointInfoResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueryBtcCheckpointInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_QueryBtcCheckpointHeightAndHashResponse proto.InternalMessageInfo - -func (m *QueryBtcCheckpointHeightAndHashResponse) GetEarliestBtcBlockNumber() uint64 { - if m != nil { - return m.EarliestBtcBlockNumber - } - return 0 -} +var xxx_messageInfo_QueryBtcCheckpointInfoResponse proto.InternalMessageInfo -func (m *QueryBtcCheckpointHeightAndHashResponse) GetEarliestBtcBlockHash() []byte { +func (m *QueryBtcCheckpointInfoResponse) GetInfo() *BTCCheckpointInfo { if m != nil { - return m.EarliestBtcBlockHash + return m.Info } return nil } -// QueryBtcCheckpointsHeightAndHashRequest is request type for the Query/BtcCheckpointsHeightAndHash RPC method -type QueryBtcCheckpointsHeightAndHashRequest struct { +// QueryBtcCheckpointsInfoRequest is request type for the Query/BtcCheckpointsInfo RPC method +type QueryBtcCheckpointsInfoRequest struct { StartEpoch uint64 `protobuf:"varint,1,opt,name=start_epoch,json=startEpoch,proto3" json:"start_epoch,omitempty"` EndEpoch uint64 `protobuf:"varint,2,opt,name=end_epoch,json=endEpoch,proto3" json:"end_epoch,omitempty"` // pagination defines whether to have the pagination in the response Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` } -func (m *QueryBtcCheckpointsHeightAndHashRequest) Reset() { - *m = QueryBtcCheckpointsHeightAndHashRequest{} -} -func (m *QueryBtcCheckpointsHeightAndHashRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBtcCheckpointsHeightAndHashRequest) ProtoMessage() {} -func (*QueryBtcCheckpointsHeightAndHashRequest) Descriptor() ([]byte, []int) { +func (m *QueryBtcCheckpointsInfoRequest) Reset() { *m = QueryBtcCheckpointsInfoRequest{} } +func (m *QueryBtcCheckpointsInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointsInfoRequest) ProtoMessage() {} +func (*QueryBtcCheckpointsInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor_009c1165ec392ace, []int{4} } -func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_Unmarshal(b []byte) error { +func (m *QueryBtcCheckpointsInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryBtcCheckpointsInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryBtcCheckpointsHeightAndHashRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryBtcCheckpointsInfoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -247,62 +232,58 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_Marshal(b []byte, determin return b[:n], nil } } -func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBtcCheckpointsHeightAndHashRequest.Merge(m, src) +func (m *QueryBtcCheckpointsInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointsInfoRequest.Merge(m, src) } -func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_Size() int { +func (m *QueryBtcCheckpointsInfoRequest) XXX_Size() int { return m.Size() } -func (m *QueryBtcCheckpointsHeightAndHashRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBtcCheckpointsHeightAndHashRequest.DiscardUnknown(m) +func (m *QueryBtcCheckpointsInfoRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueryBtcCheckpointsInfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_QueryBtcCheckpointsHeightAndHashRequest proto.InternalMessageInfo +var xxx_messageInfo_QueryBtcCheckpointsInfoRequest proto.InternalMessageInfo -func (m *QueryBtcCheckpointsHeightAndHashRequest) GetStartEpoch() uint64 { +func (m *QueryBtcCheckpointsInfoRequest) GetStartEpoch() uint64 { if m != nil { return m.StartEpoch } return 0 } -func (m *QueryBtcCheckpointsHeightAndHashRequest) GetEndEpoch() uint64 { +func (m *QueryBtcCheckpointsInfoRequest) GetEndEpoch() uint64 { if m != nil { return m.EndEpoch } return 0 } -func (m *QueryBtcCheckpointsHeightAndHashRequest) GetPagination() *query.PageRequest { +func (m *QueryBtcCheckpointsInfoRequest) GetPagination() *query.PageRequest { if m != nil { return m.Pagination } return nil } -// QueryBtcCheckpointsHeightAndHashResponse is response type for the Query/BtcCheckpointsHeightAndHash RPC method -type QueryBtcCheckpointsHeightAndHashResponse struct { - EpochNumbers []uint64 `protobuf:"varint,1,rep,packed,name=epoch_numbers,json=epochNumbers,proto3" json:"epoch_numbers,omitempty"` - EarliestBtcBlockNumbers []uint64 `protobuf:"varint,2,rep,packed,name=earliest_btc_block_numbers,json=earliestBtcBlockNumbers,proto3" json:"earliest_btc_block_numbers,omitempty"` - EarliestBtcBlockHashes [][]byte `protobuf:"bytes,3,rep,name=earliest_btc_block_hashes,json=earliestBtcBlockHashes,proto3" json:"earliest_btc_block_hashes,omitempty"` +// QueryBtcCheckpointsInfoResponse is response type for the Query/BtcCheckpointsInfo RPC method +type QueryBtcCheckpointsInfoResponse struct { + InfoList []*BTCCheckpointInfo `protobuf:"bytes,1,rep,name=info_list,json=infoList,proto3" json:"info_list,omitempty"` // pagination defines the pagination in the response - Pagination *query.PageResponse `protobuf:"bytes,4,opt,name=pagination,proto3" json:"pagination,omitempty"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` } -func (m *QueryBtcCheckpointsHeightAndHashResponse) Reset() { - *m = QueryBtcCheckpointsHeightAndHashResponse{} -} -func (m *QueryBtcCheckpointsHeightAndHashResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBtcCheckpointsHeightAndHashResponse) ProtoMessage() {} -func (*QueryBtcCheckpointsHeightAndHashResponse) Descriptor() ([]byte, []int) { +func (m *QueryBtcCheckpointsInfoResponse) Reset() { *m = QueryBtcCheckpointsInfoResponse{} } +func (m *QueryBtcCheckpointsInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointsInfoResponse) ProtoMessage() {} +func (*QueryBtcCheckpointsInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor_009c1165ec392ace, []int{5} } -func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_Unmarshal(b []byte) error { +func (m *QueryBtcCheckpointsInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryBtcCheckpointsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryBtcCheckpointsHeightAndHashResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryBtcCheckpointsInfoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -312,40 +293,26 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_Marshal(b []byte, determi return b[:n], nil } } -func (m 
*QueryBtcCheckpointsHeightAndHashResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBtcCheckpointsHeightAndHashResponse.Merge(m, src) +func (m *QueryBtcCheckpointsInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointsInfoResponse.Merge(m, src) } -func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_Size() int { +func (m *QueryBtcCheckpointsInfoResponse) XXX_Size() int { return m.Size() } -func (m *QueryBtcCheckpointsHeightAndHashResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBtcCheckpointsHeightAndHashResponse.DiscardUnknown(m) +func (m *QueryBtcCheckpointsInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointsInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_QueryBtcCheckpointsHeightAndHashResponse proto.InternalMessageInfo +var xxx_messageInfo_QueryBtcCheckpointsInfoResponse proto.InternalMessageInfo -func (m *QueryBtcCheckpointsHeightAndHashResponse) GetEpochNumbers() []uint64 { +func (m *QueryBtcCheckpointsInfoResponse) GetInfoList() []*BTCCheckpointInfo { if m != nil { - return m.EpochNumbers + return m.InfoList } return nil } -func (m *QueryBtcCheckpointsHeightAndHashResponse) GetEarliestBtcBlockNumbers() []uint64 { - if m != nil { - return m.EarliestBtcBlockNumbers - } - return nil -} - -func (m *QueryBtcCheckpointsHeightAndHashResponse) GetEarliestBtcBlockHashes() [][]byte { - if m != nil { - return m.EarliestBtcBlockHashes - } - return nil -} - -func (m *QueryBtcCheckpointsHeightAndHashResponse) GetPagination() *query.PageResponse { +func (m *QueryBtcCheckpointsInfoResponse) GetPagination() *query.PageResponse { if m != nil { return m.Pagination } @@ -461,10 +428,10 @@ func (m *QueryEpochSubmissionsResponse) GetPagination() *query.PageResponse { func init() { proto.RegisterType((*QueryParamsRequest)(nil), "babylon.btccheckpoint.v1.QueryParamsRequest") proto.RegisterType((*QueryParamsResponse)(nil), "babylon.btccheckpoint.v1.QueryParamsResponse") - proto.RegisterType((*QueryBtcCheckpointHeightAndHashRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightAndHashRequest") - proto.RegisterType((*QueryBtcCheckpointHeightAndHashResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightAndHashResponse") - proto.RegisterType((*QueryBtcCheckpointsHeightAndHashRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointsHeightAndHashRequest") - proto.RegisterType((*QueryBtcCheckpointsHeightAndHashResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointsHeightAndHashResponse") + proto.RegisterType((*QueryBtcCheckpointInfoRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointInfoRequest") + proto.RegisterType((*QueryBtcCheckpointInfoResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointInfoResponse") + proto.RegisterType((*QueryBtcCheckpointsInfoRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointsInfoRequest") + proto.RegisterType((*QueryBtcCheckpointsInfoResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointsInfoResponse") proto.RegisterType((*QueryEpochSubmissionsRequest)(nil), "babylon.btccheckpoint.v1.QueryEpochSubmissionsRequest") proto.RegisterType((*QueryEpochSubmissionsResponse)(nil), "babylon.btccheckpoint.v1.QueryEpochSubmissionsResponse") } @@ -472,53 +439,48 @@ func init() { func init() { proto.RegisterFile("babylon/btccheckpoint/query.proto", fileDescriptor_009c1165ec392ace) } var fileDescriptor_009c1165ec392ace = []byte{ - // 733 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x4f, 0x13, 
0x41, - 0x14, 0xee, 0x6c, 0x0b, 0xd1, 0x01, 0x13, 0x33, 0x12, 0x29, 0x0b, 0x96, 0xb2, 0x46, 0xa8, 0x46, - 0x76, 0x53, 0x08, 0x1a, 0x42, 0x62, 0xa4, 0x06, 0x25, 0x31, 0x41, 0x5c, 0xe3, 0xc5, 0x4b, 0x33, - 0xbb, 0x4c, 0x76, 0x37, 0xb4, 0x3b, 0xcb, 0xce, 0x94, 0xd8, 0x18, 0x2f, 0xfa, 0x07, 0x48, 0xe2, - 0xd1, 0xb3, 0x67, 0xff, 0x03, 0xaf, 0x72, 0x24, 0xf1, 0xe2, 0xc9, 0x18, 0xf0, 0x7f, 0xf0, 0x6a, - 0x66, 0x76, 0x5a, 0x6c, 0xd9, 0xb5, 0xfc, 0xb8, 0x6d, 0x66, 0xbe, 0xf7, 0xbe, 0xef, 0x7d, 0xef, - 0xcd, 0x5b, 0x38, 0xe3, 0x60, 0xa7, 0xdd, 0xa0, 0xa1, 0xe5, 0x70, 0xd7, 0xf5, 0x89, 0xbb, 0x1d, - 0xd1, 0x20, 0xe4, 0xd6, 0x4e, 0x8b, 0xc4, 0x6d, 0x33, 0x8a, 0x29, 0xa7, 0xa8, 0xa8, 0x20, 0x66, - 0x0f, 0xc4, 0xdc, 0xad, 0xea, 0x63, 0x1e, 0xf5, 0xa8, 0x04, 0x59, 0xe2, 0x2b, 0xc1, 0xeb, 0x53, - 0x1e, 0xa5, 0x5e, 0x83, 0x58, 0x38, 0x0a, 0x2c, 0x1c, 0x86, 0x94, 0x63, 0x1e, 0xd0, 0x90, 0xa9, - 0xdb, 0x3b, 0x2e, 0x65, 0x4d, 0xca, 0x2c, 0x07, 0x33, 0x92, 0xd0, 0x58, 0xbb, 0x55, 0x87, 0x70, - 0x5c, 0xb5, 0x22, 0xec, 0x05, 0xa1, 0x04, 0x2b, 0xac, 0x91, 0x2e, 0x2e, 0xc2, 0x31, 0x6e, 0x76, - 0xf2, 0xdd, 0x4e, 0xc7, 0xf4, 0x6a, 0x95, 0x50, 0x63, 0x0c, 0xa2, 0xe7, 0x82, 0x70, 0x53, 0xc6, - 0xdb, 0x64, 0xa7, 0x45, 0x18, 0x37, 0x5e, 0xc2, 0x6b, 0x3d, 0xa7, 0x2c, 0xa2, 0x21, 0x23, 0xe8, - 0x01, 0x1c, 0x4e, 0x78, 0x8a, 0xa0, 0x0c, 0x2a, 0x23, 0x0b, 0x65, 0x33, 0xcb, 0x06, 0x33, 0x89, - 0xac, 0x15, 0xf6, 0x7f, 0x4e, 0xe7, 0x6c, 0x15, 0x65, 0xac, 0xc1, 0x59, 0x99, 0xb6, 0xc6, 0xdd, - 0x47, 0x5d, 0xf4, 0x3a, 0x09, 0x3c, 0x9f, 0xaf, 0x86, 0x5b, 0xeb, 0x98, 0xf9, 0x4a, 0x00, 0x9a, - 0x84, 0x97, 0x49, 0x44, 0x5d, 0xbf, 0x1e, 0xb6, 0x9a, 0x92, 0xac, 0x60, 0x5f, 0x92, 0x07, 0x1b, - 0xad, 0xa6, 0xf1, 0x09, 0xc0, 0xb9, 0x81, 0x79, 0x94, 0xe4, 0x65, 0x38, 0x41, 0x70, 0xdc, 0x08, - 0x08, 0xe3, 0x75, 0x87, 0xbb, 0x75, 0xa7, 0x41, 0xdd, 0x6d, 0x91, 0xd5, 0x21, 0xb1, 0x4a, 0x7c, - 0xbd, 0x03, 0xa8, 0x71, 0xb7, 0x26, 0xae, 0x37, 0xe4, 0x2d, 0x5a, 0x82, 0xe3, 0x29, 0xa1, 0x3e, - 0x66, 0x7e, 0x51, 0x2b, 0x83, 0xca, 0xa8, 0x3d, 0xd6, 0x1f, 0x28, 0x98, 0x8d, 0x2f, 0xa9, 0xea, - 0x58, 0x6a, 0x99, 0xd3, 0x70, 0x84, 0x71, 0x1c, 0xf3, 0xba, 0xac, 0x4d, 0xe9, 0x81, 0xf2, 0x68, - 0x4d, 0x9c, 0x48, 0x1f, 0xc2, 0x2d, 0x75, 0xad, 0x29, 0x1f, 0xc2, 0xad, 0xe4, 0xf2, 0x31, 0x84, - 0xc7, 0xe3, 0x51, 0xcc, 0xcb, 0x96, 0xcc, 0x9a, 0xc9, 0x2c, 0x99, 0x62, 0x96, 0xcc, 0x64, 0x64, - 0xd5, 0x2c, 0x99, 0x9b, 0xd8, 0x23, 0x8a, 0xd9, 0xfe, 0x27, 0xd2, 0xd8, 0xd3, 0x60, 0x65, 0xb0, - 0x62, 0x65, 0xe8, 0x4d, 0x78, 0xa5, 0xdb, 0x19, 0x87, 0xc4, 0x62, 0x14, 0xf2, 0x95, 0x82, 0x3d, - 0xda, 0xe9, 0x8e, 0x38, 0x43, 0x2b, 0x50, 0xcf, 0x74, 0x9d, 0x15, 0x35, 0x19, 0x31, 0x9e, 0x6e, - 0x3b, 0xcb, 0x68, 0x99, 0xf0, 0x9d, 0xb0, 0x62, 0xbe, 0x9c, 0xaf, 0x8c, 0x9e, 0x6c, 0xd9, 0xba, - 0xbc, 0x45, 0x4f, 0x7a, 0x1c, 0x29, 0x48, 0x47, 0xe6, 0x06, 0x3a, 0x92, 0x54, 0xd6, 0x63, 0xc9, - 0x7b, 0x00, 0xa7, 0xa4, 0x25, 0xd2, 0xe9, 0x17, 0x2d, 0xa7, 0x19, 0x30, 0x26, 0x5e, 0xec, 0x69, - 0x06, 0xb4, 0xaf, 0x31, 0xda, 0xb9, 0x1b, 0xf3, 0x19, 0xc0, 0x1b, 0x19, 0x2a, 0x54, 0x37, 0x56, - 0x60, 0x61, 0x9b, 0xb4, 0x93, 0x26, 0x88, 0x52, 0x33, 0xdf, 0xe3, 0x71, 0xf0, 0x53, 0xd2, 0xb6, - 0x65, 0x50, 0x9f, 0x5b, 0xda, 0xb9, 0xdd, 0x5a, 0xf8, 0x33, 0x04, 0x87, 0xa4, 0x4e, 0xf4, 0x01, - 0xc0, 0xe1, 0xe4, 0xe9, 0xa3, 0xbb, 0xd9, 0x62, 0x4e, 0x6e, 0x1c, 0x7d, 0xfe, 0x94, 0xe8, 0x84, - 0xdd, 0xa8, 0xbc, 0xfb, 0xfe, 0xfb, 0xa3, 0x66, 0xa0, 0xb2, 0x95, 0xbe, 0xea, 0x76, 0xab, 0x6a, - 0x23, 0xa2, 0x03, 0x00, 0xf5, 0xec, 0x3d, 0x81, 0x1e, 0x0e, 0xe0, 0x1d, 0xb8, 0xaa, 0xf4, 0xd5, - 0x0b, 0x64, 0x50, 
0xd5, 0xcc, 0xcb, 0x6a, 0xe6, 0xd0, 0xad, 0xec, 0x6a, 0xde, 0x74, 0xa7, 0xed, - 0x2d, 0xfa, 0x06, 0xe0, 0xe4, 0x7f, 0x9e, 0x2a, 0x3a, 0x93, 0xa2, 0xd4, 0xc5, 0xa4, 0xd7, 0x2e, - 0x92, 0x42, 0x55, 0x35, 0x23, 0xab, 0x9a, 0x44, 0x13, 0x99, 0x55, 0xa1, 0xaf, 0x00, 0x5e, 0xed, - 0x9f, 0x6d, 0x74, 0x6f, 0x00, 0x77, 0xc6, 0x93, 0xd4, 0xef, 0x9f, 0x39, 0x4e, 0x09, 0x5d, 0x96, - 0x42, 0x17, 0x51, 0xf5, 0x54, 0xf6, 0x5b, 0xec, 0x38, 0x45, 0xed, 0xd9, 0xfe, 0x61, 0x09, 0x1c, - 0x1c, 0x96, 0xc0, 0xaf, 0xc3, 0x12, 0xd8, 0x3b, 0x2a, 0xe5, 0x0e, 0x8e, 0x4a, 0xb9, 0x1f, 0x47, - 0xa5, 0xdc, 0xab, 0x25, 0x2f, 0xe0, 0x7e, 0xcb, 0x31, 0x5d, 0xda, 0xec, 0xa4, 0x75, 0x7d, 0x1c, - 0x84, 0x5d, 0x8e, 0xd7, 0x7d, 0x2c, 0xbc, 0x1d, 0x11, 0xe6, 0x0c, 0xcb, 0xdf, 0xf2, 0xe2, 0xdf, - 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0xe7, 0xea, 0xb4, 0x84, 0x08, 0x00, 0x00, + // 653 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x4d, 0x6f, 0xd3, 0x30, + 0x18, 0xc7, 0xeb, 0xae, 0x9b, 0x36, 0xef, 0x02, 0x66, 0x87, 0x92, 0x8d, 0xac, 0x8b, 0x04, 0x1d, + 0x2f, 0x4b, 0xd4, 0x4d, 0x30, 0x26, 0x10, 0x48, 0x9d, 0x78, 0x13, 0x08, 0x46, 0x81, 0x0b, 0x97, + 0xca, 0xc9, 0xbc, 0x34, 0x5a, 0x63, 0x67, 0xb5, 0x53, 0x51, 0x21, 0x2e, 0xf0, 0x01, 0x40, 0xe2, + 0x33, 0xc0, 0x89, 0x23, 0x5c, 0x91, 0xb8, 0xed, 0x38, 0x89, 0x0b, 0x27, 0x84, 0x5a, 0x3e, 0x08, + 0x8a, 0xe3, 0xf5, 0x3d, 0x6a, 0x3b, 0x71, 0xab, 0x9c, 0xe7, 0xef, 0xff, 0xcf, 0x7f, 0x3f, 0x8f, + 0x0b, 0x57, 0x6c, 0x6c, 0x37, 0xaa, 0x8c, 0x5a, 0xb6, 0x70, 0x9c, 0x0a, 0x71, 0xf6, 0x03, 0xe6, + 0x51, 0x61, 0x1d, 0x84, 0xa4, 0xd6, 0x30, 0x83, 0x1a, 0x13, 0x0c, 0x65, 0x55, 0x89, 0xd9, 0x53, + 0x62, 0xd6, 0x0b, 0xda, 0x82, 0xcb, 0x5c, 0x26, 0x8b, 0xac, 0xe8, 0x57, 0x5c, 0xaf, 0x2d, 0xb9, + 0x8c, 0xb9, 0x55, 0x62, 0xe1, 0xc0, 0xb3, 0x30, 0xa5, 0x4c, 0x60, 0xe1, 0x31, 0xca, 0xd5, 0xd7, + 0x4b, 0x0e, 0xe3, 0x3e, 0xe3, 0x96, 0x8d, 0x39, 0x89, 0x6d, 0xac, 0x7a, 0xc1, 0x26, 0x02, 0x17, + 0xac, 0x00, 0xbb, 0x1e, 0x95, 0xc5, 0xaa, 0xd6, 0x18, 0x0e, 0x17, 0xe0, 0x1a, 0xf6, 0x8f, 0xf7, + 0xbb, 0x38, 0xbc, 0xa6, 0x97, 0x55, 0x96, 0x1a, 0x0b, 0x10, 0x3d, 0x8d, 0x0c, 0x77, 0xa4, 0xbe, + 0x44, 0x0e, 0x42, 0xc2, 0x85, 0xf1, 0x02, 0x9e, 0xe9, 0x59, 0xe5, 0x01, 0xa3, 0x9c, 0xa0, 0x5b, + 0x70, 0x26, 0xf6, 0xc9, 0x82, 0x1c, 0x58, 0x9d, 0x5f, 0xcf, 0x99, 0x49, 0x31, 0x98, 0xb1, 0xb2, + 0x98, 0x39, 0xfc, 0xbd, 0x9c, 0x2a, 0x29, 0x95, 0x71, 0x13, 0x9e, 0x93, 0xdb, 0x16, 0x85, 0xb3, + 0xdd, 0xae, 0x7e, 0x40, 0xf7, 0x98, 0xf2, 0x45, 0x8b, 0x70, 0x8e, 0x04, 0xcc, 0xa9, 0x94, 0x69, + 0xe8, 0x4b, 0x8f, 0x4c, 0x69, 0x56, 0x2e, 0x3c, 0x0e, 0x7d, 0x03, 0x43, 0x3d, 0x49, 0xad, 0xf8, + 0x6e, 0xc3, 0x8c, 0x47, 0xf7, 0x98, 0xa2, 0xbb, 0x9c, 0x4c, 0x57, 0x7c, 0xbe, 0xdd, 0xb7, 0x85, + 0x14, 0x1a, 0x9f, 0xc1, 0x30, 0x0f, 0xde, 0x8d, 0xb8, 0x0c, 0xe7, 0xb9, 0xc0, 0x35, 0x51, 0x96, + 0x5c, 0x0a, 0x12, 0xca, 0xa5, 0x3b, 0xd1, 0x8a, 0x3c, 0x03, 0xdd, 0x55, 0x9f, 0xd3, 0xea, 0x0c, + 0x74, 0x37, 0xfe, 0x78, 0x17, 0xc2, 0xce, 0x8d, 0x66, 0xa7, 0x24, 0xe7, 0x05, 0x33, 0xbe, 0x7e, + 0x33, 0xba, 0x7e, 0x33, 0xee, 0x32, 0x75, 0xfd, 0xe6, 0x0e, 0x76, 0x89, 0x72, 0x2e, 0x75, 0x29, + 0x8d, 0xaf, 0x00, 0x2e, 0x27, 0x82, 0xaa, 0x34, 0xee, 0xc3, 0xb9, 0xe8, 0x50, 0xe5, 0xaa, 0xc7, + 0x45, 0x16, 0xe4, 0xa6, 0x26, 0x8d, 0x64, 0x36, 0x52, 0x3f, 0xf2, 0xb8, 0x40, 0xf7, 0x7a, 0xa8, + 0xd3, 0x92, 0x3a, 0x3f, 0x92, 0x3a, 0xc6, 0xe8, 0xc1, 0x7e, 0x07, 0xe0, 0x92, 0xc4, 0x96, 0x69, + 0x3c, 0x0b, 0x6d, 0xdf, 0xe3, 0x3c, 0x1a, 0x84, 0x71, 0x1a, 0xa0, 0x2f, 0xbc, 0xf4, 0x89, 0xc3, + 0xfb, 0x04, 
0x54, 0x1f, 0x0e, 0x52, 0xa8, 0xe8, 0x6e, 0xc0, 0xcc, 0x3e, 0x69, 0x70, 0x95, 0x5a, + 0x3e, 0x39, 0xb5, 0x8e, 0xf8, 0x21, 0x69, 0x94, 0xa4, 0xe8, 0xbf, 0xa5, 0xb5, 0xfe, 0x63, 0x1a, + 0x4e, 0x4b, 0x4e, 0xf4, 0x1e, 0xc0, 0x99, 0x78, 0xa2, 0xd0, 0x95, 0x64, 0x98, 0xc1, 0x41, 0xd6, + 0xd6, 0xc6, 0xac, 0x8e, 0xdd, 0x8d, 0xd5, 0xb7, 0x3f, 0xff, 0x7e, 0x4c, 0x1b, 0x28, 0x67, 0x0d, + 0x7f, 0x41, 0xea, 0x05, 0xf5, 0xd0, 0xa0, 0x6f, 0x00, 0x9e, 0x1e, 0x18, 0x44, 0xb4, 0x39, 0xc2, + 0x2e, 0x69, 0xf0, 0xb5, 0xeb, 0x93, 0x0b, 0x15, 0xf2, 0x9a, 0x44, 0xce, 0xa3, 0xf3, 0xc9, 0xc8, + 0xaf, 0xdb, 0x2d, 0xf5, 0x06, 0x7d, 0x01, 0x10, 0x0d, 0xce, 0x0c, 0x9a, 0xc8, 0xbf, 0xfb, 0x3d, + 0xd0, 0xb6, 0x4e, 0xa0, 0x54, 0xe8, 0x2b, 0x12, 0x7d, 0x11, 0x9d, 0x4d, 0x44, 0x47, 0xdf, 0x01, + 0x3c, 0xd5, 0xdf, 0xa5, 0xe8, 0xda, 0x08, 0xcb, 0x84, 0xe1, 0xd2, 0x36, 0x27, 0xd6, 0x29, 0xd0, + 0x2d, 0x09, 0xba, 0x81, 0x0a, 0x63, 0x65, 0x6c, 0xf1, 0xce, 0x16, 0xc5, 0x27, 0x87, 0x4d, 0x1d, + 0x1c, 0x35, 0x75, 0xf0, 0xa7, 0xa9, 0x83, 0x0f, 0x2d, 0x3d, 0x75, 0xd4, 0xd2, 0x53, 0xbf, 0x5a, + 0x7a, 0xea, 0xe5, 0x55, 0xd7, 0x13, 0x95, 0xd0, 0x36, 0x1d, 0xe6, 0x1f, 0x6f, 0xeb, 0x54, 0xb0, + 0x47, 0xdb, 0x1e, 0xaf, 0xfa, 0x5c, 0x44, 0x23, 0x20, 0xdc, 0x9e, 0x91, 0xff, 0x5b, 0x1b, 0xff, + 0x02, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x86, 0x0a, 0xe9, 0xa5, 0x07, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -535,10 +497,10 @@ const _ = grpc.SupportPackageIsVersion4 type QueryClient interface { // Parameters queries the parameters of the module. Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) - // BtcCheckpointHeightAndHash returns earliest block height and hash for given rawcheckpoint - BtcCheckpointHeightAndHash(ctx context.Context, in *QueryBtcCheckpointHeightAndHashRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointHeightAndHashResponse, error) - // BtcCheckpointsHeightAndHash returns earliest block height and hash for a range of epochs - BtcCheckpointsHeightAndHash(ctx context.Context, in *QueryBtcCheckpointsHeightAndHashRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointsHeightAndHashResponse, error) + // BtcCheckpointInfo returns checkpoint info for a given epoch + BtcCheckpointInfo(ctx context.Context, in *QueryBtcCheckpointInfoRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointInfoResponse, error) + // BtcCheckpointsInfo returns checkpoint info for a range of epochs + BtcCheckpointsInfo(ctx context.Context, in *QueryBtcCheckpointsInfoRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointsInfoResponse, error) EpochSubmissions(ctx context.Context, in *QueryEpochSubmissionsRequest, opts ...grpc.CallOption) (*QueryEpochSubmissionsResponse, error) } @@ -559,18 +521,18 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts . return out, nil } -func (c *queryClient) BtcCheckpointHeightAndHash(ctx context.Context, in *QueryBtcCheckpointHeightAndHashRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointHeightAndHashResponse, error) { - out := new(QueryBtcCheckpointHeightAndHashResponse) - err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointHeightAndHash", in, out, opts...) +func (c *queryClient) BtcCheckpointInfo(ctx context.Context, in *QueryBtcCheckpointInfoRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointInfoResponse, error) { + out := new(QueryBtcCheckpointInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointInfo", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *queryClient) BtcCheckpointsHeightAndHash(ctx context.Context, in *QueryBtcCheckpointsHeightAndHashRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointsHeightAndHashResponse, error) { - out := new(QueryBtcCheckpointsHeightAndHashResponse) - err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointsHeightAndHash", in, out, opts...) +func (c *queryClient) BtcCheckpointsInfo(ctx context.Context, in *QueryBtcCheckpointsInfoRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointsInfoResponse, error) { + out := new(QueryBtcCheckpointsInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointsInfo", in, out, opts...) if err != nil { return nil, err } @@ -590,10 +552,10 @@ func (c *queryClient) EpochSubmissions(ctx context.Context, in *QueryEpochSubmis type QueryServer interface { // Parameters queries the parameters of the module. Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) - // BtcCheckpointHeightAndHash returns earliest block height and hash for given rawcheckpoint - BtcCheckpointHeightAndHash(context.Context, *QueryBtcCheckpointHeightAndHashRequest) (*QueryBtcCheckpointHeightAndHashResponse, error) - // BtcCheckpointsHeightAndHash returns earliest block height and hash for a range of epochs - BtcCheckpointsHeightAndHash(context.Context, *QueryBtcCheckpointsHeightAndHashRequest) (*QueryBtcCheckpointsHeightAndHashResponse, error) + // BtcCheckpointInfo returns checkpoint info for a given epoch + BtcCheckpointInfo(context.Context, *QueryBtcCheckpointInfoRequest) (*QueryBtcCheckpointInfoResponse, error) + // BtcCheckpointsInfo returns checkpoint info for a range of epochs + BtcCheckpointsInfo(context.Context, *QueryBtcCheckpointsInfoRequest) (*QueryBtcCheckpointsInfoResponse, error) EpochSubmissions(context.Context, *QueryEpochSubmissionsRequest) (*QueryEpochSubmissionsResponse, error) } @@ -604,11 +566,11 @@ type UnimplementedQueryServer struct { func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") } -func (*UnimplementedQueryServer) BtcCheckpointHeightAndHash(ctx context.Context, req *QueryBtcCheckpointHeightAndHashRequest) (*QueryBtcCheckpointHeightAndHashResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointHeightAndHash not implemented") +func (*UnimplementedQueryServer) BtcCheckpointInfo(ctx context.Context, req *QueryBtcCheckpointInfoRequest) (*QueryBtcCheckpointInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointInfo not implemented") } -func (*UnimplementedQueryServer) BtcCheckpointsHeightAndHash(ctx context.Context, req *QueryBtcCheckpointsHeightAndHashRequest) (*QueryBtcCheckpointsHeightAndHashResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointsHeightAndHash not implemented") +func (*UnimplementedQueryServer) BtcCheckpointsInfo(ctx context.Context, req *QueryBtcCheckpointsInfoRequest) (*QueryBtcCheckpointsInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointsInfo not implemented") } func (*UnimplementedQueryServer) EpochSubmissions(ctx context.Context, req *QueryEpochSubmissionsRequest) (*QueryEpochSubmissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method EpochSubmissions not implemented") @@ -636,38 +598,38 @@ func 
_Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } -func _Query_BtcCheckpointHeightAndHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryBtcCheckpointHeightAndHashRequest) +func _Query_BtcCheckpointInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBtcCheckpointInfoRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).BtcCheckpointHeightAndHash(ctx, in) + return srv.(QueryServer).BtcCheckpointInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointHeightAndHash", + FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).BtcCheckpointHeightAndHash(ctx, req.(*QueryBtcCheckpointHeightAndHashRequest)) + return srv.(QueryServer).BtcCheckpointInfo(ctx, req.(*QueryBtcCheckpointInfoRequest)) } return interceptor(ctx, in, info, handler) } -func _Query_BtcCheckpointsHeightAndHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryBtcCheckpointsHeightAndHashRequest) +func _Query_BtcCheckpointsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBtcCheckpointsInfoRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).BtcCheckpointsHeightAndHash(ctx, in) + return srv.(QueryServer).BtcCheckpointsInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointsHeightAndHash", + FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointsInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).BtcCheckpointsHeightAndHash(ctx, req.(*QueryBtcCheckpointsHeightAndHashRequest)) + return srv.(QueryServer).BtcCheckpointsInfo(ctx, req.(*QueryBtcCheckpointsInfoRequest)) } return interceptor(ctx, in, info, handler) } @@ -699,12 +661,12 @@ var _Query_serviceDesc = grpc.ServiceDesc{ Handler: _Query_Params_Handler, }, { - MethodName: "BtcCheckpointHeightAndHash", - Handler: _Query_BtcCheckpointHeightAndHash_Handler, + MethodName: "BtcCheckpointInfo", + Handler: _Query_BtcCheckpointInfo_Handler, }, { - MethodName: "BtcCheckpointsHeightAndHash", - Handler: _Query_BtcCheckpointsHeightAndHash_Handler, + MethodName: "BtcCheckpointsInfo", + Handler: _Query_BtcCheckpointsInfo_Handler, }, { MethodName: "EpochSubmissions", @@ -771,7 +733,7 @@ func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *QueryBtcCheckpointHeightAndHashRequest) Marshal() (dAtA []byte, err error) { +func (m *QueryBtcCheckpointInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -781,12 +743,12 @@ func (m *QueryBtcCheckpointHeightAndHashRequest) Marshal() (dAtA []byte, err err return dAtA[:n], nil } -func (m *QueryBtcCheckpointHeightAndHashRequest) MarshalTo(dAtA []byte) (int, error) { +func (m 
*QueryBtcCheckpointInfoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryBtcCheckpointHeightAndHashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -799,7 +761,7 @@ func (m *QueryBtcCheckpointHeightAndHashRequest) MarshalToSizedBuffer(dAtA []byt return len(dAtA) - i, nil } -func (m *QueryBtcCheckpointHeightAndHashResponse) Marshal() (dAtA []byte, err error) { +func (m *QueryBtcCheckpointInfoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -809,32 +771,32 @@ func (m *QueryBtcCheckpointHeightAndHashResponse) Marshal() (dAtA []byte, err er return dAtA[:n], nil } -func (m *QueryBtcCheckpointHeightAndHashResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointInfoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryBtcCheckpointHeightAndHashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.EarliestBtcBlockHash) > 0 { - i -= len(m.EarliestBtcBlockHash) - copy(dAtA[i:], m.EarliestBtcBlockHash) - i = encodeVarintQuery(dAtA, i, uint64(len(m.EarliestBtcBlockHash))) - i-- - dAtA[i] = 0x12 - } - if m.EarliestBtcBlockNumber != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.EarliestBtcBlockNumber)) + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *QueryBtcCheckpointsHeightAndHashRequest) Marshal() (dAtA []byte, err error) { +func (m *QueryBtcCheckpointsInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -844,12 +806,12 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) Marshal() (dAtA []byte, err er return dAtA[:n], nil } -func (m *QueryBtcCheckpointsHeightAndHashRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointsInfoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryBtcCheckpointsHeightAndHashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -879,7 +841,7 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) MarshalToSizedBuffer(dAtA []by return len(dAtA) - i, nil } -func (m *QueryBtcCheckpointsHeightAndHashResponse) Marshal() (dAtA []byte, err error) { +func (m *QueryBtcCheckpointsInfoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -889,12 +851,12 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) Marshal() (dAtA []byte, err e return dAtA[:n], nil } -func (m *QueryBtcCheckpointsHeightAndHashResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointsInfoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryBtcCheckpointsHeightAndHashResponse) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { +func (m *QueryBtcCheckpointsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -909,52 +871,21 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) MarshalToSizedBuffer(dAtA []b i = encodeVarintQuery(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 - } - if len(m.EarliestBtcBlockHashes) > 0 { - for iNdEx := len(m.EarliestBtcBlockHashes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.EarliestBtcBlockHashes[iNdEx]) - copy(dAtA[i:], m.EarliestBtcBlockHashes[iNdEx]) - i = encodeVarintQuery(dAtA, i, uint64(len(m.EarliestBtcBlockHashes[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.EarliestBtcBlockNumbers) > 0 { - dAtA5 := make([]byte, len(m.EarliestBtcBlockNumbers)*10) - var j4 int - for _, num := range m.EarliestBtcBlockNumbers { - for num >= 1<<7 { - dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j4++ - } - dAtA5[j4] = uint8(num) - j4++ - } - i -= j4 - copy(dAtA[i:], dAtA5[:j4]) - i = encodeVarintQuery(dAtA, i, uint64(j4)) - i-- dAtA[i] = 0x12 } - if len(m.EpochNumbers) > 0 { - dAtA7 := make([]byte, len(m.EpochNumbers)*10) - var j6 int - for _, num := range m.EpochNumbers { - for num >= 1<<7 { - dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j6++ - } - dAtA7[j6] = uint8(num) - j6++ + if len(m.InfoList) > 0 { + for iNdEx := len(m.InfoList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InfoList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i -= j6 - copy(dAtA[i:], dAtA7[:j6]) - i = encodeVarintQuery(dAtA, i, uint64(j6)) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -1079,7 +1010,7 @@ func (m *QueryParamsResponse) Size() (n int) { return n } -func (m *QueryBtcCheckpointHeightAndHashRequest) Size() (n int) { +func (m *QueryBtcCheckpointInfoRequest) Size() (n int) { if m == nil { return 0 } @@ -1091,23 +1022,20 @@ func (m *QueryBtcCheckpointHeightAndHashRequest) Size() (n int) { return n } -func (m *QueryBtcCheckpointHeightAndHashResponse) Size() (n int) { +func (m *QueryBtcCheckpointInfoResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.EarliestBtcBlockNumber != 0 { - n += 1 + sovQuery(uint64(m.EarliestBtcBlockNumber)) - } - l = len(m.EarliestBtcBlockHash) - if l > 0 { + if m.Info != nil { + l = m.Info.Size() n += 1 + l + sovQuery(uint64(l)) } return n } -func (m *QueryBtcCheckpointsHeightAndHashRequest) Size() (n int) { +func (m *QueryBtcCheckpointsInfoRequest) Size() (n int) { if m == nil { return 0 } @@ -1126,29 +1054,15 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) Size() (n int) { return n } -func (m *QueryBtcCheckpointsHeightAndHashResponse) Size() (n int) { +func (m *QueryBtcCheckpointsInfoResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.EpochNumbers) > 0 { - l = 0 - for _, e := range m.EpochNumbers { - l += sovQuery(uint64(e)) - } - n += 1 + sovQuery(uint64(l)) + l - } - if len(m.EarliestBtcBlockNumbers) > 0 { - l = 0 - for _, e := range m.EarliestBtcBlockNumbers { - l += sovQuery(uint64(e)) - } - n += 1 + sovQuery(uint64(l)) + l - } - if len(m.EarliestBtcBlockHashes) > 0 { - for _, b := range m.EarliestBtcBlockHashes { - l = len(b) + if len(m.InfoList) > 0 { + for _, e := range m.InfoList { + l = e.Size() n += 1 + l + sovQuery(uint64(l)) } } @@ -1333,7 +1247,7 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryBtcCheckpointHeightAndHashRequest) Unmarshal(dAtA []byte) error { +func 
(m *QueryBtcCheckpointInfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1356,10 +1270,10 @@ func (m *QueryBtcCheckpointHeightAndHashRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightAndHashRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryBtcCheckpointInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightAndHashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryBtcCheckpointInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1402,7 +1316,7 @@ func (m *QueryBtcCheckpointHeightAndHashRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryBtcCheckpointHeightAndHashResponse) Unmarshal(dAtA []byte) error { +func (m *QueryBtcCheckpointInfoResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1425,36 +1339,17 @@ func (m *QueryBtcCheckpointHeightAndHashResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightAndHashResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryBtcCheckpointInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightAndHashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryBtcCheckpointInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockNumber", wireType) - } - m.EarliestBtcBlockNumber = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EarliestBtcBlockNumber |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -1464,24 +1359,26 @@ func (m *QueryBtcCheckpointHeightAndHashResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthQuery } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthQuery } if postIndex > l { return io.ErrUnexpectedEOF } - m.EarliestBtcBlockHash = append(m.EarliestBtcBlockHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.EarliestBtcBlockHash == nil { - m.EarliestBtcBlockHash = []byte{} + if m.Info == nil { + m.Info = &BTCCheckpointInfo{} + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -1505,7 +1402,7 @@ func (m *QueryBtcCheckpointHeightAndHashResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryBtcCheckpointsHeightAndHashRequest) Unmarshal(dAtA []byte) error { +func (m *QueryBtcCheckpointsInfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1528,10 +1425,10 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryBtcCheckpointsHeightAndHashRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryBtcCheckpointsInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBtcCheckpointsHeightAndHashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryBtcCheckpointsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1629,7 +1526,7 @@ func (m *QueryBtcCheckpointsHeightAndHashRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryBtcCheckpointsHeightAndHashResponse) Unmarshal(dAtA []byte) error { +func (m *QueryBtcCheckpointsInfoResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1652,169 +1549,17 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) Unmarshal(dAtA []byte) error fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryBtcCheckpointsHeightAndHashResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryBtcCheckpointsInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBtcCheckpointsHeightAndHashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryBtcCheckpointsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EpochNumbers = append(m.EpochNumbers, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.EpochNumbers) == 0 { - m.EpochNumbers = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EpochNumbers = 
append(m.EpochNumbers, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field EpochNumbers", wireType) - } - case 2: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EarliestBtcBlockNumbers = append(m.EarliestBtcBlockNumbers, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.EarliestBtcBlockNumbers) == 0 { - m.EarliestBtcBlockNumbers = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EarliestBtcBlockNumbers = append(m.EarliestBtcBlockNumbers, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockNumbers", wireType) - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockHashes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InfoList", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -1824,25 +1569,27 @@ func (m *QueryBtcCheckpointsHeightAndHashResponse) Unmarshal(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthQuery } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthQuery } if postIndex > l { return io.ErrUnexpectedEOF } - m.EarliestBtcBlockHashes = append(m.EarliestBtcBlockHashes, make([]byte, postIndex-iNdEx)) - copy(m.EarliestBtcBlockHashes[len(m.EarliestBtcBlockHashes)-1], dAtA[iNdEx:postIndex]) + m.InfoList = append(m.InfoList, &BTCCheckpointInfo{}) + if err := m.InfoList[len(m.InfoList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) } diff --git a/x/btccheckpoint/types/query.pb.gw.go b/x/btccheckpoint/types/query.pb.gw.go index 2cd7c74c1..06ee42575 100644 --- a/x/btccheckpoint/types/query.pb.gw.go +++ b/x/btccheckpoint/types/query.pb.gw.go @@ -51,8 +51,8 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal } -func request_Query_BtcCheckpointHeightAndHash_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBtcCheckpointHeightAndHashRequest +func 
request_Query_BtcCheckpointInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointInfoRequest var metadata runtime.ServerMetadata var ( @@ -73,13 +73,13 @@ func request_Query_BtcCheckpointHeightAndHash_0(ctx context.Context, marshaler r return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) } - msg, err := client.BtcCheckpointHeightAndHash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.BtcCheckpointInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -func local_request_Query_BtcCheckpointHeightAndHash_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBtcCheckpointHeightAndHashRequest +func local_request_Query_BtcCheckpointInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointInfoRequest var metadata runtime.ServerMetadata var ( @@ -100,43 +100,43 @@ func local_request_Query_BtcCheckpointHeightAndHash_0(ctx context.Context, marsh return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) } - msg, err := server.BtcCheckpointHeightAndHash(ctx, &protoReq) + msg, err := server.BtcCheckpointInfo(ctx, &protoReq) return msg, metadata, err } var ( - filter_Query_BtcCheckpointsHeightAndHash_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} + filter_Query_BtcCheckpointsInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) -func request_Query_BtcCheckpointsHeightAndHash_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBtcCheckpointsHeightAndHashRequest +func request_Query_BtcCheckpointsInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointsInfoRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BtcCheckpointsHeightAndHash_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BtcCheckpointsInfo_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.BtcCheckpointsHeightAndHash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.BtcCheckpointsInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -func local_request_Query_BtcCheckpointsHeightAndHash_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq 
QueryBtcCheckpointsHeightAndHashRequest +func local_request_Query_BtcCheckpointsInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointsInfoRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BtcCheckpointsHeightAndHash_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BtcCheckpointsInfo_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.BtcCheckpointsHeightAndHash(ctx, &protoReq) + msg, err := server.BtcCheckpointsInfo(ctx, &protoReq) return msg, metadata, err } @@ -242,7 +242,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) - mux.Handle("GET", pattern_Query_BtcCheckpointHeightAndHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BtcCheckpointInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -253,7 +253,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_BtcCheckpointHeightAndHash_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_BtcCheckpointInfo_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -261,11 +261,11 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_BtcCheckpointHeightAndHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_BtcCheckpointInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle("GET", pattern_Query_BtcCheckpointsHeightAndHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BtcCheckpointsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -276,7 +276,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_BtcCheckpointsHeightAndHash_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_BtcCheckpointsInfo_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -284,7 +284,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_BtcCheckpointsHeightAndHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ forward_Query_BtcCheckpointsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -372,7 +372,7 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) - mux.Handle("GET", pattern_Query_BtcCheckpointHeightAndHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BtcCheckpointInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -381,18 +381,18 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_BtcCheckpointHeightAndHash_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_BtcCheckpointInfo_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_BtcCheckpointHeightAndHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_BtcCheckpointInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle("GET", pattern_Query_BtcCheckpointsHeightAndHash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BtcCheckpointsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -401,14 +401,14 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_BtcCheckpointsHeightAndHash_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_BtcCheckpointsInfo_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_BtcCheckpointsHeightAndHash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_BtcCheckpointsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -438,9 +438,9 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie var ( pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "btccheckpoint", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_BtcCheckpointHeightAndHash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"babylon", "btccheckpoint", "v1", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_BtcCheckpointInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"babylon", "btccheckpoint", "v1", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_BtcCheckpointsHeightAndHash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"babylon", "btccheckpoint", "v1"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_BtcCheckpointsInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"babylon", "btccheckpoint", "v1"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_EpochSubmissions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"babylon", "btccheckpoint", "v1", "epoch_num", "submissions"}, "", runtime.AssumeColonVerbOpt(false))) ) @@ -448,9 +448,9 @@ var ( var ( forward_Query_Params_0 = runtime.ForwardResponseMessage - forward_Query_BtcCheckpointHeightAndHash_0 = runtime.ForwardResponseMessage + forward_Query_BtcCheckpointInfo_0 = runtime.ForwardResponseMessage - forward_Query_BtcCheckpointsHeightAndHash_0 = runtime.ForwardResponseMessage + forward_Query_BtcCheckpointsInfo_0 = runtime.ForwardResponseMessage forward_Query_EpochSubmissions_0 = runtime.ForwardResponseMessage ) diff --git a/x/btccheckpoint/types/types.go b/x/btccheckpoint/types/types.go index 1cd12952e..97dfc1211 100644 --- a/x/btccheckpoint/types/types.go +++ b/x/btccheckpoint/types/types.go @@ -16,7 +16,7 @@ import ( // Modelling proofs as separate Proof1 and Proof2, as this is more explicit than // []*ParsedProof. 
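// A RawCheckpointSubmission (below) bundles the reporter address that relayed the
// submission, the two proofs for the BTC transactions carrying the checkpoint data,
// and the parsed raw BTC checkpoint itself.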
type RawCheckpointSubmission struct { - Submitter sdk.AccAddress + Reporter sdk.AccAddress Proof1 ParsedProof Proof2 ParsedProof CheckpointData btctxformatter.RawBtcCheckpoint @@ -43,7 +43,7 @@ func NewRawCheckpointSubmission( checkpointData btctxformatter.RawBtcCheckpoint, ) RawCheckpointSubmission { r := RawCheckpointSubmission{ - Submitter: a, + Reporter: a, Proof1: p1, Proof2: p2, CheckpointData: checkpointData, @@ -91,10 +91,12 @@ func (rsc *RawCheckpointSubmission) GetSubmissionKey() SubmissionKey { func (rsc *RawCheckpointSubmission) GetSubmissionData(epochNum uint64, txsInfo []*TransactionInfo) SubmissionData { return SubmissionData{ - VigilanteAddress: rsc.Submitter.Bytes(), - SubmitterAddress: rsc.CheckpointData.SubmitterAddress, - TxsInfo: txsInfo, - Epoch: epochNum, + VigilanteAddresses: &CheckpointAddresses{ + Reporter: rsc.Reporter.Bytes(), + Submitter: rsc.CheckpointData.SubmitterAddress, + }, + TxsInfo: txsInfo, + Epoch: epochNum, } } From e65d5426d78e375f04e053616a9e16a85b80b513 Mon Sep 17 00:00:00 2001 From: Vitalis Salis Date: Tue, 31 Jan 2023 13:11:05 +0300 Subject: [PATCH 33/37] fix: add BLST_PORTABLE flag before build instruction (#295) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1d448bd7f..123435446 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ build-linux: GOOS=linux GOARCH=$(if $(findstring aarch64,$(shell uname -m)) || $(findstring arm64,$(shell uname -m)),arm64,amd64) LEDGER_ENABLED=false $(MAKE) build $(BUILD_TARGETS): go.sum $(BUILDDIR)/ - go $@ -mod=readonly $(BUILD_FLAGS) $(BUILD_ARGS) ./... + CGO_CFLAGS="-O -D__BLST_PORTABLE__" go $@ -mod=readonly $(BUILD_FLAGS) $(BUILD_ARGS) ./... $(BUILDDIR)/: mkdir -p $(BUILDDIR)/ From 3ac1222a2714e97df4b91519d02c59fffed12d37 Mon Sep 17 00:00:00 2001 From: Vitalis Salis Date: Thu, 2 Feb 2023 10:10:49 +0300 Subject: [PATCH 34/37] Add Apache 2.0 licence (#298) --- LICENSE | 201 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 201 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..a441aa146 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Babylonchain, Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From f039053f8a3145d636e43dba6901ee3b972e39d9 Mon Sep 17 00:00:00 2001 From: Vitalis Salis Date: Thu, 2 Feb 2023 11:01:30 +0300 Subject: [PATCH 35/37] Clean and split up README (#297) Co-authored-by: Cirrus Gai --- README.md | 161 ++++++----------------------------------------- docs/README.md | 10 +++ docs/dev-reqs.md | 5 ++ docs/run-node.md | 135 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 170 insertions(+), 141 deletions(-) create mode 100644 docs/README.md create mode 100644 docs/dev-reqs.md create mode 100644 docs/run-node.md diff --git a/README.md b/README.md index 4f6321038..e853020a2 100644 --- a/README.md +++ b/README.md @@ -1,162 +1,41 @@ # Babylon -## Requirements +Bringing Bitcoin security to Cosmos and beyond. 
-- Go 1.19 +[![Website](https://badgen.net/badge/icon/website?label=)](https://babylonchain.io) +[![Whitepaper](https://badgen.net/badge/icon/whitepaper?label=)](https://arxiv.org/abs/2207.08392) +[![Twitter](https://badgen.net/badge/icon/twitter?icon=twitter&label)](https://twitter.com/babylon_chain) +[![Discord](https://badgen.net/badge/icon/discord?icon=discord&label)](https://discord.gg/babylonchain) -## Development requirements +## Build and install -- Go 1.19 -- Docker +The babylond application based on the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is the main application of the Babylon network. +This repository is used to build the Babylon core application to join the Babylon network. -## Building +### Requirements +To build and install, you need to have Go 1.19 available. +Follow the instructions on the [Golang page](https://go.dev/doc/install) to do that. -To build the chain, simply: +To build the binary: ```console make build ``` -This will lead to the creation of a `babylond` executable under the `build` -directory. +The binary will then be available at `./build/babylond` . -## Installing - -To build the chain and install a babylon executable: +To install: ```console make install ``` -## Testing - -```console -make test -``` - -## Running a node - -The following commands assume that the `babylond` executable has been -installed. If the repository was only built, then `./build/babylond` should be -used in its place. - -### Generating the node configuration -The configuration for a single node can be created through the `testnet` -command. While the testnet command can create an arbitrary number of nodes that -communicate on a testnet, here we focus on the setup of a single node. -```console -babylond testnet \ - --v 1 \ - --output-dir ./.testnet \ - --starting-ip-address 192.168.10.2 \ - --keyring-backend test \ - --chain-id chain-test -``` - -The flags specify the following: -- `--output-dir `: Specifies that the testnet files should - reside under this directory. -- `--v `: Leads to the creation of `N` nodes, each one residing under the - `/node{i}`. In this case `i={0..N-1}`. -- `--starting-ip-address `: Specifies the IP address for the nodes. For example, - `192.168.10.2` leads to the first node running on `192.168.10.2:46656`, the - second one on `192.168.10.3:46656` etc. -- `--keyring-backend {os,file,test}`: Specifies the backend to use for the keyring. Available - choices include `os`, `file`, and `test`. We use `test` for convenience. -- `--chain-id`: An identifier for the chain. Useful when perrforming operations - later. - -In this case, we generated a single node. If we take a look under `.testnet`: -```console -$ ls .testnet -gentxs node0 -``` - -The `gentxs` directory contains the genesis transactions. It contains -transactions that assign bbn tokens to a single address that is defined for each -node. - -The `node0` directory contains the the following, -```console -$ ls .testnet/node0/babylond -config data key_seed.json keyring-test -``` - -A brief description of the contents: -- `config`: Contains the configuration files for the node. -- `data`: Contains the database storage for the node. -- `key_seed.json`: Seed to generate the keys maintained by the keyring. -- `keyring-test`: Contains the test keyring. This directory was created because - we provided the `--keyring-backend test` flag. 
The `testnet` command, creates - a validator node named `node{i}` (depends on the node name), and assigns - bbn tokens to it through a transaction written to `.testnet/gentxs/node{i}.json`. - The keys for this node can be pointed to by the `node{i}` name. - -### Running the node -```console -babylond start --home ./.testnet/node0/babylond -``` - -### Logs - -The logs for a particular node can be found under -`.testnets/node{id}/babylond/babylond.log`. - -### Performing queries +## Documentation -After building a node and starting it, you can perform queries. -```console -babylond --home .testnet/node{i}/babylond/ --chain-id \ - query -``` +For the most up-to-date documentation please visit [docs.babylonchain.io](https://docs.babylonchain.io) -For example, in order to get the hashes maintained by the `btcligthclient` -module: -```console -$ babylond --home .testnet/node0/babylond/ --chain-id chain-test query btclightclient hashes +## Joining the testnet -hashes: -- 00000000000000000002bf1c218853bc920f41f74491e6c92c6bc6fdc881ab47 -pagination: - next_key: null - total: "1" -``` - -### Submitting transactions - -After building a node and running it, one can send transactions as follows: -```console -babylond --home .testnet/node{i}/babylond --chain-id \ - --keyring-backend {os,file,test} --fees \ - --from --broadcast-mode {sync,async,block} \ - tx [data] -``` +Please follow the instructions on the [Joining the Testnet documentation page](https://docs.babylonchain.io/docs/testnet/overview). -The `--fees` flag specifies the amount of fees that we are willing to pay and -the denomination and the `--from` flag denotes the name of the key that we want -to use to sign the transaction (i.e. from which account we want this -transaction to happen). The `--broadcast-mode` specifies how long we want to -wait until we receive a response from the CLI: `async` means immediately, -`sync` means after the transaction has been validated through `CheckTx`, -and `block` means after the transaction has been processed by the next block. +## Contributing -For example, in the `btclightclient` module, in order -to submit a header, one should: -```console -babylond --home .testnet/node0/babylond --chain-id chain-test \ - --keyring-backend test --fees 100bbn \ - --from node0 --broadcast-mode block \ - tx btclightclient insert-header -``` - -## Running a multi-node testnet - -We provide support for running a multi-node testnet using Docker. To build it - -```console -make localnet-start -``` - -The corresponding node directories can be found under `.testnets` -```console -$ ls .testnets -gentxs node0 node1 node2 node3 -``` +The [docs](./docs) directory contains the necessary information on how to get started using the babylond executable for development purposes. diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..4ac29268d --- /dev/null +++ b/docs/README.md @@ -0,0 +1,10 @@ +# Babylon Developer Docs + +This page contains documentation targeted at individuals that want to contribute to the Babylon repository. + +For user-facing docs, visit the [Babylon documentation page](https://docs.babylonchain.io). 
+ +## Contents + +- [Development Requirements](./dev-reqs.md) +- [Running a node for testing purposes](./run-node.md) diff --git a/docs/dev-reqs.md b/docs/dev-reqs.md new file mode 100644 index 000000000..0e04112f7 --- /dev/null +++ b/docs/dev-reqs.md @@ -0,0 +1,5 @@ +## Development Requirements + +To develop the Babylon repository, the following requirements are recommended: +- Golang version 1.19 +- Docker diff --git a/docs/run-node.md b/docs/run-node.md new file mode 100644 index 000000000..705bb72f1 --- /dev/null +++ b/docs/run-node.md @@ -0,0 +1,135 @@ +## Running a node + +The following commands assume that the `babylond` executable has been +installed. If the repository was only built, then `./build/babylond` should be +used in its place. + +### Generating the node configuration +The configuration for a single node can be created through the `testnet` +command. While the `testnet` command can create an arbitrary number of nodes that +communicate on a testnet, here we focus on the setup of a single node. +```console +babylond testnet \ + --v 1 \ + --output-dir ./.testnet \ + --starting-ip-address 192.168.10.2 \ + --keyring-backend test \ + --chain-id chain-test +``` + +The flags specify the following: +- `--output-dir `: Specifies that the testnet files should + reside under this directory. +- `--v `: Leads to the creation of `N` nodes, each one residing under the + `/node{i}`. In this case `i={0..N-1}`. +- `--starting-ip-address `: Specifies the IP address for the nodes. For example, + `192.168.10.2` leads to the first node running on `192.168.10.2:46656`, the + second one on `192.168.10.3:46656` etc. +- `--keyring-backend {os,file,test}`: Specifies the backend to use for the keyring. Available + choices include `os`, `file`, and `test`. We use `test` for convenience. +- `--chain-id`: An identifier for the chain. Useful when perrforming operations + later. + +In this case, we generated a single node. If we take a look under `.testnet`: +```console +$ ls .testnet +gentxs node0 +``` + +The `gentxs` directory contains the genesis transactions. It contains +transactions that assign bbn tokens to a single address that is defined for each +node. + +The `node0` directory contains the the following, +```console +$ ls .testnet/node0/babylond +config data key_seed.json keyring-test +``` + +A brief description of the contents: +- `config`: Contains the configuration files for the node. +- `data`: Contains the database storage for the node. +- `key_seed.json`: Seed to generate the keys maintained by the keyring. +- `keyring-test`: Contains the test keyring. This directory was created because + we provided the `--keyring-backend test` flag. The `testnet` command, creates + a validator node named `node{i}` (depends on the node name), and assigns + bbn tokens to it through a transaction written to `.testnet/gentxs/node{i}.json`. + The keys for this node can be pointed to by the `node{i}` name. + +### Running the node +```console +babylond start --home ./.testnet/node0/babylond +``` + +### Logs + +The logs for a particular node can be found under +`.testnets/node{id}/babylond/babylond.log`. + +### Performing queries + +After building a node and starting it, you can perform queries. 
+```console +babylond --home .testnet/node{i}/babylond/ --chain-id \ + query +``` + +For example, in order to get the hashes maintained by the `btcligthclient` +module: +```console +$ babylond --home .testnet/node0/babylond/ --chain-id chain-test query btclightclient hashes + +hashes: +- 00000000000000000002bf1c218853bc920f41f74491e6c92c6bc6fdc881ab47 +pagination: + next_key: null + total: "1" +``` + +### Submitting transactions + +After building a node and running it, one can send transactions as follows: +```console +babylond --home .testnet/node{i}/babylond --chain-id \ + --keyring-backend {os,file,test} --fees \ + --from --broadcast-mode {sync,async,block} \ + tx [data] +``` + +The `--fees` flag specifies the amount of fees that we are willing to pay and +the denomination and the `--from` flag denotes the name of the key that we want +to use to sign the transaction (i.e. from which account we want this +transaction to happen). The `--broadcast-mode` specifies how long we want to +wait until we receive a response from the CLI: `async` means immediately, +`sync` means after the transaction has been validated through `CheckTx`, +and `block` means after the transaction has been processed by the next block. + +For example, in the `btclightclient` module, in order +to submit a header, one should: +```console +babylond --home .testnet/node0/babylond --chain-id chain-test \ + --keyring-backend test --fees 100bbn \ + --from node0 --broadcast-mode block \ + tx btclightclient insert-header +``` + +## Running a multi-node testnet + +We provide support for running a multi-node testnet using Docker. To build it + +```console +make localnet-start +``` + +The corresponding node directories can be found under `.testnets` +```console +$ ls .testnets +gentxs node0 node1 node2 node3 +``` + +## Testing + +```console +make test +``` + From 0a0c300146e2c42bfb2e9febce265d0f1bd9ea07 Mon Sep 17 00:00:00 2001 From: Cirrus Gai Date: Thu, 2 Feb 2023 20:29:02 +0800 Subject: [PATCH 36/37] Fix: Monitor/fix reported checkpoint BTC height query bugs (#299) --- app/app.go | 2 +- x/checkpointing/abci.go | 2 +- x/checkpointing/keeper/keeper.go | 12 +- x/checkpointing/types/hooks.go | 4 +- x/monitor/keeper/grpc_query_params.go | 7 ++ x/monitor/keeper/grpc_query_test.go | 158 ++++++++++++++++++++++++++ x/monitor/keeper/hooks.go | 3 +- x/monitor/keeper/keeper.go | 16 ++- x/monitor/types/expected_keepers.go | 1 + 9 files changed, 192 insertions(+), 13 deletions(-) create mode 100644 x/monitor/keeper/grpc_query_test.go diff --git a/app/app.go b/app/app.go index c964260b7..6790210f7 100644 --- a/app/app.go +++ b/app/app.go @@ -508,7 +508,7 @@ func NewBabylonApp( privSigner.ClientCtx, ) app.CheckpointingKeeper = *checkpointingKeeper.SetHooks( - checkpointingtypes.NewMultiCheckpointingHooks(app.EpochingKeeper.Hooks(), app.ZoneConciergeKeeper.Hooks()), + checkpointingtypes.NewMultiCheckpointingHooks(app.EpochingKeeper.Hooks(), app.ZoneConciergeKeeper.Hooks(), app.MonitorKeeper.Hooks()), ) app.ZoneConciergeKeeper.SetCheckpointingKeeper(app.CheckpointingKeeper) diff --git a/x/checkpointing/abci.go b/x/checkpointing/abci.go index feba0e8d0..c7348f161 100644 --- a/x/checkpointing/abci.go +++ b/x/checkpointing/abci.go @@ -26,7 +26,7 @@ func BeginBlocker(ctx sdk.Context, k keeper.Keeper, req abci.RequestBeginBlock) if epoch.IsFirstBlock(ctx) { err := k.InitValidatorBLSSet(ctx) if err != nil { - panic(fmt.Errorf("failed to store validator BLS set")) + panic(fmt.Errorf("failed to store validator BLS set: %w", err)) } } if 
epoch.IsSecondBlock(ctx) { diff --git a/x/checkpointing/keeper/keeper.go b/x/checkpointing/keeper/keeper.go index 63246c5e7..69c0e0647 100644 --- a/x/checkpointing/keeper/keeper.go +++ b/x/checkpointing/keeper/keeper.go @@ -3,7 +3,6 @@ package keeper import ( "errors" "fmt" - txformat "github.com/babylonchain/babylon/btctxformatter" "github.com/babylonchain/babylon/crypto/bls12381" @@ -210,6 +209,11 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcC // can skip the checks if it is identical with the local checkpoint that is not accumulating if ckptWithMeta.Ckpt.Equal(ckpt) && ckptWithMeta.Status != types.Accumulating { + // record verified checkpoint + err = k.AfterRawCheckpointBlsSigVerified(ctx, ckpt) + if err != nil { + return nil, fmt.Errorf("failed to record verified checkpoint of epoch %d for monitoring: %w", ckpt.EpochNum, err) + } return ckptWithMeta, nil } @@ -244,7 +248,7 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcC // record verified checkpoint err = k.AfterRawCheckpointBlsSigVerified(ctx, ckpt) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to record verified checkpoint of epoch %d for monitoring: %w", ckpt.EpochNum, err) } // now the checkpoint's multi-sig is valid, if the lastcommithash is the @@ -270,6 +274,10 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcC return nil, types.ErrConflictingCheckpoint } +func (k *Keeper) SetEpochingKeeper(ek types.EpochingKeeper) { + k.epochingKeeper = ek +} + // SetCheckpointSubmitted sets the status of a checkpoint to SUBMITTED, // and records the associated state update in lifecycle func (k Keeper) SetCheckpointSubmitted(ctx sdk.Context, epoch uint64) { diff --git a/x/checkpointing/types/hooks.go b/x/checkpointing/types/hooks.go index d003f59eb..50a97c406 100644 --- a/x/checkpointing/types/hooks.go +++ b/x/checkpointing/types/hooks.go @@ -49,7 +49,9 @@ func (h MultiCheckpointingHooks) AfterRawCheckpointFinalized(ctx sdk.Context, ep func (h MultiCheckpointingHooks) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *RawCheckpoint) error { for i := range h { - return h[i].AfterRawCheckpointBlsSigVerified(ctx, ckpt) + if err := h[i].AfterRawCheckpointBlsSigVerified(ctx, ckpt); err != nil { + return err + } } return nil } diff --git a/x/monitor/keeper/grpc_query_params.go b/x/monitor/keeper/grpc_query_params.go index 4cf229c7a..9ba4a1b00 100644 --- a/x/monitor/keeper/grpc_query_params.go +++ b/x/monitor/keeper/grpc_query_params.go @@ -9,6 +9,13 @@ import ( "google.golang.org/grpc/status" ) +// Querier is used as Keeper will have duplicate methods if used directly, and gRPC names take precedence over keeper +type Querier struct { + Keeper +} + +var _ types.QueryServer = Querier{} + func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") diff --git a/x/monitor/keeper/grpc_query_test.go b/x/monitor/keeper/grpc_query_test.go new file mode 100644 index 000000000..44dc9ee11 --- /dev/null +++ b/x/monitor/keeper/grpc_query_test.go @@ -0,0 +1,158 @@ +package keeper_test + +import ( + "github.com/babylonchain/babylon/btctxformatter" + "github.com/babylonchain/babylon/testutil/datagen" + "github.com/babylonchain/babylon/testutil/mocks" + btclightclienttypes "github.com/babylonchain/babylon/x/btclightclient/types" + ckpttypes "github.com/babylonchain/babylon/x/checkpointing/types" + 
"github.com/babylonchain/babylon/x/epoching/testepoching" + types2 "github.com/babylonchain/babylon/x/epoching/types" + monitorkeeper "github.com/babylonchain/babylon/x/monitor/keeper" + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "math/rand" + "testing" +) + +func FuzzQueryEndedEpochBtcHeight(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + // a genesis validator is generated for setup + helper := testepoching.NewHelper(t) + lck := helper.App.BTCLightClientKeeper + mk := helper.App.MonitorKeeper + ek := helper.EpochingKeeper + querier := monitorkeeper.Querier{Keeper: mk} + queryHelper := baseapp.NewQueryServerTestHelper(helper.Ctx, helper.App.InterfaceRegistry()) + types.RegisterQueryServer(queryHelper, querier) + queryClient := types.NewQueryClient(queryHelper) + + // BeginBlock of block 1, and thus entering epoch 1 + ctx := helper.BeginBlock() + epoch := ek.GetEpoch(ctx) + require.Equal(t, uint64(1), epoch.EpochNumber) + + // Insert header tree + tree := datagen.NewBTCHeaderTree() + root := lck.GetBaseBTCHeader(ctx) + tree.Add(root, nil) + tree.GenRandomBTCHeaderTree(1, 10, root, func(header *btclightclienttypes.BTCHeaderInfo) bool { + err := lck.InsertHeader(ctx, header.Header) + require.NoError(t, err) + return true + }) + + // EndBlock of block 1 + ctx = helper.EndBlock() + + // go to BeginBlock of block 11, and thus entering epoch 2 + for i := uint64(0); i < ek.GetParams(ctx).EpochInterval; i++ { + ctx = helper.GenAndApplyEmptyBlock() + } + epoch = ek.GetEpoch(ctx) + require.Equal(t, uint64(2), epoch.EpochNumber) + + // query epoch 0 ended BTC light client height, should return base height + req := types.QueryEndedEpochBtcHeightRequest{ + EpochNum: 0, + } + resp, err := queryClient.EndedEpochBtcHeight(ctx, &req) + require.NoError(t, err) + require.Equal(t, lck.GetBaseBTCHeader(ctx).Height, resp.BtcLightClientHeight) + + // query epoch 1 ended BTC light client height, should return tip height + req = types.QueryEndedEpochBtcHeightRequest{ + EpochNum: 1, + } + resp, err = queryClient.EndedEpochBtcHeight(ctx, &req) + require.NoError(t, err) + require.Equal(t, lck.GetTipInfo(ctx).Height, resp.BtcLightClientHeight) + }) +} + +func FuzzQueryReportedCheckpointBtcHeight(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + // a genesis validator is generated for setup + helper := testepoching.NewHelper(t) + ctl := gomock.NewController(t) + defer ctl.Finish() + lck := helper.App.BTCLightClientKeeper + mk := helper.App.MonitorKeeper + ek := helper.EpochingKeeper + ck := helper.App.CheckpointingKeeper + mockEk := mocks.NewMockEpochingKeeper(ctl) + ck.SetEpochingKeeper(mockEk) + querier := monitorkeeper.Querier{Keeper: mk} + queryHelper := baseapp.NewQueryServerTestHelper(helper.Ctx, helper.App.InterfaceRegistry()) + types.RegisterQueryServer(queryHelper, querier) + queryClient := types.NewQueryClient(queryHelper) + + // BeginBlock of block 1, and thus entering epoch 1 + ctx := helper.BeginBlock() + epoch := ek.GetEpoch(ctx) + require.Equal(t, uint64(1), epoch.EpochNumber) + + // Insert header tree + tree := datagen.NewBTCHeaderTree() + root := lck.GetBaseBTCHeader(ctx) + tree.Add(root, nil) + tree.GenRandomBTCHeaderTree(1, 10, root, func(header *btclightclienttypes.BTCHeaderInfo) bool { + err := lck.InsertHeader(ctx, header.Header) + 
require.NoError(t, err) + return true + }) + + // Add checkpoint + valBlsSet, privKeys := datagen.GenerateValidatorSetWithBLSPrivKeys(int(datagen.RandomIntOtherThan(0, 10))) + valSet := make([]types2.Validator, len(valBlsSet.ValSet)) + for i, val := range valBlsSet.ValSet { + valSet[i] = types2.Validator{ + Addr: []byte(val.ValidatorAddress), + Power: int64(val.VotingPower), + } + err := ck.CreateRegistration(ctx, val.BlsPubKey, []byte(val.ValidatorAddress)) + require.NoError(t, err) + } + mockCkptWithMeta := &ckpttypes.RawCheckpointWithMeta{Ckpt: datagen.GenerateLegitimateRawCheckpoint(privKeys)} + mockEk.EXPECT().GetValidatorSet(gomock.Any(), gomock.Eq(mockCkptWithMeta.Ckpt.EpochNum)).Return(valSet).AnyTimes() + // make sure voting power is always sufficient + mockEk.EXPECT().GetTotalVotingPower(gomock.Any(), gomock.Eq(mockCkptWithMeta.Ckpt.EpochNum)).Return(int64(0)).AnyTimes() + err := ck.AddRawCheckpoint( + ctx, + mockCkptWithMeta, + ) + require.NoError(t, err) + + // Verify checkpoint + btcCkpt := btctxformatter.RawBtcCheckpoint{ + Epoch: mockCkptWithMeta.Ckpt.EpochNum, + LastCommitHash: *mockCkptWithMeta.Ckpt.LastCommitHash, + BitMap: mockCkptWithMeta.Ckpt.Bitmap, + SubmitterAddress: datagen.GenRandomByteArray(btctxformatter.AddressLength), + BlsSig: *mockCkptWithMeta.Ckpt.BlsMultiSig, + } + err = ck.VerifyCheckpoint(ctx, btcCkpt) + require.NoError(t, err) + + // query reported checkpoint BTC light client height + req := types.QueryReportedCheckpointBtcHeightRequest{ + CkptHash: mockCkptWithMeta.Ckpt.HashStr(), + } + resp, err := queryClient.ReportedCheckpointBtcHeight(ctx, &req) + require.NoError(t, err) + require.Equal(t, lck.GetTipInfo(ctx).Height, resp.BtcLightClientHeight) + + // query not reported checkpoint BTC light client height, should expect an ErrCheckpointNotReported + req = types.QueryReportedCheckpointBtcHeightRequest{ + CkptHash: datagen.GenRandomHexStr(32), + } + _, err = queryClient.ReportedCheckpointBtcHeight(ctx, &req) + require.ErrorIs(t, err, types.ErrCheckpointNotReported) + }) +} diff --git a/x/monitor/keeper/hooks.go b/x/monitor/keeper/hooks.go index 91bd8437d..49a24e0e7 100644 --- a/x/monitor/keeper/hooks.go +++ b/x/monitor/keeper/hooks.go @@ -16,8 +16,7 @@ type Hooks struct { k Keeper } -var _ HandledHooks = Hooks{} - +// Create new distribution hooks func (k Keeper) Hooks() Hooks { return Hooks{k} } func (h Hooks) AfterEpochBegins(ctx sdk.Context, epoch uint64) {} diff --git a/x/monitor/keeper/keeper.go b/x/monitor/keeper/keeper.go index 156ebf28f..308ee45e8 100644 --- a/x/monitor/keeper/keeper.go +++ b/x/monitor/keeper/keeper.go @@ -65,18 +65,18 @@ func (k Keeper) updateBtcLightClientHeightForCheckpoint(ctx sdk.Context, ckpt *c store := ctx.KVStore(k.storeKey) ckptHashStr := ckpt.HashStr() + storeKey, err := types.GetCheckpointReportedLightClientHeightKey(ckptHashStr) + if err != nil { + return err + } + // if the checkpoint exists, meaning an earlier checkpoint with a lower btc height is already recorded // we should keep the lower btc height in the store - if store.Has([]byte(ckptHashStr)) { + if store.Has(storeKey) { k.Logger(ctx).With("module", fmt.Sprintf("checkpoint %s is already recorded", ckptHashStr)) return nil } - storeKey, err := types.GetCheckpointReportedLightClientHeightKey(ckptHashStr) - if err != nil { - return err - } - currentTipHeight := k.btcLightClientKeeper.GetTipInfo(ctx).Height store.Set(storeKey, sdk.Uint64ToBigEndian(currentTipHeight)) @@ -98,6 +98,10 @@ func (k Keeper) removeCheckpointRecord(ctx sdk.Context, ckpt 
*ckpttypes.RawCheck } func (k Keeper) LightclientHeightAtEpochEnd(ctx sdk.Context, epoch uint64) (uint64, error) { + if epoch == 0 { + return k.btcLightClientKeeper.GetBaseBTCHeader(ctx).Height, nil + } + store := ctx.KVStore(k.storeKey) btcHeightBytes := store.Get(types.GetEpochEndLightClientHeightKey(epoch)) diff --git a/x/monitor/types/expected_keepers.go b/x/monitor/types/expected_keepers.go index 6777fbff5..6f3566003 100644 --- a/x/monitor/types/expected_keepers.go +++ b/x/monitor/types/expected_keepers.go @@ -20,4 +20,5 @@ type BankKeeper interface { type BTCLightClientKeeper interface { GetTipInfo(ctx sdk.Context) *lc.BTCHeaderInfo + GetBaseBTCHeader(ctx sdk.Context) *lc.BTCHeaderInfo } From c960956de6bdb8d3faf624b641c0107e23584855 Mon Sep 17 00:00:00 2001 From: Runchao Han Date: Fri, 3 Feb 2023 14:52:02 +1100 Subject: [PATCH 37/37] zoneconcierge API: pagtinating chain IDs API (#300) --- client/docs/swagger-ui/swagger.yaml | 117 ++++++++++ proto/babylon/btccheckpoint/query.proto | 2 +- proto/babylon/epoching/v1/query.proto | 4 +- proto/babylon/zoneconcierge/query.proto | 10 +- x/btccheckpoint/types/query.pb.go | 2 +- x/epoching/types/query.pb.go | 4 +- x/zoneconcierge/keeper/grpc_query.go | 19 +- x/zoneconcierge/keeper/grpc_query_test.go | 29 +-- x/zoneconcierge/types/query.pb.go | 272 ++++++++++++++++------ x/zoneconcierge/types/query.pb.gw.go | 18 ++ 10 files changed, 379 insertions(+), 98 deletions(-) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index df090159d..a4c2ae549 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -6740,6 +6740,38 @@ paths: type: array items: type: string + title: >- + chain_ids are IDs of the chains in ascending alphabetical + order + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } title: >- QueryChainListResponse is response type for the Query/ChainList RPC method @@ -6935,6 +6967,63 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}: @@ -15289,6 +15378,34 @@ definitions: type: array items: type: string + title: chain_ids are IDs of the chains in ascending alphabetical order + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } title: QueryChainListResponse is response type for the Query/ChainList RPC method babylon.zoneconcierge.v1.QueryEpochChainInfoResponse: type: object diff --git a/proto/babylon/btccheckpoint/query.proto b/proto/babylon/btccheckpoint/query.proto index 763f6640c..6cad44bab 100644 --- a/proto/babylon/btccheckpoint/query.proto +++ b/proto/babylon/btccheckpoint/query.proto @@ -55,7 +55,7 @@ message QueryBtcCheckpointsInfoRequest { uint64 start_epoch = 1; uint64 end_epoch = 2; - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request cosmos.base.query.v1beta1.PageRequest pagination = 3; } diff --git a/proto/babylon/epoching/v1/query.proto b/proto/babylon/epoching/v1/query.proto index e28bf5d01..18fcead52 100644 --- a/proto/babylon/epoching/v1/query.proto +++ b/proto/babylon/epoching/v1/query.proto @@ -80,7 +80,7 @@ message QueryEpochsInfoRequest { uint64 start_epoch = 1; uint64 end_epoch = 2; - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request cosmos.base.query.v1beta1.PageRequest pagination = 3; } @@ -107,7 +107,7 @@ message QueryEpochMsgsRequest { // epoch_num is the number of epoch of the requested msg queue uint64 epoch_num = 1; - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request cosmos.base.query.v1beta1.PageRequest pagination = 2; } diff --git a/proto/babylon/zoneconcierge/query.proto b/proto/babylon/zoneconcierge/query.proto index 400f94053..92d755eb6 100644 --- a/proto/babylon/zoneconcierge/query.proto +++ b/proto/babylon/zoneconcierge/query.proto @@ -76,11 +76,17 @@ message QueryHeaderResponse { } // QueryChainListRequest is request type for the Query/ChainList RPC method -message QueryChainListRequest {} +message QueryChainListRequest { + // pagination defines whether to have the pagination in the request + 
cosmos.base.query.v1beta1.PageRequest pagination = 1; +} // QueryChainListResponse is response type for the Query/ChainList RPC method message QueryChainListResponse { + // chain_ids are IDs of the chains in ascending alphabetical order repeated string chain_ids = 1; + // pagination defines the pagination in the response + cosmos.base.query.v1beta1.PageResponse pagination = 2; } // QueryChainInfoRequest is request type for the Query/ChainInfo RPC method. @@ -109,7 +115,7 @@ message QueryEpochChainInfoResponse { // QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. message QueryListHeadersRequest { string chain_id = 1; - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request cosmos.base.query.v1beta1.PageRequest pagination = 2; } diff --git a/x/btccheckpoint/types/query.pb.go b/x/btccheckpoint/types/query.pb.go index d7ccced04..c1227dbfd 100644 --- a/x/btccheckpoint/types/query.pb.go +++ b/x/btccheckpoint/types/query.pb.go @@ -207,7 +207,7 @@ func (m *QueryBtcCheckpointInfoResponse) GetInfo() *BTCCheckpointInfo { type QueryBtcCheckpointsInfoRequest struct { StartEpoch uint64 `protobuf:"varint,1,opt,name=start_epoch,json=startEpoch,proto3" json:"start_epoch,omitempty"` EndEpoch uint64 `protobuf:"varint,2,opt,name=end_epoch,json=endEpoch,proto3" json:"end_epoch,omitempty"` - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` } diff --git a/x/epoching/types/query.pb.go b/x/epoching/types/query.pb.go index 4285d57a6..8d90b59d9 100644 --- a/x/epoching/types/query.pb.go +++ b/x/epoching/types/query.pb.go @@ -204,7 +204,7 @@ func (m *QueryEpochInfoResponse) GetEpoch() *Epoch { type QueryEpochsInfoRequest struct { StartEpoch uint64 `protobuf:"varint,1,opt,name=start_epoch,json=startEpoch,proto3" json:"start_epoch,omitempty"` EndEpoch uint64 `protobuf:"varint,2,opt,name=end_epoch,json=endEpoch,proto3" json:"end_epoch,omitempty"` - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` } @@ -411,7 +411,7 @@ func (m *QueryCurrentEpochResponse) GetEpochBoundary() uint64 { type QueryEpochMsgsRequest struct { // epoch_num is the number of epoch of the requested msg queue EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` } diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index 057e79bed..e1aac72ff 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -18,9 +18,22 @@ func (k Keeper) ChainList(c context.Context, req *types.QueryChainListRequest) ( } ctx := sdk.UnwrapSDKContext(c) - chainIDs := k.GetAllChainIDs(ctx) - // TODO: pagination for this API - resp := &types.QueryChainListResponse{ChainIds: chainIDs} + + chainIDs := []string{} + store := k.chainInfoStore(ctx) + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) 
error { + chainID := string(key) + chainIDs = append(chainIDs, chainID) + return nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + resp := &types.QueryChainListResponse{ + ChainIds: chainIDs, + Pagination: pageRes, + } return resp, nil } diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go index b787dfc32..cb910ebfa 100644 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ b/x/zoneconcierge/keeper/grpc_query_test.go @@ -2,7 +2,6 @@ package keeper_test import ( "math/rand" - "sort" "testing" "github.com/babylonchain/babylon/testutil/datagen" @@ -20,7 +19,7 @@ import ( ) func FuzzChainList(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 100) + datagen.AddRandomSeedsToFuzzer(f, 10) f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) @@ -32,32 +31,36 @@ func FuzzChainList(f *testing.F) { hooks := zcKeeper.Hooks() // invoke the hook a random number of times with random chain IDs - numHeaders := datagen.RandomInt(100) - expectedChainIDs := []string{} + numHeaders := datagen.RandomInt(100) + 1 + allChainIDs := []string{} for i := uint64(0); i < numHeaders; i++ { var chainID string // simulate the scenario that some headers belong to the same chain if i > 0 && datagen.OneInN(2) { - chainID = expectedChainIDs[rand.Intn(len(expectedChainIDs))] + chainID = allChainIDs[rand.Intn(len(allChainIDs))] } else { chainID = datagen.GenRandomHexStr(30) - expectedChainIDs = append(expectedChainIDs, chainID) + allChainIDs = append(allChainIDs, chainID) } header := datagen.GenRandomIBCTMHeader(chainID, 0) hooks.AfterHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(32), header, false) } + limit := datagen.RandomInt(len(allChainIDs)) + 1 + // make query to get actual chain IDs - resp, err := zcKeeper.ChainList(ctx, &zctypes.QueryChainListRequest{}) + resp, err := zcKeeper.ChainList(ctx, &zctypes.QueryChainListRequest{ + Pagination: &query.PageRequest{ + Limit: limit, + }, + }) require.NoError(t, err) actualChainIDs := resp.ChainIds - // sort them and assert equality - sort.Strings(expectedChainIDs) - sort.Strings(actualChainIDs) - require.Equal(t, len(expectedChainIDs), len(actualChainIDs)) - for i := 0; i < len(expectedChainIDs); i++ { - require.Equal(t, expectedChainIDs[i], actualChainIDs[i]) + require.Equal(t, limit, uint64(len(actualChainIDs))) + allChainIDs = zcKeeper.GetAllChainIDs(ctx) + for i := uint64(0); i < limit; i++ { + require.Equal(t, allChainIDs[i], actualChainIDs[i]) } }) } diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go index c0bd55bdb..aa2c0e391 100644 --- a/x/zoneconcierge/types/query.pb.go +++ b/x/zoneconcierge/types/query.pb.go @@ -224,6 +224,8 @@ func (m *QueryHeaderResponse) GetForkHeaders() *Forks { // QueryChainListRequest is request type for the Query/ChainList RPC method type QueryChainListRequest struct { + // pagination defines whether to have the pagination in the request + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` } func (m *QueryChainListRequest) Reset() { *m = QueryChainListRequest{} } @@ -259,9 +261,19 @@ func (m *QueryChainListRequest) XXX_DiscardUnknown() { var xxx_messageInfo_QueryChainListRequest proto.InternalMessageInfo +func (m *QueryChainListRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + // QueryChainListResponse is response type for the Query/ChainList RPC method type QueryChainListResponse struct { + // chain_ids are 
IDs of the chains in ascending alphabetical order ChainIds []string `protobuf:"bytes,1,rep,name=chain_ids,json=chainIds,proto3" json:"chain_ids,omitempty"` + // pagination defines the pagination in the response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` } func (m *QueryChainListResponse) Reset() { *m = QueryChainListResponse{} } @@ -304,6 +316,13 @@ func (m *QueryChainListResponse) GetChainIds() []string { return nil } +func (m *QueryChainListResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + // QueryChainInfoRequest is request type for the Query/ChainInfo RPC method. type QueryChainInfoRequest struct { ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` @@ -497,7 +516,7 @@ func (m *QueryEpochChainInfoResponse) GetChainInfo() *ChainInfo { // QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. type QueryListHeadersRequest struct { ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` } @@ -1018,79 +1037,80 @@ func init() { func init() { proto.RegisterFile("babylon/zoneconcierge/query.proto", fileDescriptor_2caab7ee15063236) } var fileDescriptor_2caab7ee15063236 = []byte{ - // 1146 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x5f, 0x6f, 0xdb, 0x54, - 0x14, 0xaf, 0x9b, 0x36, 0x6b, 0x4e, 0xd9, 0x34, 0x5d, 0xca, 0x16, 0xdc, 0x2d, 0x2d, 0x46, 0xda, - 0xba, 0x69, 0xd8, 0x38, 0xac, 0x8c, 0x0a, 0x89, 0xa9, 0xe9, 0x68, 0x57, 0x86, 0xc6, 0x6a, 0x28, - 0x0f, 0x08, 0x29, 0xb2, 0x9d, 0x1b, 0xc7, 0x6a, 0xe3, 0x9b, 0xd9, 0x4e, 0xd6, 0xac, 0xf4, 0x85, - 0x2f, 0x00, 0x12, 0x2f, 0x88, 0x57, 0xa4, 0x21, 0xf1, 0xb0, 0xcf, 0x31, 0xa4, 0x3d, 0x4c, 0xe2, - 0x85, 0x27, 0x84, 0x5a, 0xbe, 0x06, 0x12, 0xf2, 0xbd, 0xd7, 0x8e, 0x9d, 0xd8, 0xcb, 0x1f, 0xfa, - 0xc0, 0xc3, 0xde, 0x72, 0x7d, 0xcf, 0xf9, 0x9d, 0xdf, 0xf9, 0x73, 0xcf, 0x39, 0x81, 0xb7, 0x0c, - 0xdd, 0xe8, 0xee, 0x13, 0x47, 0x79, 0x4c, 0x1c, 0x6c, 0x12, 0xc7, 0xb4, 0xb1, 0x6b, 0x61, 0xe5, - 0x61, 0x1b, 0xbb, 0x5d, 0xb9, 0xe5, 0x12, 0x9f, 0xa0, 0x22, 0x17, 0x91, 0x13, 0x22, 0x72, 0x47, - 0x15, 0x17, 0x2c, 0x62, 0x11, 0x2a, 0xa4, 0x04, 0xbf, 0x98, 0xbc, 0x78, 0xc9, 0x22, 0xc4, 0xda, - 0xc7, 0x8a, 0xde, 0xb2, 0x15, 0xdd, 0x71, 0x88, 0xaf, 0xfb, 0x36, 0x71, 0x3c, 0x7e, 0x7b, 0xdd, - 0x24, 0x5e, 0x93, 0x78, 0x8a, 0xa1, 0x7b, 0xdc, 0x8c, 0xd2, 0x51, 0x0d, 0xec, 0xeb, 0xaa, 0xd2, - 0xd2, 0x2d, 0xdb, 0xa1, 0xc2, 0x5c, 0xb6, 0x14, 0x92, 0x33, 0x7c, 0xd3, 0x6c, 0x60, 0x73, 0xaf, - 0x45, 0x6c, 0xc7, 0x57, 0xfc, 0x03, 0x7e, 0x7f, 0x2d, 0xfd, 0x3e, 0x71, 0xe2, 0xa2, 0x91, 0x9f, - 0xbd, 0x1b, 0xdb, 0xb1, 0xe2, 0x7e, 0x8a, 0x57, 0xd2, 0x45, 0x06, 0xa0, 0xa4, 0x50, 0x0e, 0xb7, - 0x88, 0xd9, 0x08, 0x44, 0x3a, 0x6a, 0xf4, 0xbb, 0x5f, 0x26, 0x19, 0xd6, 0x96, 0xee, 0xea, 0x4d, - 0xaf, 0x9f, 0x7d, 0x52, 0x26, 0x19, 0x65, 0x2a, 0x2a, 0x2d, 0x00, 0xda, 0x09, 0x98, 0x3e, 0xa0, - 0xfa, 0x1a, 0x7e, 0xd8, 0xc6, 0x9e, 0x2f, 0xed, 0xc2, 0xeb, 0x89, 0xaf, 0x5e, 0x8b, 0x38, 0x1e, - 0x46, 0x1f, 0x41, 0x9e, 0xd9, 0x29, 0x0a, 0xcb, 0xc2, 0xca, 0x7c, 0x79, 0x59, 0xce, 0x4a, 0xa0, - 0xcc, 0x34, 0x2b, 0x33, 0xcf, 0xfe, 0x5c, 0x9a, 0xd2, 0xb8, 
0x96, 0xb4, 0xc5, 0x8d, 0xdd, 0xc5, - 0x7a, 0x0d, 0xbb, 0xdc, 0x18, 0x7a, 0x13, 0xe6, 0xcc, 0x86, 0x6e, 0x3b, 0x55, 0xbb, 0x46, 0x71, - 0x0b, 0xda, 0x19, 0x7a, 0xde, 0xae, 0xa1, 0x0b, 0x90, 0x6f, 0x60, 0xdb, 0x6a, 0xf8, 0xc5, 0xe9, - 0x65, 0x61, 0x65, 0x46, 0xe3, 0x27, 0xe9, 0x27, 0x81, 0x13, 0x0c, 0x91, 0x38, 0xc1, 0xdb, 0x81, - 0x7c, 0xf0, 0x85, 0x13, 0xbc, 0x9a, 0x4d, 0x70, 0xdb, 0xa9, 0xe1, 0x03, 0x5c, 0xe3, 0x00, 0x5c, - 0x0d, 0x55, 0xe0, 0xb5, 0x3a, 0x71, 0xf7, 0xaa, 0xec, 0xe8, 0x51, 0xb3, 0xf3, 0xe5, 0xa5, 0x6c, - 0x98, 0x4d, 0xe2, 0xee, 0x79, 0xda, 0x7c, 0xa0, 0xc4, 0xa0, 0x3c, 0xe9, 0x22, 0xbc, 0x41, 0xb9, - 0x6d, 0x04, 0x4e, 0x7c, 0x6a, 0x7b, 0x7e, 0x18, 0xd5, 0x55, 0xb8, 0xd0, 0x7f, 0xc1, 0x79, 0x2f, - 0x42, 0x21, 0x0c, 0x41, 0x10, 0xdb, 0xdc, 0x4a, 0x41, 0x9b, 0xe3, 0x31, 0xf0, 0xa4, 0x72, 0x1c, - 0x6f, 0xdb, 0xa9, 0x93, 0xe1, 0x81, 0x93, 0xbe, 0x8e, 0x9b, 0x62, 0x3a, 0xdc, 0x54, 0x05, 0x80, - 0x2b, 0x39, 0x75, 0xc2, 0xc3, 0xf4, 0x76, 0xb6, 0x7f, 0x3d, 0x00, 0xc6, 0x30, 0xf8, 0x29, 0x7d, - 0x01, 0x22, 0x45, 0xff, 0x38, 0x28, 0xcd, 0x01, 0x5a, 0x8b, 0x50, 0xa0, 0x35, 0x5b, 0x75, 0xda, - 0x4d, 0x6a, 0x60, 0x46, 0x9b, 0xa3, 0x1f, 0xee, 0xb7, 0x9b, 0x09, 0xce, 0xd3, 0x49, 0xce, 0x3a, - 0x2c, 0xa6, 0xa2, 0x9e, 0x22, 0xf1, 0x6f, 0xe0, 0x22, 0x35, 0x11, 0x04, 0x9f, 0xa7, 0x6b, 0x84, - 0x2a, 0xdc, 0x04, 0xe8, 0x35, 0x10, 0x5e, 0x12, 0x57, 0x64, 0xd6, 0x6d, 0xe4, 0xa0, 0xdb, 0xc8, - 0xec, 0xb1, 0xf3, 0x6e, 0x23, 0x3f, 0xd0, 0x2d, 0xcc, 0x61, 0xb5, 0x98, 0xa6, 0xf4, 0x44, 0x80, - 0xe2, 0xa0, 0x79, 0xee, 0xde, 0x3a, 0x9c, 0x09, 0x8b, 0x2e, 0x28, 0x80, 0x31, 0x6a, 0x37, 0xd4, - 0x43, 0x5b, 0x29, 0x3c, 0xaf, 0x0e, 0xe5, 0xc9, 0xec, 0x27, 0x88, 0x7e, 0x09, 0x97, 0x22, 0x9e, - 0x34, 0x1b, 0x7d, 0xb1, 0x9a, 0x34, 0xc3, 0x06, 0x5c, 0xce, 0xc0, 0x3d, 0xb5, 0x20, 0x48, 0x3b, - 0x50, 0xa2, 0x36, 0x36, 0x6d, 0x47, 0xdf, 0xb7, 0x1f, 0xe3, 0xda, 0x18, 0xcf, 0x06, 0x2d, 0xc0, - 0x6c, 0xcb, 0x25, 0x1d, 0x4c, 0x89, 0xcf, 0x69, 0xec, 0x20, 0xfd, 0x9c, 0x83, 0xa5, 0x4c, 0x4c, - 0xce, 0x7c, 0x17, 0x16, 0xea, 0xe1, 0x6d, 0x75, 0xb2, 0x3a, 0x45, 0xf5, 0x01, 0x78, 0xb4, 0x06, - 0xc0, 0x22, 0x4d, 0xc1, 0x58, 0x4a, 0xc5, 0x08, 0x2c, 0x1a, 0x0d, 0x1d, 0x55, 0xa6, 0xf1, 0xd4, - 0x58, 0x5e, 0xa8, 0xea, 0x7d, 0x38, 0xe7, 0xea, 0x8f, 0xaa, 0xbd, 0x21, 0x53, 0xcc, 0xf5, 0xf5, - 0xc4, 0xc4, 0x34, 0x0a, 0x30, 0x34, 0xfd, 0xd1, 0x46, 0xf4, 0x4d, 0x3b, 0xeb, 0xc6, 0x8f, 0x68, - 0x17, 0x90, 0xe1, 0x9b, 0x55, 0xaf, 0x6d, 0x34, 0x6d, 0xcf, 0xb3, 0x89, 0x53, 0xdd, 0xc3, 0xdd, - 0xe2, 0x4c, 0x1f, 0x66, 0x72, 0x42, 0x76, 0x54, 0xf9, 0xf3, 0x48, 0xfe, 0x1e, 0xee, 0x6a, 0xe7, - 0x0d, 0xdf, 0x4c, 0x7c, 0x41, 0x5b, 0x34, 0xe4, 0xa4, 0x5e, 0x9c, 0xa5, 0x48, 0xea, 0x4b, 0x46, - 0x4a, 0x20, 0x96, 0x92, 0x02, 0xa6, 0x2f, 0xf9, 0x70, 0x2d, 0x23, 0x49, 0xbb, 0x8e, 0x6f, 0xef, - 0xdf, 0xa5, 0x93, 0x63, 0xf2, 0x99, 0xd3, 0xab, 0x8d, 0x5c, 0xbc, 0x36, 0x9e, 0xe6, 0xe0, 0xfa, - 0x28, 0x66, 0x5f, 0x95, 0xc9, 0xff, 0xa3, 0x4c, 0xca, 0x4f, 0xce, 0xc2, 0x2c, 0x4d, 0x18, 0xfa, - 0x4e, 0x80, 0x3c, 0x5b, 0x53, 0xd0, 0x8d, 0x6c, 0xb8, 0xc1, 0xed, 0x48, 0x7c, 0x67, 0x44, 0x69, - 0x96, 0x73, 0x69, 0xe5, 0xdb, 0xdf, 0xff, 0xfe, 0x61, 0x5a, 0x42, 0xcb, 0x4a, 0xfa, 0x5a, 0xd6, - 0x51, 0xf9, 0xf6, 0x86, 0x9e, 0x0a, 0x90, 0x67, 0xfd, 0x6c, 0x28, 0xa3, 0xc4, 0x0a, 0x35, 0x94, - 0x51, 0x72, 0x4d, 0x92, 0xb6, 0x28, 0xa3, 0x75, 0x74, 0x3b, 0x9b, 0x51, 0xaf, 0x36, 0x95, 0xc3, - 0xf0, 0xa5, 0x1c, 0x29, 0xac, 0xc9, 0x2a, 0x87, 0xec, 0x49, 0x1c, 0xa1, 0x1f, 0x05, 0x28, 0x44, - 0xdb, 0x0c, 0x52, 0x86, 0xb0, 0xe8, 0x5f, 0x88, 0xc4, 0x77, 0x47, 0x57, 0x18, 0x3d, 
0x96, 0x94, - 0xad, 0x87, 0x7e, 0x09, 0xa9, 0xd1, 0x2a, 0x1f, 0x89, 0x5a, 0x6c, 0x48, 0x8c, 0x46, 0x2d, 0x3e, - 0x01, 0xa4, 0x5b, 0x94, 0x9a, 0x8a, 0x94, 0x31, 0x83, 0x8a, 0x7e, 0x13, 0xe0, 0x5c, 0x72, 0xe7, - 0x41, 0x37, 0x87, 0x58, 0x4f, 0x5d, 0xbc, 0xc4, 0xd5, 0x31, 0xb5, 0x38, 0xf1, 0x4f, 0x28, 0xf1, - 0x3b, 0xa8, 0x32, 0x6e, 0x35, 0xd0, 0x26, 0xe2, 0x29, 0x87, 0xd1, 0x2e, 0x70, 0x84, 0x7e, 0x15, - 0x60, 0x3e, 0xb6, 0xdd, 0x20, 0x75, 0x08, 0xa5, 0xc1, 0x45, 0x4c, 0x2c, 0x8f, 0xa3, 0xc2, 0x5d, - 0xb8, 0x49, 0x5d, 0x90, 0xd1, 0x8d, 0x6c, 0x17, 0xf8, 0x7e, 0x10, 0x0f, 0xfc, 0x73, 0x01, 0xce, - 0xf7, 0xaf, 0x22, 0xe8, 0xfd, 0x11, 0xcc, 0xa7, 0xec, 0x44, 0xe2, 0xad, 0xb1, 0xf5, 0x46, 0x7f, - 0x8c, 0x83, 0xdc, 0xd3, 0x62, 0xff, 0x5c, 0x00, 0x34, 0xd8, 0xf7, 0xd0, 0x07, 0x43, 0x88, 0x65, - 0x2e, 0x4a, 0xe2, 0xda, 0x04, 0x9a, 0xdc, 0xa9, 0x75, 0xea, 0xd4, 0x87, 0x68, 0x2d, 0xdb, 0xa9, - 0xb4, 0x39, 0x18, 0xcf, 0xce, 0x3f, 0x02, 0x5c, 0x7e, 0xe9, 0x50, 0x45, 0x1b, 0x63, 0xf3, 0x1b, - 0xdc, 0x04, 0xc4, 0x3b, 0xff, 0x0d, 0x84, 0xfb, 0xbb, 0x43, 0xfd, 0xbd, 0x87, 0xb6, 0x27, 0xf6, - 0x57, 0x61, 0x3d, 0x35, 0xea, 0xad, 0x95, 0xcf, 0x9e, 0x1d, 0x97, 0x84, 0x17, 0xc7, 0x25, 0xe1, - 0xaf, 0xe3, 0x92, 0xf0, 0xfd, 0x49, 0x69, 0xea, 0xc5, 0x49, 0x69, 0xea, 0x8f, 0x93, 0xd2, 0xd4, - 0x57, 0xab, 0x96, 0xed, 0x37, 0xda, 0x86, 0x6c, 0x92, 0x66, 0x68, 0x8e, 0xc2, 0x44, 0xb6, 0x0f, - 0xfa, 0xac, 0xfb, 0xdd, 0x16, 0xf6, 0x8c, 0x3c, 0xfd, 0xc7, 0xff, 0xde, 0xbf, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x12, 0x48, 0x53, 0xd4, 0x99, 0x11, 0x00, 0x00, + // 1155 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0x8f, 0xf3, 0xaf, 0xc9, 0x0b, 0xad, 0xaa, 0x21, 0x94, 0xc5, 0x69, 0x37, 0xc1, 0x48, 0x25, + 0xad, 0x8a, 0x8d, 0x97, 0x96, 0x12, 0x21, 0x51, 0x65, 0x53, 0x92, 0x86, 0xa2, 0xd2, 0x18, 0xc2, + 0x01, 0x21, 0xad, 0x6c, 0xef, 0xac, 0xd7, 0x4a, 0xd6, 0xb3, 0xf5, 0x78, 0xb7, 0xd9, 0x86, 0x70, + 0xe0, 0x0b, 0x80, 0xc4, 0x05, 0x71, 0x45, 0x2a, 0x12, 0x87, 0x7e, 0x8e, 0x22, 0xf5, 0x50, 0x89, + 0x0b, 0x27, 0x84, 0x12, 0xbe, 0x06, 0x12, 0xf2, 0xcc, 0xd8, 0x6b, 0xef, 0xda, 0xdd, 0x3f, 0xe4, + 0xc0, 0x81, 0xdb, 0x8e, 0xe7, 0xbd, 0xdf, 0xfb, 0xbd, 0x7f, 0xf3, 0x9e, 0x16, 0x5e, 0xb7, 0x4c, + 0xab, 0xb3, 0x4f, 0x3c, 0xed, 0x11, 0xf1, 0xb0, 0x4d, 0x3c, 0xdb, 0xc5, 0xbe, 0x83, 0xb5, 0x07, + 0x2d, 0xec, 0x77, 0xd4, 0xa6, 0x4f, 0x02, 0x82, 0x0a, 0x42, 0x44, 0x4d, 0x89, 0xa8, 0x6d, 0x5d, + 0x5e, 0x74, 0x88, 0x43, 0x98, 0x90, 0x16, 0xfe, 0xe2, 0xf2, 0xf2, 0x45, 0x87, 0x10, 0x67, 0x1f, + 0x6b, 0x66, 0xd3, 0xd5, 0x4c, 0xcf, 0x23, 0x81, 0x19, 0xb8, 0xc4, 0xa3, 0xe2, 0xf6, 0xaa, 0x4d, + 0x68, 0x83, 0x50, 0xcd, 0x32, 0xa9, 0x30, 0xa3, 0xb5, 0x75, 0x0b, 0x07, 0xa6, 0xae, 0x35, 0x4d, + 0xc7, 0xf5, 0x98, 0xb0, 0x90, 0x2d, 0x46, 0xe4, 0xac, 0xc0, 0xb6, 0xeb, 0xd8, 0xde, 0x6b, 0x12, + 0xd7, 0x0b, 0xb4, 0xe0, 0x40, 0xdc, 0x5f, 0xc9, 0xbe, 0x4f, 0x9d, 0x84, 0x68, 0xec, 0x67, 0xf7, + 0xc6, 0xf5, 0x9c, 0xa4, 0x9f, 0xf2, 0xe5, 0x6c, 0x91, 0x3e, 0x28, 0x25, 0x92, 0xc3, 0x4d, 0x62, + 0xd7, 0x43, 0x91, 0xb6, 0x1e, 0xff, 0xee, 0x95, 0x49, 0x87, 0xb5, 0x69, 0xfa, 0x66, 0x83, 0xf6, + 0xb2, 0x4f, 0xcb, 0xa4, 0xa3, 0xcc, 0x44, 0x95, 0x45, 0x40, 0x3b, 0x21, 0xd3, 0xfb, 0x4c, 0xdf, + 0xc0, 0x0f, 0x5a, 0x98, 0x06, 0xca, 0x2e, 0xbc, 0x9c, 0xfa, 0x4a, 0x9b, 0xc4, 0xa3, 0x18, 0x7d, + 0x00, 0xb3, 0xdc, 0x4e, 0x41, 0x5a, 0x91, 0x56, 0x17, 0x4a, 0x2b, 0x6a, 0x5e, 0x02, 0x55, 0xae, + 0x59, 0x9e, 0x7e, 0xfa, 0xc7, 0xf2, 0x84, 0x21, 0xb4, 0x94, 0x2d, 0x61, 0xec, 0x0e, 0x36, 0xab, + 
0xd8, 0x17, 0xc6, 0xd0, 0x6b, 0x30, 0x67, 0xd7, 0x4d, 0xd7, 0xab, 0xb8, 0x55, 0x86, 0x3b, 0x6f, + 0x9c, 0x61, 0xe7, 0xed, 0x2a, 0xba, 0x00, 0xb3, 0x75, 0xec, 0x3a, 0xf5, 0xa0, 0x30, 0xb9, 0x22, + 0xad, 0x4e, 0x1b, 0xe2, 0xa4, 0xfc, 0x28, 0x09, 0x82, 0x11, 0x92, 0x20, 0x78, 0x2b, 0x94, 0x0f, + 0xbf, 0x08, 0x82, 0x6f, 0xe6, 0x13, 0xdc, 0xf6, 0xaa, 0xf8, 0x00, 0x57, 0x05, 0x80, 0x50, 0x43, + 0x65, 0x78, 0xa9, 0x46, 0xfc, 0xbd, 0x0a, 0x3f, 0x52, 0x66, 0x76, 0xa1, 0xb4, 0x9c, 0x0f, 0xb3, + 0x49, 0xfc, 0x3d, 0x6a, 0x2c, 0x84, 0x4a, 0x1c, 0x8a, 0x2a, 0x15, 0x78, 0x85, 0x71, 0xdb, 0x08, + 0x9d, 0xf8, 0xd8, 0xa5, 0x41, 0xe4, 0xe8, 0x26, 0x40, 0xb7, 0x10, 0x05, 0xc3, 0xcb, 0x2a, 0xaf, + 0x5a, 0x35, 0xac, 0x5a, 0x95, 0x17, 0x8d, 0xa8, 0x5a, 0xf5, 0xbe, 0xe9, 0x60, 0xa1, 0x6b, 0x24, + 0x34, 0x95, 0xaf, 0xe1, 0x42, 0xaf, 0x01, 0xe1, 0xff, 0x12, 0xcc, 0x47, 0xa1, 0x0c, 0x73, 0x34, + 0xb5, 0x3a, 0x6f, 0xcc, 0x89, 0x58, 0x52, 0xb4, 0x95, 0x32, 0x3f, 0x29, 0x02, 0x34, 0xc8, 0x3c, + 0x47, 0x4e, 0xd9, 0x2f, 0x25, 0x1d, 0xdc, 0xf6, 0x6a, 0x64, 0x70, 0x26, 0x95, 0x2f, 0x93, 0x9c, + 0xb9, 0x8e, 0xe0, 0x5c, 0x06, 0x10, 0x4a, 0x5e, 0x8d, 0x88, 0xa8, 0xbc, 0x91, 0x1f, 0xf0, 0x2e, + 0x00, 0x77, 0x35, 0xfc, 0xa9, 0x7c, 0x06, 0x32, 0x43, 0xff, 0x30, 0xec, 0x95, 0x3e, 0x5a, 0x4b, + 0x30, 0xcf, 0x9a, 0xa8, 0xe2, 0xb5, 0x1a, 0xcc, 0xc0, 0xb4, 0x31, 0xc7, 0x3e, 0xdc, 0x6b, 0x35, + 0x52, 0x9c, 0x27, 0xd3, 0x9c, 0x4d, 0x58, 0xca, 0x44, 0x3d, 0x45, 0xe2, 0x5f, 0xc1, 0xab, 0xcc, + 0x44, 0x98, 0x45, 0x51, 0x3f, 0x43, 0xb4, 0xc5, 0x66, 0x46, 0x26, 0xc7, 0x29, 0xa4, 0xc7, 0x12, + 0x14, 0xfa, 0xcd, 0x0b, 0xf7, 0xd6, 0xe1, 0x4c, 0xd4, 0x05, 0x61, 0x25, 0x8d, 0xd0, 0x4c, 0x91, + 0xde, 0xe9, 0x55, 0xdc, 0xe7, 0x70, 0x31, 0xe6, 0xc9, 0xb2, 0xd1, 0x13, 0xab, 0x71, 0x33, 0x6c, + 0xc1, 0xa5, 0x1c, 0xdc, 0x53, 0x0b, 0x82, 0xb2, 0x03, 0x45, 0x66, 0x63, 0xd3, 0xf5, 0xcc, 0x7d, + 0xf7, 0x11, 0xae, 0x8e, 0xd0, 0x36, 0x68, 0x11, 0x66, 0x9a, 0x3e, 0x69, 0x63, 0x46, 0x7c, 0xce, + 0xe0, 0x07, 0xe5, 0xa7, 0x29, 0x58, 0xce, 0xc5, 0x14, 0xcc, 0x77, 0x61, 0xb1, 0x16, 0xdd, 0x56, + 0xc6, 0xab, 0x53, 0x54, 0xeb, 0x83, 0x47, 0x6b, 0x00, 0x3c, 0xd2, 0x0c, 0x8c, 0xa7, 0x54, 0x8e, + 0xc1, 0xe2, 0x59, 0xd5, 0xd6, 0x55, 0x16, 0x4f, 0x83, 0xe7, 0x85, 0xa9, 0xde, 0x83, 0x73, 0xbe, + 0xf9, 0xb0, 0xd2, 0x9d, 0x7a, 0x85, 0xa9, 0x9e, 0x47, 0x3a, 0x35, 0x1e, 0x43, 0x0c, 0xc3, 0x7c, + 0xb8, 0x11, 0x7f, 0x33, 0xce, 0xfa, 0xc9, 0x23, 0xda, 0x05, 0x64, 0x05, 0x76, 0x85, 0xb6, 0xac, + 0x86, 0x4b, 0xa9, 0x4b, 0xbc, 0xca, 0x1e, 0xee, 0x14, 0xa6, 0x7b, 0x30, 0xd3, 0x23, 0xbb, 0xad, + 0xab, 0x9f, 0xc6, 0xf2, 0x77, 0x71, 0xc7, 0x38, 0x6f, 0x05, 0x76, 0xea, 0x0b, 0xda, 0x62, 0x21, + 0x27, 0xb5, 0xc2, 0x0c, 0x43, 0xd2, 0x5f, 0x30, 0xe3, 0x42, 0xb1, 0x8c, 0x14, 0x70, 0x7d, 0x25, + 0x80, 0x2b, 0x39, 0x49, 0xda, 0xf5, 0x02, 0x77, 0xff, 0x0e, 0x1b, 0x65, 0xe3, 0x0f, 0xc1, 0x6e, + 0x6d, 0x4c, 0x25, 0x6b, 0xe3, 0xc9, 0x14, 0x5c, 0x1d, 0xc6, 0xec, 0xff, 0x65, 0xf2, 0xdf, 0x28, + 0x93, 0xd2, 0xe3, 0xb3, 0x30, 0xc3, 0x12, 0x86, 0xbe, 0x95, 0x60, 0x96, 0xef, 0x4d, 0xe8, 0x5a, + 0x3e, 0x5c, 0xff, 0xba, 0x26, 0xbf, 0x35, 0xa4, 0x34, 0xcf, 0xb9, 0xb2, 0xfa, 0xcd, 0x6f, 0x7f, + 0x7d, 0x3f, 0xa9, 0xa0, 0x15, 0x2d, 0x7b, 0x4f, 0x6c, 0xeb, 0x62, 0x9d, 0x44, 0x4f, 0x24, 0x98, + 0xe5, 0xef, 0xd9, 0x40, 0x46, 0xa9, 0x9d, 0x6e, 0x20, 0xa3, 0xf4, 0xde, 0xa6, 0x6c, 0x31, 0x46, + 0xeb, 0xe8, 0x56, 0x3e, 0xa3, 0x6e, 0x6d, 0x6a, 0x87, 0x51, 0xa7, 0x1c, 0x69, 0xfc, 0x91, 0xd5, + 0x0e, 0x79, 0x4b, 0x1c, 0xa1, 0x1f, 0x24, 0x98, 0x8f, 0xd7, 0x22, 0xa4, 0x0d, 0x60, 0xd1, 0xbb, + 0xa1, 0xc9, 0x6f, 0x0f, 
0xaf, 0x30, 0x7c, 0x2c, 0x19, 0x5b, 0x8a, 0x7e, 0x8e, 0xa8, 0xb1, 0x2a, + 0x1f, 0x8a, 0x5a, 0x62, 0x48, 0x0c, 0x47, 0x2d, 0x39, 0x01, 0x94, 0x9b, 0x8c, 0x9a, 0x8e, 0xb4, + 0x11, 0x83, 0x8a, 0x7e, 0x95, 0xe0, 0x5c, 0x7a, 0xe7, 0x41, 0xd7, 0x07, 0x58, 0xcf, 0x5c, 0xbc, + 0xe4, 0x1b, 0x23, 0x6a, 0x09, 0xe2, 0x1f, 0x31, 0xe2, 0xb7, 0x51, 0x79, 0xd4, 0x6a, 0x60, 0x8f, + 0x08, 0xd5, 0x0e, 0xe3, 0x5d, 0xe0, 0x08, 0xfd, 0x22, 0xc1, 0x42, 0x62, 0xbb, 0x41, 0xfa, 0x00, + 0x4a, 0xfd, 0x8b, 0x98, 0x5c, 0x1a, 0x45, 0x45, 0xb8, 0x70, 0x9d, 0xb9, 0xa0, 0xa2, 0x6b, 0xf9, + 0x2e, 0x88, 0xfd, 0x20, 0x19, 0xf8, 0x67, 0x12, 0x9c, 0xef, 0x5d, 0x45, 0xd0, 0xbb, 0x43, 0x98, + 0xcf, 0xd8, 0x89, 0xe4, 0x9b, 0x23, 0xeb, 0x0d, 0xdf, 0x8c, 0xfd, 0xdc, 0xb3, 0x62, 0xff, 0x4c, + 0x02, 0xd4, 0xff, 0xee, 0xa1, 0xf7, 0x06, 0x10, 0xcb, 0x5d, 0x94, 0xe4, 0xb5, 0x31, 0x34, 0x85, + 0x53, 0xeb, 0xcc, 0xa9, 0xf7, 0xd1, 0x5a, 0xbe, 0x53, 0x59, 0x73, 0x30, 0x99, 0x9d, 0xbf, 0x25, + 0xb8, 0xf4, 0xc2, 0xa1, 0x8a, 0x36, 0x46, 0xe6, 0xd7, 0xbf, 0x09, 0xc8, 0xb7, 0xff, 0x1d, 0x88, + 0xf0, 0x77, 0x87, 0xf9, 0x7b, 0x17, 0x6d, 0x8f, 0xed, 0xaf, 0xc6, 0xdf, 0xd4, 0xf8, 0x6d, 0x2d, + 0x7f, 0xf2, 0xf4, 0xb8, 0x28, 0x3d, 0x3f, 0x2e, 0x4a, 0x7f, 0x1e, 0x17, 0xa5, 0xef, 0x4e, 0x8a, + 0x13, 0xcf, 0x4f, 0x8a, 0x13, 0xbf, 0x9f, 0x14, 0x27, 0xbe, 0xb8, 0xe1, 0xb8, 0x41, 0xbd, 0x65, + 0xa9, 0x36, 0x69, 0x44, 0xe6, 0x18, 0x4c, 0x6c, 0xfb, 0xa0, 0xc7, 0x7a, 0xd0, 0x69, 0x62, 0x6a, + 0xcd, 0xb2, 0xbf, 0x20, 0xde, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0x1f, 0xf5, 0xc1, 0xb9, 0x2a, + 0x12, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1637,6 +1657,18 @@ func (m *QueryChainListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } @@ -1660,6 +1692,18 @@ func (m *QueryChainListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if len(m.ChainIds) > 0 { for iNdEx := len(m.ChainIds) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.ChainIds[iNdEx]) @@ -2291,6 +2335,10 @@ func (m *QueryChainListRequest) Size() (n int) { } var l int _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } return n } @@ -2306,6 +2354,10 @@ func (m *QueryChainListResponse) Size() (n int) { n += 1 + l + sovQuery(uint64(l)) } } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } return n } @@ -2915,6 +2967,42 @@ func (m *QueryChainListRequest) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: QueryChainListRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) @@ -2997,6 +3085,42 @@ func (m *QueryChainListResponse) Unmarshal(dAtA []byte) error { } m.ChainIds = append(m.ChainIds, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) diff --git a/x/zoneconcierge/types/query.pb.gw.go b/x/zoneconcierge/types/query.pb.gw.go index 5ffdf1014..4a9974981 100644 --- a/x/zoneconcierge/types/query.pb.gw.go +++ b/x/zoneconcierge/types/query.pb.gw.go @@ -127,10 +127,21 @@ func local_request_Query_Header_0(ctx context.Context, marshaler runtime.Marshal } +var ( + filter_Query_ChainList_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + func request_Query_ChainList_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryChainListRequest var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ChainList_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.ChainList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err @@ -140,6 +151,13 @@ func local_request_Query_ChainList_0(ctx context.Context, marshaler runtime.Mars var protoReq QueryChainListRequest var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ChainList_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.ChainList(ctx, &protoReq) return msg, metadata, err
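
The hunks above thread an optional cosmos-sdk PageRequest/PageResponse pair through the ChainList query, from the keeper down to the generated gRPC gateway handlers. A minimal client-side sketch of paging through all chain IDs with the new field follows; it is illustrative only and not part of the patch: the gRPC address and the page size are assumptions, while QueryChainListRequest.Pagination, QueryChainListResponse.ChainIds, and QueryChainListResponse.Pagination come from the generated types in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types"
	"github.com/cosmos/cosmos-sdk/types/query"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed node gRPC endpoint; adjust for the target network.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := zctypes.NewQueryClient(conn)

	var nextKey []byte
	for {
		// Request one page of chain IDs; a Limit of 10 is an arbitrary example value.
		resp, err := client.ChainList(context.Background(), &zctypes.QueryChainListRequest{
			Pagination: &query.PageRequest{Key: nextKey, Limit: 10},
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, chainID := range resp.ChainIds {
			fmt.Println(chainID)
		}
		// Stop when the response carries no further page key.
		if resp.Pagination == nil || len(resp.Pagination.NextKey) == 0 {
			break
		}
		nextKey = resp.Pagination.NextKey
	}
}
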