diff --git a/.circleci/config.yml b/.circleci/config.yml index 1ae67cd65..ee3bc7769 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ jobs: # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub. # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor machine: - image: ubuntu-2204:2022.07.1 + image: ubuntu-2204:2022.10.1 resource_class: large # Add steps to the job # See: https://circleci.com/docs/2.0/configuration-reference/#steps @@ -31,7 +31,7 @@ jobs: - run: name: Lint command: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.50.1 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.50.1 ./bin/golangci-lint run - run: name: Run tests @@ -43,6 +43,12 @@ make localnet-start-test sudo -E env "PATH=$PATH" make test-babylon-integration make localnet-stop + # TODO: If CI tests will take too long consider having only this e2e test + # instead of separate integration tests and e2e tests. + - run: + name: Run e2e tests + command: | + make test-e2e # Invoke jobs via workflows diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..a441aa146 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. ￼
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2023 Babylonchain, Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile index 1d448bd7f..123435446 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ build-linux: GOOS=linux GOARCH=$(if $(findstring aarch64,$(shell uname -m)) || $(findstring arm64,$(shell uname -m)),arm64,amd64) LEDGER_ENABLED=false $(MAKE) build $(BUILD_TARGETS): go.sum $(BUILDDIR)/ - go $@ -mod=readonly $(BUILD_FLAGS) $(BUILD_ARGS) ./... + CGO_CFLAGS="-O -D__BLST_PORTABLE__" go $@ -mod=readonly $(BUILD_FLAGS) $(BUILD_ARGS) ./... $(BUILDDIR)/: mkdir -p $(BUILDDIR)/ diff --git a/README.md b/README.md index 6009254fc..e853020a2 100644 --- a/README.md +++ b/README.md @@ -1,162 +1,41 @@ # Babylon -## Requirements +Bringing Bitcoin security to Cosmos and beyond. -- Go 1.18 +[![Website](https://badgen.net/badge/icon/website?label=)](https://babylonchain.io) +[![Whitepaper](https://badgen.net/badge/icon/whitepaper?label=)](https://arxiv.org/abs/2207.08392) +[![Twitter](https://badgen.net/badge/icon/twitter?icon=twitter&label)](https://twitter.com/babylon_chain) +[![Discord](https://badgen.net/badge/icon/discord?icon=discord&label)](https://discord.gg/babylonchain) -## Development requirements +## Build and install -- Go 1.18 -- Docker +The babylond application based on the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is the main application of the Babylon network. +This repository is used to build the Babylon core application to join the Babylon network. 
-## Building +### Requirements +To build and install, you need to have Go 1.19 available. +Follow the instructions on the [Golang page](https://go.dev/doc/install) to do that. -To build the chain, simply: +To build the binary: ```console make build ``` -This will lead to the creation of a `babylond` executable under the `build` -directory. +The binary will then be available at `./build/babylond` . -## Installing - -To build the chain and install a babylon executable: +To install: ```console make install ``` -## Testing - -```console -make test -``` - -## Running a node - -The following commands assume that the `babylond` executable has been -installed. If the repository was only built, then `./build/babylond` should be -used in its place. - -### Generating the node configuration -The configuration for a single node can be created through the `testnet` -command. While the testnet command can create an arbitrary number of nodes that -communicate on a testnet, here we focus on the setup of a single node. -```console -babylond testnet \ - --v 1 \ - --output-dir ./.testnet \ - --starting-ip-address 192.168.10.2 \ - --keyring-backend test \ - --chain-id chain-test -``` - -The flags specify the following: -- `--output-dir `: Specifies that the testnet files should - reside under this directory. -- `--v `: Leads to the creation of `N` nodes, each one residing under the - `/node{i}`. In this case `i={0..N-1}`. -- `--starting-ip-address `: Specifies the IP address for the nodes. For example, - `192.168.10.2` leads to the first node running on `192.168.10.2:46656`, the - second one on `192.168.10.3:46656` etc. -- `--keyring-backend {os,file,test}`: Specifies the backend to use for the keyring. Available - choices include `os`, `file`, and `test`. We use `test` for convenience. -- `--chain-id`: An identifier for the chain. Useful when perrforming operations - later. - -In this case, we generated a single node. 
If we take a look under `.testnet`: -```console -$ ls .testnet -gentxs node0 -``` - -The `gentxs` directory contains the genesis transactions. It contains -transactions that assign bbn tokens to a single address that is defined for each -node. - -The `node0` directory contains the the following, -```console -$ ls .testnet/node0/babylond -config data key_seed.json keyring-test -``` - -A brief description of the contents: -- `config`: Contains the configuration files for the node. -- `data`: Contains the database storage for the node. -- `key_seed.json`: Seed to generate the keys maintained by the keyring. -- `keyring-test`: Contains the test keyring. This directory was created because - we provided the `--keyring-backend test` flag. The `testnet` command, creates - a validator node named `node{i}` (depends on the node name), and assigns - bbn tokens to it through a transaction written to `.testnet/gentxs/node{i}.json`. - The keys for this node can be pointed to by the `node{i}` name. - -### Running the node -```console -babylond start --home ./.testnet/node0/babylond -``` - -### Logs - -The logs for a particular node can be found under -`.testnets/node{id}/babylond/babylond.log`. - -### Performing queries +## Documentation -After building a node and starting it, you can perform queries. 
-```console -babylond --home .testnet/node{i}/babylond/ --chain-id \ - query -``` +For the most up-to-date documentation please visit [docs.babylonchain.io](https://docs.babylonchain.io) -For example, in order to get the hashes maintained by the `btcligthclient` -module: -```console -$ babylond --home .testnet/node0/babylond/ --chain-id chain-test query btclightclient hashes +## Joining the testnet -hashes: -- 00000000000000000002bf1c218853bc920f41f74491e6c92c6bc6fdc881ab47 -pagination: - next_key: null - total: "1" -``` - -### Submitting transactions - -After building a node and running it, one can send transactions as follows: -```console -babylond --home .testnet/node{i}/babylond --chain-id \ - --keyring-backend {os,file,test} --fees \ - --from --broadcast-mode {sync,async,block} \ - tx [data] -``` +Please follow the instructions on the [Joining the Testnet documentation page](https://docs.babylonchain.io/docs/testnet/overview). -The `--fees` flag specifies the amount of fees that we are willing to pay and -the denomination and the `--from` flag denotes the name of the key that we want -to use to sign the transaction (i.e. from which account we want this -transaction to happen). The `--broadcast-mode` specifies how long we want to -wait until we receive a response from the CLI: `async` means immediately, -`sync` means after the transaction has been validated through `CheckTx`, -and `block` means after the transaction has been processed by the next block. +## Contributing -For example, in the `btclightclient` module, in order -to submit a header, one should: -```console -babylond --home .testnet/node0/babylond --chain-id chain-test \ - --keyring-backend test --fees 100bbn \ - --from node0 --broadcast-mode block \ - tx btclightclient insert-header -``` - -## Running a multi-node testnet - -We provide support for running a multi-node testnet using Docker. 
To build it - -```console -make localnet-start -``` - -The corresponding node directories can be found under `.testnets` -```console -$ ls .testnets -gentxs node0 node1 node2 node3 -``` +The [docs](./docs) directory contains the necessary information on how to get started using the babylond executable for development purposes. diff --git a/app/app.go b/app/app.go index 1a7a80f4b..6790210f7 100644 --- a/app/app.go +++ b/app/app.go @@ -102,6 +102,9 @@ import ( "github.com/babylonchain/babylon/x/epoching" epochingkeeper "github.com/babylonchain/babylon/x/epoching/keeper" epochingtypes "github.com/babylonchain/babylon/x/epoching/types" + "github.com/babylonchain/babylon/x/monitor" + monitorkeeper "github.com/babylonchain/babylon/x/monitor/keeper" + monitortypes "github.com/babylonchain/babylon/x/monitor/types" storetypes "github.com/cosmos/cosmos-sdk/store/types" govclient "github.com/cosmos/cosmos-sdk/x/gov/client" govv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" @@ -162,6 +165,7 @@ var ( btclightclient.AppModuleBasic{}, btccheckpoint.AppModuleBasic{}, checkpointing.AppModuleBasic{}, + monitor.AppModuleBasic{}, // IBC-related ibc.AppModuleBasic{}, @@ -225,6 +229,7 @@ type BabylonApp struct { BTCLightClientKeeper btclightclientkeeper.Keeper BtcCheckpointKeeper btccheckpointkeeper.Keeper CheckpointingKeeper checkpointingkeeper.Keeper + MonitorKeeper monitorkeeper.Keeper // IBC-related modules IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly @@ -247,6 +252,7 @@ type BabylonApp struct { } func init() { + // Note: If this changes, the home directory under x/checkpointing/client/cli/tx.go needs to change as well userHomeDir, err := os.UserHomeDir() if err != nil { panic(err) @@ -286,6 +292,7 @@ func NewBabylonApp( btclightclienttypes.StoreKey, btccheckpointtypes.StoreKey, checkpointingtypes.StoreKey, + monitortypes.StoreKey, // IBC-related modules ibchost.StoreKey, ibctransfertypes.StoreKey, @@ -466,14 +473,6 @@ 
func NewBabylonApp( // No more routes can be added app.IBCKeeper.SetRouter(ibcRouter) - // add msgServiceRouter so that the epoching module can forward unwrapped messages to the staking module - epochingKeeper.SetMsgServiceRouter(app.BaseApp.MsgServiceRouter()) - // make ZoneConcierge to subscribe to the epoching's hooks - epochingKeeper.SetHooks( - epochingtypes.NewMultiEpochingHooks(app.ZoneConciergeKeeper.Hooks()), - ) - app.EpochingKeeper = epochingKeeper - btclightclientKeeper := *btclightclientkeeper.NewKeeper( appCodec, keys[btclightclienttypes.StoreKey], @@ -482,6 +481,22 @@ func NewBabylonApp( btcConfig, ) + app.MonitorKeeper = monitorkeeper.NewKeeper( + appCodec, + keys[monitortypes.StoreKey], + keys[monitortypes.StoreKey], + app.GetSubspace(monitortypes.ModuleName), + &btclightclientKeeper, + ) + + // add msgServiceRouter so that the epoching module can forward unwrapped messages to the staking module + epochingKeeper.SetMsgServiceRouter(app.BaseApp.MsgServiceRouter()) + // make ZoneConcierge to subscribe to the epoching's hooks + epochingKeeper.SetHooks( + epochingtypes.NewMultiEpochingHooks(app.ZoneConciergeKeeper.Hooks(), app.MonitorKeeper.Hooks()), + ) + app.EpochingKeeper = epochingKeeper + checkpointingKeeper := checkpointingkeeper.NewKeeper( appCodec, @@ -493,7 +508,7 @@ func NewBabylonApp( privSigner.ClientCtx, ) app.CheckpointingKeeper = *checkpointingKeeper.SetHooks( - checkpointingtypes.NewMultiCheckpointingHooks(app.EpochingKeeper.Hooks(), app.ZoneConciergeKeeper.Hooks()), + checkpointingtypes.NewMultiCheckpointingHooks(app.EpochingKeeper.Hooks(), app.ZoneConciergeKeeper.Hooks(), app.MonitorKeeper.Hooks()), ) app.ZoneConciergeKeeper.SetCheckpointingKeeper(app.CheckpointingKeeper) @@ -558,6 +573,7 @@ func NewBabylonApp( btclightclient.NewAppModule(appCodec, app.BTCLightClientKeeper, app.AccountKeeper, app.BankKeeper), btccheckpoint.NewAppModule(appCodec, app.BtcCheckpointKeeper, app.AccountKeeper, app.BankKeeper), 
checkpointing.NewAppModule(appCodec, app.CheckpointingKeeper, app.AccountKeeper, app.BankKeeper), + monitor.NewAppModule(appCodec, app.MonitorKeeper, app.AccountKeeper, app.BankKeeper), // IBC-related modules ibc.NewAppModule(app.IBCKeeper), transferModule, @@ -580,6 +596,7 @@ func NewBabylonApp( btclightclienttypes.ModuleName, btccheckpointtypes.ModuleName, checkpointingtypes.ModuleName, + monitortypes.ModuleName, // IBC-related modules ibchost.ModuleName, ibctransfertypes.ModuleName, @@ -602,6 +619,7 @@ func NewBabylonApp( btclightclienttypes.ModuleName, btccheckpointtypes.ModuleName, checkpointingtypes.ModuleName, + monitortypes.ModuleName, // IBC-related modules ibchost.ModuleName, ibctransfertypes.ModuleName, @@ -626,6 +644,7 @@ func NewBabylonApp( btclightclienttypes.ModuleName, btccheckpointtypes.ModuleName, checkpointingtypes.ModuleName, + monitortypes.ModuleName, // IBC-related modules ibchost.ModuleName, ibctransfertypes.ModuleName, @@ -665,6 +684,7 @@ func NewBabylonApp( btclightclient.NewAppModule(appCodec, app.BTCLightClientKeeper, app.AccountKeeper, app.BankKeeper), btccheckpoint.NewAppModule(appCodec, app.BtcCheckpointKeeper, app.AccountKeeper, app.BankKeeper), checkpointing.NewAppModule(appCodec, app.CheckpointingKeeper, app.AccountKeeper, app.BankKeeper), + monitor.NewAppModule(appCodec, app.MonitorKeeper, app.AccountKeeper, app.BankKeeper), // IBC-related modules ibc.NewAppModule(app.IBCKeeper), transferModule, @@ -897,6 +917,7 @@ func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino paramsKeeper.Subspace(btclightclienttypes.ModuleName) paramsKeeper.Subspace(btccheckpointtypes.ModuleName) paramsKeeper.Subspace(checkpointingtypes.ModuleName) + paramsKeeper.Subspace(monitortypes.ModuleName) // IBC-related modules paramsKeeper.Subspace(ibchost.ModuleName) paramsKeeper.Subspace(ibctransfertypes.ModuleName) diff --git a/app/test_helpers.go b/app/test_helpers.go index 75395f567..170e95d19 100644 --- a/app/test_helpers.go +++ 
b/app/test_helpers.go @@ -95,7 +95,6 @@ func setup(withGenesis bool, invCheckPeriod uint) (*BabylonApp, GenesisState) { // one validator in validator set during InitGenesis abci call - https://github.com/cosmos/cosmos-sdk/pull/9697 func NewBabyblonAppWithCustomOptions(t *testing.T, isCheckTx bool, privSigner *PrivSigner, options SetupOptions) *BabylonApp { t.Helper() - privVal := datagen.NewPV() pubKey, err := privVal.GetPubKey() require.NoError(t, err) diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index 55d132ecc..a4c2ae549 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -4,6 +4,180 @@ info: description: A REST interface for state queries version: 1.0.0 paths: + /babylon/btccheckpoint/v1: + get: + summary: BtcCheckpointsInfo returns checkpoint info for a range of epochs + operationId: BtcCheckpointsInfo + responses: + '200': + description: A successful response. + schema: + type: object + properties: + info_list: + type: array + items: + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: >- + height of earliest BTC block that includes this + checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted + from the checkpoint itself. 
+ reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the + submissions, calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QueryBtcCheckpointsInfoResponse is response type for the + Query/BtcCheckpointsInfo RPC method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: start_epoch + in: query + required: false + type: string + format: uint64 + - name: end_epoch + in: query + required: false + type: string + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query /babylon/btccheckpoint/v1/params: get: summary: Parameters queries the parameters of the module. @@ -73,23 +247,54 @@ paths: - Query /babylon/btccheckpoint/v1/{epoch_num}: get: - summary: >- - BtcCheckpointHeight returns earliest block height for given - rawcheckpoint - operationId: BtcCheckpointHeight + summary: BtcCheckpointInfo returns checkpoint info for a given epoch + operationId: BtcCheckpointInfo responses: '200': description: A successful response. 
schema: type: object properties: - earliest_btc_block_number: - type: string - format: uint64 - title: Earliest btc block number containing given raw checkpoint + info: + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: height of earliest BTC block that includes this checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from + the checkpoint itself. + reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the + submissions, calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses title: >- - QueryCurrentEpochResponse is response type for the - Query/CurrentEpoch RPC method + QueryBtcCheckpointInfoResponse is response type for the + Query/BtcCheckpointInfo RPC method default: description: An unexpected error response. schema: @@ -1239,204 +1444,244 @@ paths: type: string tags: - Query - /babylon/epoching/v1/epochs/{epoch_num}: + /babylon/epoching/v1/epochs: get: - summary: EpochInfo queries the information of a given epoch - operationId: EpochInfo + summary: >- + EpochsInfo queries the metadata of epochs in a given range, depending on + the + + parameters in the pagination request. Th main use case will be querying + the + + latest epochs in time order. + operationId: EpochsInfo responses: '200': description: A successful response. 
schema: type: object properties: - epoch: - type: object - properties: - epoch_number: - type: string - format: uint64 - current_epoch_interval: - type: string - format: uint64 - first_block_height: - type: string - format: uint64 - last_block_header: - description: >- - last_block_header is the header of the last block in this - epoch. + epochs: + type: array + items: + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in + this epoch. - Babylon needs to remember the last header of each epoch to - complete unbonding validators/delegations when a previous - epoch's checkpoint is finalised. + Babylon needs to remember the last header of each epoch + to complete unbonding validators/delegations when a + previous epoch's checkpoint is finalised. - The last_block_header field is nil in the epoch's - beginning, and is set upon the end of this epoch. - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing - a block in the blockchain, + The last_block_header field is nil in the epoch's + beginning, and is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, - including all blockchain data structures and the rules - of the application's + including all blockchain data structures and the + rules of the application's - state transition machine. 
- chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - app_hash_root: + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: >- + app_hash_root is the Merkle root of all AppHashs in this + epoch 
+ + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: type: string format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 title: >- - app_hash_root is the Merkle root of all AppHashs in this - epoch - - It will be used for proving a block is in an epoch - sealer_header: - title: >- - sealer_header is the 2nd header of the next epoch + total is total number of results available if + PageRequest.count_total - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing - a block in the blockchain, + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the - including all blockchain data structures and the rules - of the application's + corresponding request message has used PageRequest. - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. 
+ message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } default: description: An unexpected error response. schema: @@ -1630,715 +1875,860 @@ paths: "value": "1.212s" } parameters: - - name: epoch_num - in: path - required: true + - name: start_epoch + in: query + required: false + type: string + format: uint64 + - name: end_epoch + in: query + required: false + type: string + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false type: string format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query - /babylon/epoching/v1/epochs/{epoch_num}/messages: + /babylon/epoching/v1/epochs/{epoch_num}: get: - summary: EpochMsgs queries the messages of a given epoch - operationId: EpochMsgs + summary: EpochInfo queries the information of a given epoch + operationId: EpochInfo responses: '200': description: A successful response. schema: type: object properties: - msgs: - type: array - items: - type: object - properties: - tx_id: - type: string - format: byte - title: tx_id is the ID of the tx that contains the message - msg_id: - type: string - format: byte - title: >- - msg_id is the original message ID, i.e., hash of the - marshaled message - block_height: - type: string - format: uint64 - title: >- - block_height is the height when this msg is submitted to - Babylon - block_time: - type: string - format: date-time - title: >- - block_time is the timestamp when this msg is submitted - to Babylon - msg_create_validator: - type: object - properties: - description: - type: object - properties: - moniker: - type: string - description: >- - moniker defines a human-readable name for the - validator. - identity: - type: string - description: >- - identity defines an optional identity signature - (ex. UPort or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for - security contact. - details: - type: string - description: details define other optional details. - description: Description defines a validator description. - commission: - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to - delegators, as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate - which validator can ever charge, as a fraction. 
- max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily - increase of the validator commission, as a - fraction. - description: >- - CommissionRates defines the initial commission rates - to be used for creating - - a validator. - min_self_delegation: - type: string - delegator_address: - type: string - validator_address: - type: string - pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the - type of the serialized - - protocol buffer message. This string must - contain at least - - one "/" character. The last segment of the URL's - path must represent - - the fully qualified name of the type (as in + epoch: + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this + epoch. - `path/google.protobuf.Duration`). The name - should be in a canonical form + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous + epoch's checkpoint is finalised. - (e.g., leading "." is not accepted). + The last_block_header field is nil in the epoch's + beginning, and is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + including all blockchain data structures and the rules + of the application's - In practice, teams usually precompile into the - binary all types that they + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: >- + app_hash_root is the Merkle root of all AppHashs in this + epoch - expect it to use in the context of Any. However, - for URLs which use the + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch - scheme `http`, `https`, or no scheme, one can - optionally set up a type + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, - server that maps type URLs to message - definitions as follows: + including all blockchain data structures and the rules + of the application's + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized - * If no scheme is provided, `https` is assumed. + protocol buffer message. This string must contain at + least - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup - results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + one "/" character. 
The last segment of the URL's path + must represent - Note: this functionality is not currently - available in the official + the fully qualified name of the type (as in - protobuf release, and it is not used for type - URLs beginning with + `path/google.protobuf.Duration`). The name should be in + a canonical form - type.googleapis.com. + (e.g., leading "." is not accepted). - Schemes other than `http`, `https` (or the empty - scheme) might be + In practice, teams usually precompile into the binary + all types that they - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of - the above specified type. - description: >- - `Any` contains an arbitrary serialized protocol - buffer message along with a + expect it to use in the context of Any. However, for + URLs which use the - URL that describes the type of the serialized - message. + scheme `http`, `https`, or no scheme, one can optionally + set up a type + server that maps type URLs to message definitions as + follows: - Protobuf library provides support to pack/unpack Any - values in the form - of utility functions or additional generated methods - of the Any type. + * If no scheme is provided, `https` is assumed. + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - Example 1: Pack and unpack a message in C++. + Note: this functionality is not currently available in + the official - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + protobuf release, and it is not used for type URLs + beginning with - Example 2: Pack and unpack a message in Java. 
+ type.googleapis.com. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - Example 3: Pack and unpack a message in Python. + Schemes other than `http`, `https` (or the empty scheme) + might be - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a - Example 4: Pack and unpack a message in Go + URL that describes the type of the serialized message. - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... - } - The pack methods provided by protobuf library will - by default use + Protobuf library provides support to pack/unpack Any values + in the form - 'type.googleapis.com/full.type.name' as the type URL - and the unpack + of utility functions or additional generated methods of the + Any type. - methods only use the fully qualified type name after - the last '/' - in the type URL, for example "foo.bar.com/x/y.z" - will yield type + Example 1: Pack and unpack a message in C++. - name "y.z". + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + Example 2: Pack and unpack a message in Java. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - JSON + Example 3: Pack and unpack a message in Python. - ==== + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
- The JSON representation of an `Any` value uses the - regular + Example 4: Pack and unpack a message in Go - representation of the deserialized, embedded - message, with an + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - additional field `@type` which contains the type - URL. Example: + The pack methods provided by protobuf library will by + default use - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + methods only use the fully qualified type name after the + last '/' - If the embedded message type is well-known and has a - custom JSON + in the type URL, for example "foo.bar.com/x/y.z" will yield + type - representation, that representation will be embedded - adding a field + name "y.z". - `value` which holds the custom JSON in addition to - the `@type` - field. Example (for message - [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - value: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an - amount. + JSON + ==== - NOTE: The amount field is an Int which implements - the custom method + The JSON representation of an `Any` value uses the regular - signatures required by gogoproto. - description: >- - MsgCreateValidator defines a SDK message for creating a - new validator. - msg_delegate: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an - amount. 
+ representation of the deserialized, embedded message, with + an + additional field `@type` which contains the type URL. + Example: - NOTE: The amount field is an Int which implements - the custom method + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - signatures required by gogoproto. - description: >- - MsgDelegate defines a SDK message for performing a - delegation of coins + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - from a delegator to a validator. - msg_undelegate: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an - amount. + If the embedded message type is well-known and has a custom + JSON + representation, that representation will be embedded adding + a field - NOTE: The amount field is an Int which implements - the custom method + `value` which holds the custom JSON in addition to the + `@type` - signatures required by gogoproto. - description: >- - MsgUndelegate defines a SDK message for performing an - undelegation from a + field. Example (for message [google.protobuf.Duration][]): - delegate and a validator. - msg_begin_redelegate: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: epoch_num + in: path + required: true + type: string + format: uint64 + tags: + - Query + /babylon/epoching/v1/epochs/{epoch_num}/messages: + get: + summary: EpochMsgs queries the messages of a given epoch + operationId: EpochMsgs + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + msgs: + type: array + items: + type: object + properties: + tx_id: + type: string + format: byte + title: tx_id is the ID of the tx that contains the message + msg_id: + type: string + format: byte + title: >- + msg_id is the original message ID, i.e., hash of the + marshaled message + block_height: + type: string + format: uint64 + title: >- + block_height is the height when this msg is submitted to + Babylon + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp when this msg is submitted + to Babylon + msg_create_validator: type: object properties: - delegator_address: - type: string - validator_src_address: - type: string - validator_dst_address: - type: string - amount: + description: type: object properties: - denom: + moniker: type: string - amount: + description: >- + moniker defines a human-readable name for the + validator. + identity: + type: string + description: >- + identity defines an optional identity signature + (ex. UPort or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for + security contact. + details: + type: string + description: details define other optional details. + description: Description defines a validator description. + commission: + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to + delegators, as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate + which validator can ever charge, as a fraction. + max_change_rate: type: string + description: >- + max_change_rate defines the maximum daily + increase of the validator commission, as a + fraction. description: >- - Coin defines a token with a denomination and an - amount. 
- + CommissionRates defines the initial commission rates + to be used for creating - NOTE: The amount field is an Int which implements - the custom method + a validator. + min_self_delegation: + type: string + delegator_address: + type: string + validator_address: + type: string + pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized - signatures required by gogoproto. - description: >- - MsgBeginRedelegate defines a SDK message for performing - a redelegation + protocol buffer message. This string must + contain at least - of coins from a delegator and source validator to a - destination validator. - title: >- - QueuedMessage is a message that can change the validator set - and is delayed to the epoch boundary - title: msgs is the list of messages queued in the current epoch - pagination: - title: pagination defines the pagination in the response - type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + one "/" character. The last segment of the URL's + path must represent - was set, its value is undefined otherwise - description: >- - PageResponse is to be embedded in gRPC response messages where - the + the fully qualified name of the type (as in - corresponding request message has used PageRequest. + `path/google.protobuf.Duration`). The name + should be in a canonical form - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: >- - QueryEpochMsgsResponse is the response type for the - Query/EpochMsgs RPC method - default: - description: An unexpected error response. 
- schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized + (e.g., leading "." is not accepted). - protocol buffer message. This string must contain at - least - one "/" character. The last segment of the URL's path - must represent + In practice, teams usually precompile into the + binary all types that they - the fully qualified name of the type (as in + expect it to use in the context of Any. However, + for URLs which use the - `path/google.protobuf.Duration`). The name should be in - a canonical form + scheme `http`, `https`, or no scheme, one can + optionally set up a type - (e.g., leading "." is not accepted). + server that maps type URLs to message + definitions as follows: - In practice, teams usually precompile into the binary - all types that they + * If no scheme is provided, `https` is assumed. - expect it to use in the context of Any. However, for - URLs which use the + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup + results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - scheme `http`, `https`, or no scheme, one can optionally - set up a type + Note: this functionality is not currently + available in the official - server that maps type URLs to message definitions as - follows: + protobuf release, and it is not used for type + URLs beginning with + type.googleapis.com. - * If no scheme is provided, `https` is assumed. 
- * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + Schemes other than `http`, `https` (or the empty + scheme) might be - Note: this functionality is not currently available in - the official + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of + the above specified type. + description: >- + `Any` contains an arbitrary serialized protocol + buffer message along with a - protobuf release, and it is not used for type URLs - beginning with + URL that describes the type of the serialized + message. - type.googleapis.com. + Protobuf library provides support to pack/unpack Any + values in the form - Schemes other than `http`, `https` (or the empty scheme) - might be + of utility functions or additional generated methods + of the Any type. - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - URL that describes the type of the serialized message. + Example 1: Pack and unpack a message in C++. + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - Protobuf library provides support to pack/unpack Any values - in the form + Example 2: Pack and unpack a message in Java. - of utility functions or additional generated methods of the - Any type. + Foo foo = ...; + Any any = Any.pack(foo); + ... 
+ if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + Example 3: Pack and unpack a message in Python. - Example 1: Pack and unpack a message in C++. + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Example 4: Pack and unpack a message in Go - Example 2: Pack and unpack a message in Java. + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + The pack methods provided by protobuf library will + by default use - Example 3: Pack and unpack a message in Python. + 'type.googleapis.com/full.type.name' as the type URL + and the unpack - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + methods only use the fully qualified type name after + the last '/' - Example 4: Pack and unpack a message in Go + in the type URL, for example "foo.bar.com/x/y.z" + will yield type - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... - } + name "y.z". - The pack methods provided by protobuf library will by - default use - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - methods only use the fully qualified type name after the - last '/' + JSON - in the type URL, for example "foo.bar.com/x/y.z" will yield - type + ==== - name "y.z". + The JSON representation of an `Any` value uses the + regular + representation of the deserialized, embedded + message, with an + additional field `@type` which contains the type + URL. 
Example: - JSON + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - ==== + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - The JSON representation of an `Any` value uses the regular + If the embedded message type is well-known and has a + custom JSON - representation of the deserialized, embedded message, with - an + representation, that representation will be embedded + adding a field - additional field `@type` which contains the type URL. - Example: + `value` which holds the custom JSON in addition to + the `@type` - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + field. Example (for message + [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + value: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an + amount. - If the embedded message type is well-known and has a custom - JSON - representation, that representation will be embedded adding - a field + NOTE: The amount field is an Int which implements + the custom method - `value` which holds the custom JSON in addition to the - `@type` + signatures required by gogoproto. + description: >- + MsgCreateValidator defines a SDK message for creating a + new validator. + msg_delegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an + amount. - field. 
Example (for message [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - parameters: - - name: epoch_num - description: epoch_num is the number of epoch of the requested msg queue - in: path - required: true - type: string - format: uint64 - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. + NOTE: The amount field is an Int which implements + the custom method - It is less efficient than using key. Only one of offset or key - should + signatures required by gogoproto. + description: >- + MsgDelegate defines a SDK message for performing a + delegation of coins - be set. - in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. + from a delegator to a validator. + msg_undelegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an + amount. - If left empty it will default to a value to be set by each app. - in: query - required: false - type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - a count of the total number of items available for pagination in - UIs. + NOTE: The amount field is an Int which implements + the custom method - count_total is only respected when offset is used. It is ignored - when key + signatures required by gogoproto. 
+ description: >- + MsgUndelegate defines a SDK message for performing an + undelegation from a - is set. - in: query - required: false - type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. + delegate and a validator. + msg_begin_redelegate: + type: object + properties: + delegator_address: + type: string + validator_src_address: + type: string + validator_dst_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an + amount. - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean - tags: - - Query - /babylon/epoching/v1/epochs/{epoch_num}/validator_set: - get: - summary: EpochValSet queries the validator set of a given epoch - operationId: EpochValSet - responses: - '200': - description: A successful response. - schema: - type: object - properties: - validators: - type: array - items: - type: object - properties: - addr: - type: string - format: byte - title: addr is the validator's address (in sdk.ValAddress) - power: - type: string - format: int64 - title: power is the validator's voting power - total_voting_power: - type: string - format: int64 + NOTE: The amount field is an Int which implements + the custom method + + signatures required by gogoproto. + description: >- + MsgBeginRedelegate defines a SDK message for performing + a redelegation + + of coins from a delegator and source validator to a + destination validator. 
+ title: >- + QueuedMessage is a message that can change the validator set + and is delayed to the epoch boundary + title: msgs is the list of messages queued in the current epoch pagination: + title: pagination defines the pagination in the response type: object properties: next_key: @@ -2366,6 +2756,9 @@ paths: repeated Bar results = 1; PageResponse page = 2; } + title: >- + QueryEpochMsgsResponse is the response type for the + Query/EpochMsgs RPC method default: description: An unexpected error response. schema: @@ -2560,6 +2953,7 @@ paths: } parameters: - name: epoch_num + description: epoch_num is the number of epoch of the requested msg queue in: path required: true type: string @@ -2622,729 +3016,753 @@ paths: type: boolean tags: - Query - /babylon/epoching/v1/epochs:latest/messages: + /babylon/epoching/v1/epochs/{epoch_num}/validator_set: get: - summary: >- - LatestEpochMsgs queries the messages within a given number of most - recent epochs - operationId: LatestEpochMsgs + summary: EpochValSet queries the validator set of a given epoch + operationId: EpochValSet responses: '200': description: A successful response. 
schema: type: object properties: - latest_epoch_msgs: + validators: type: array items: type: object properties: - epoch_number: + addr: type: string - format: uint64 - msgs: - type: array - items: - type: object - properties: - tx_id: - type: string - format: byte - title: >- - tx_id is the ID of the tx that contains the - message - msg_id: - type: string - format: byte - title: >- - msg_id is the original message ID, i.e., hash of - the marshaled message - block_height: - type: string - format: uint64 - title: >- - block_height is the height when this msg is - submitted to Babylon - block_time: - type: string - format: date-time - title: >- - block_time is the timestamp when this msg is - submitted to Babylon - msg_create_validator: - type: object - properties: - description: - type: object - properties: - moniker: - type: string - description: >- - moniker defines a human-readable name for - the validator. - identity: - type: string - description: >- - identity defines an optional identity - signature (ex. UPort or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email - for security contact. - details: - type: string - description: details define other optional details. - description: Description defines a validator description. - commission: - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to - delegators, as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission - rate which validator can ever charge, as a - fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily - increase of the validator commission, as a - fraction. - description: >- - CommissionRates defines the initial commission - rates to be used for creating - - a validator. 
- min_self_delegation: - type: string - delegator_address: - type: string - validator_address: - type: string - pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely - identifies the type of the serialized + format: byte + title: addr is the validator's address (in sdk.ValAddress) + power: + type: string + format: int64 + title: power is the validator's voting power + total_voting_power: + type: string + format: int64 + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - protocol buffer message. This string must - contain at least + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the - one "/" character. The last segment of the - URL's path must represent + corresponding request message has used PageRequest. - the fully qualified name of the type (as - in + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized - `path/google.protobuf.Duration`). The name - should be in a canonical form + protocol buffer message. This string must contain at + least - (e.g., leading "." is not accepted). + one "/" character. 
The last segment of the URL's path + must represent + the fully qualified name of the type (as in - In practice, teams usually precompile into - the binary all types that they + `path/google.protobuf.Duration`). The name should be in + a canonical form - expect it to use in the context of Any. - However, for URLs which use the + (e.g., leading "." is not accepted). - scheme `http`, `https`, or no scheme, one - can optionally set up a type - server that maps type URLs to message - definitions as follows: + In practice, teams usually precompile into the binary + all types that they + expect it to use in the context of Any. However, for + URLs which use the - * If no scheme is provided, `https` is - assumed. + scheme `http`, `https`, or no scheme, one can optionally + set up a type - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup - results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + server that maps type URLs to message definitions as + follows: - Note: this functionality is not currently - available in the official - protobuf release, and it is not used for - type URLs beginning with + * If no scheme is provided, `https` is assumed. - type.googleapis.com. - - - Schemes other than `http`, `https` (or the - empty scheme) might be + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - used with implementation specific - semantics. 
- value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer - of the above specified type. - description: >- - `Any` contains an arbitrary serialized - protocol buffer message along with a + Note: this functionality is not currently available in + the official - URL that describes the type of the serialized - message. + protobuf release, and it is not used for type URLs + beginning with + type.googleapis.com. - Protobuf library provides support to - pack/unpack Any values in the form - of utility functions or additional generated - methods of the Any type. + Schemes other than `http`, `https` (or the empty scheme) + might be + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a - Example 1: Pack and unpack a message in C++. + URL that describes the type of the serialized message. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - Example 2: Pack and unpack a message in Java. + Protobuf library provides support to pack/unpack Any values + in the form - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + of utility functions or additional generated methods of the + Any type. - Example 3: Pack and unpack a message in Python. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + Example 1: Pack and unpack a message in C++. - Example 4: Pack and unpack a message in Go + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... 
- } + Example 2: Pack and unpack a message in Java. - The pack methods provided by protobuf library - will by default use + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - 'type.googleapis.com/full.type.name' as the - type URL and the unpack + Example 3: Pack and unpack a message in Python. - methods only use the fully qualified type name - after the last '/' + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - in the type URL, for example - "foo.bar.com/x/y.z" will yield type + Example 4: Pack and unpack a message in Go - name "y.z". + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + The pack methods provided by protobuf library will by + default use + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - JSON + methods only use the fully qualified type name after the + last '/' - ==== + in the type URL, for example "foo.bar.com/x/y.z" will yield + type - The JSON representation of an `Any` value uses - the regular + name "y.z". - representation of the deserialized, embedded - message, with an - additional field `@type` which contains the - type URL. Example: - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + JSON - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + ==== - If the embedded message type is well-known and - has a custom JSON + The JSON representation of an `Any` value uses the regular - representation, that representation will be - embedded adding a field + representation of the deserialized, embedded message, with + an - `value` which holds the custom JSON in - addition to the `@type` + additional field `@type` which contains the type URL. + Example: - field. 
Example (for message - [google.protobuf.Duration][]): + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - value: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and - an amount. + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + If the embedded message type is well-known and has a custom + JSON - NOTE: The amount field is an Int which - implements the custom method + representation, that representation will be embedded adding + a field - signatures required by gogoproto. - description: >- - MsgCreateValidator defines a SDK message for - creating a new validator. - msg_delegate: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and - an amount. + `value` which holds the custom JSON in addition to the + `@type` + field. Example (for message [google.protobuf.Duration][]): - NOTE: The amount field is an Int which - implements the custom method + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: epoch_num + in: path + required: true + type: string + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. - signatures required by gogoproto. 
- description: >- - MsgDelegate defines a SDK message for performing a - delegation of coins + It is less efficient than using key. Only one of offset or key + should - from a delegator to a validator. - msg_undelegate: + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /babylon/epoching/v1/epochs:latest/messages: + get: + summary: >- + LatestEpochMsgs queries the messages within a given number of most + recent epochs + operationId: LatestEpochMsgs + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + latest_epoch_msgs: + type: array + items: + type: object + properties: + epoch_number: + type: string + format: uint64 + msgs: + type: array + items: + type: object + properties: + tx_id: + type: string + format: byte + title: >- + tx_id is the ID of the tx that contains the + message + msg_id: + type: string + format: byte + title: >- + msg_id is the original message ID, i.e., hash of + the marshaled message + block_height: + type: string + format: uint64 + title: >- + block_height is the height when this msg is + submitted to Babylon + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp when this msg is + submitted to Babylon + msg_create_validator: type: object properties: - delegator_address: - type: string - validator_address: - type: string - amount: + description: type: object properties: - denom: + moniker: type: string - amount: + description: >- + moniker defines a human-readable name for + the validator. + identity: + type: string + description: >- + identity defines an optional identity + signature (ex. UPort or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email + for security contact. + details: + type: string + description: details define other optional details. + description: Description defines a validator description. + commission: + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to + delegators, as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission + rate which validator can ever charge, as a + fraction. + max_change_rate: type: string + description: >- + max_change_rate defines the maximum daily + increase of the validator commission, as a + fraction. description: >- - Coin defines a token with a denomination and - an amount. 
- - - NOTE: The amount field is an Int which - implements the custom method - - signatures required by gogoproto. - description: >- - MsgUndelegate defines a SDK message for performing - an undelegation from a + CommissionRates defines the initial commission + rates to be used for creating - delegate and a validator. - msg_begin_redelegate: - type: object - properties: - delegator_address: + a validator. + min_self_delegation: type: string - validator_src_address: + delegator_address: type: string - validator_dst_address: + validator_address: type: string - amount: + pubkey: type: object properties: - denom: - type: string - amount: + type_url: type: string - description: >- - Coin defines a token with a denomination and - an amount. + description: >- + A URL/resource name that uniquely + identifies the type of the serialized + protocol buffer message. This string must + contain at least - NOTE: The amount field is an Int which - implements the custom method + one "/" character. The last segment of the + URL's path must represent - signatures required by gogoproto. - description: >- - MsgBeginRedelegate defines a SDK message for - performing a redelegation + the fully qualified name of the type (as + in - of coins from a delegator and source validator to - a destination validator. - title: >- - QueuedMessage is a message that can change the - validator set and is delayed to the epoch boundary - title: >- - epoch_msg_map is a list of QueuedMessageList + `path/google.protobuf.Duration`). The name + should be in a canonical form - each QueuedMessageList has a field identifying the epoch - number - pagination: - type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. 
- total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + (e.g., leading "." is not accepted). - was set, its value is undefined otherwise - description: >- - PageResponse is to be embedded in gRPC response messages where - the - corresponding request message has used PageRequest. + In practice, teams usually precompile into + the binary all types that they - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: >- - QueryLatestEpochMsgsResponse is the response type for the - Query/LatestEpochMsgs RPC method - default: - description: An unexpected error response. - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized + expect it to use in the context of Any. + However, for URLs which use the - protocol buffer message. This string must contain at - least + scheme `http`, `https`, or no scheme, one + can optionally set up a type - one "/" character. The last segment of the URL's path - must represent + server that maps type URLs to message + definitions as follows: - the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be in - a canonical form + * If no scheme is provided, `https` is + assumed. - (e.g., leading "." is not accepted). + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup + results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) 
+ Note: this functionality is not currently + available in the official - In practice, teams usually precompile into the binary - all types that they + protobuf release, and it is not used for + type URLs beginning with - expect it to use in the context of Any. However, for - URLs which use the + type.googleapis.com. - scheme `http`, `https`, or no scheme, one can optionally - set up a type - server that maps type URLs to message definitions as - follows: + Schemes other than `http`, `https` (or the + empty scheme) might be + used with implementation specific + semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer + of the above specified type. + description: >- + `Any` contains an arbitrary serialized + protocol buffer message along with a - * If no scheme is provided, `https` is assumed. + URL that describes the type of the serialized + message. - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - Note: this functionality is not currently available in - the official + Protobuf library provides support to + pack/unpack Any values in the form - protobuf release, and it is not used for type URLs - beginning with + of utility functions or additional generated + methods of the Any type. - type.googleapis.com. + Example 1: Pack and unpack a message in C++. - Schemes other than `http`, `https` (or the empty scheme) - might be + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - used with implementation specific semantics. 
- value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a + Example 2: Pack and unpack a message in Java. - URL that describes the type of the serialized message. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + Example 3: Pack and unpack a message in Python. - Protobuf library provides support to pack/unpack Any values - in the form + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - of utility functions or additional generated methods of the - Any type. + Example 4: Pack and unpack a message in Go + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - Example 1: Pack and unpack a message in C++. + The pack methods provided by protobuf library + will by default use - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + 'type.googleapis.com/full.type.name' as the + type URL and the unpack - Example 2: Pack and unpack a message in Java. + methods only use the fully qualified type name + after the last '/' - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + in the type URL, for example + "foo.bar.com/x/y.z" will yield type - Example 3: Pack and unpack a message in Python. + name "y.z". - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - Example 4: Pack and unpack a message in Go - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... 
- } + JSON - The pack methods provided by protobuf library will by - default use + ==== - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + The JSON representation of an `Any` value uses + the regular - methods only use the fully qualified type name after the - last '/' + representation of the deserialized, embedded + message, with an - in the type URL, for example "foo.bar.com/x/y.z" will yield - type + additional field `@type` which contains the + type URL. Example: - name "y.z". + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + If the embedded message type is well-known and + has a custom JSON - JSON + representation, that representation will be + embedded adding a field - ==== + `value` which holds the custom JSON in + addition to the `@type` - The JSON representation of an `Any` value uses the regular + field. Example (for message + [google.protobuf.Duration][]): - representation of the deserialized, embedded message, with - an + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + value: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and + an amount. - additional field `@type` which contains the type URL. - Example: - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + NOTE: The amount field is an Int which + implements the custom method - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + signatures required by gogoproto. + description: >- + MsgCreateValidator defines a SDK message for + creating a new validator. 
+ msg_delegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and + an amount. - If the embedded message type is well-known and has a custom - JSON - representation, that representation will be embedded adding - a field + NOTE: The amount field is an Int which + implements the custom method - `value` which holds the custom JSON in addition to the - `@type` + signatures required by gogoproto. + description: >- + MsgDelegate defines a SDK message for performing a + delegation of coins - field. Example (for message [google.protobuf.Duration][]): + from a delegator to a validator. + msg_undelegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and + an amount. - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - parameters: - - name: end_epoch - description: end_epoch is the number of the last epoch to query. - in: query - required: false - type: string - format: uint64 - - name: epoch_count - description: epoch_count is the number of epochs to query. - in: query - required: false - type: string - format: uint64 - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - It is less efficient than using key. Only one of offset or key - should + NOTE: The amount field is an Int which + implements the custom method - be set. 
- in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. + signatures required by gogoproto. + description: >- + MsgUndelegate defines a SDK message for performing + an undelegation from a - If left empty it will default to a value to be set by each app. - in: query - required: false - type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include + delegate and a validator. + msg_begin_redelegate: + type: object + properties: + delegator_address: + type: string + validator_src_address: + type: string + validator_dst_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and + an amount. - a count of the total number of items available for pagination in - UIs. - count_total is only respected when offset is used. It is ignored - when key + NOTE: The amount field is an Int which + implements the custom method - is set. - in: query - required: false - type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. + signatures required by gogoproto. + description: >- + MsgBeginRedelegate defines a SDK message for + performing a redelegation + of coins from a delegator and source validator to + a destination validator. + title: >- + QueuedMessage is a message that can change the + validator set and is delayed to the epoch boundary + title: >- + epoch_msg_map is a list of QueuedMessageList - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean - tags: - - Query - /babylon/epoching/v1/params: - get: - summary: Params queries the parameters of the module. - operationId: EpochingParams - responses: - '200': - description: A successful response. 
- schema: - type: object - properties: - params: - description: params holds all the parameters of this module. + each QueuedMessageList has a field identifying the epoch + number + pagination: type: object properties: - epoch_interval: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: type: string format: uint64 title: >- - epoch_interval is the number of consecutive blocks to form - an epoch - description: >- - QueryParamsResponse is the response type for the Query/Params RPC - method. + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QueryLatestEpochMsgsResponse is the response type for the + Query/LatestEpochMsgs RPC method default: description: An unexpected error response. schema: @@ -3537,20 +3955,306 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + parameters: + - name: end_epoch + description: end_epoch is the number of the last epoch to query. + in: query + required: false + type: string + format: uint64 + - name: epoch_count + description: epoch_count is the number of epochs to query. + in: query + required: false + type: string + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. 
Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query - /babylon/epoching/v1/validator_lifecycle/{val_addr}: + /babylon/epoching/v1/params: get: - summary: ValidatorLifecycle queries the lifecycle of a given validator - operationId: ValidatorLifecycle + summary: Params queries the parameters of the module. + operationId: EpochingParams responses: '200': description: A successful response. schema: type: object properties: - val_life: - type: object + params: + description: params holds all the parameters of this module. + type: object + properties: + epoch_interval: + type: string + format: uint64 + title: >- + epoch_interval is the number of consecutive blocks to form + an epoch + description: >- + QueryParamsResponse is the response type for the Query/Params RPC + method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + tags: + - Query + /babylon/epoching/v1/validator_lifecycle/{val_addr}: + get: + summary: ValidatorLifecycle queries the lifecycle of a given validator + operationId: ValidatorLifecycle + responses: + '200': + description: A successful response. + schema: + type: object + properties: + val_life: + type: object properties: val_addr: type: string @@ -4035,81 +4739,46 @@ paths: format: uint64 tags: - Query - /babylon/checkpointing/v1/latest_checkpoint: + /babylon/checkpointing/v1/last_raw_checkpoint/{status}: get: - summary: LatestCheckpoint queries the checkpoint with the highest epoch num. - operationId: LatestCheckpoint + summary: >- + LastCheckpointWithStatus queries the last checkpoint with a given status + or a more matured status + operationId: LastCheckpointWithStatus responses: '200': description: A successful response. schema: type: object properties: - latest_checkpoint: + raw_checkpoint: type: object properties: - ckpt: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: >- - epoch_num defines the epoch number the raw checkpoint - is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers - of the BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated - from individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: + epoch_num: type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. 
- - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - title: status defines the status of the checkpoint - bls_aggr_pk: + format: uint64 + title: >- + epoch_num defines the epoch number the raw checkpoint is + for + last_commit_hash: type: string format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: + title: >- + last_commit_hash defines the 'LastCommitHash' that + individual BLS sigs are signed on + bitmap: type: string - format: uint64 + format: byte title: >- - power_sum defines the accumulated voting power for the - checkpoint - description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. - description: >- - QueryLatestCheckpointResponse is the response type for the - Query/LatestCheckpoint - - RPC method. + bitmap defines the bitmap that indicates the signers of + the BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated + from individual BLS sigs + title: RawCheckpoint wraps the BLS multi sig with meta data default: description: An unexpected error response. 
schema: @@ -4132,6 +4801,17 @@ paths: value: type: string format: byte + parameters: + - name: status + in: path + required: true + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED tags: - Query /babylon/checkpointing/v1/params: @@ -4243,6 +4923,49 @@ paths: title: >- power_sum defines the accumulated voting power for the checkpoint + lifecycle: + type: array + items: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: >- + state defines the event of a state transition + towards this state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block that + triggers the state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block + that triggers the state update + description: >- + lifecycle defines the lifecycle of this checkpoint, i.e., + each state transition and + + the time (in both timestamp and block height) of this + transition. description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. 
description: >- QueryRawCheckpointResponse is the response type for the @@ -4351,6 +5074,49 @@ paths: title: >- power_sum defines the accumulated voting power for the checkpoint + lifecycle: + type: array + items: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: >- + state defines the event of a state transition + towards this state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block + that triggers the state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block + that triggers the state update + description: >- + lifecycle defines the lifecycle of this checkpoint, + i.e., each state transition and + + the time (in both timestamp and block height) of this + transition. description: >- RawCheckpointWithMeta wraps the raw checkpoint with meta data. @@ -4473,256 +5239,64 @@ paths: type: boolean tags: - Query - /babylon/checkpointing/v1/recent_raw_checkpoints/{from_epoch_num}: + /babylon/zoneconcierge/v1/chain_info/{chain_id}: get: - summary: >- - RawCheckpointList queries a list of checkpoints starting from a given - epoch number to the current epoch number. 
- operationId: RecentRawCheckpointList + summary: ChainInfo queries the latest info of a chain in Babylon's view + operationId: ChainInfo responses: '200': description: A successful response. schema: type: object properties: - raw_checkpoints: - type: array - items: - type: object - properties: - ckpt: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: >- - epoch_num defines the epoch number the raw - checkpoint is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers - of the BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is - aggregated from individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. - - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. 
- title: status defines the status of the checkpoint - bls_aggr_pk: - type: string - format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: - type: string - format: uint64 - title: >- - power_sum defines the accumulated voting power for the - checkpoint - description: >- - RawCheckpointWithMeta wraps the raw checkpoint with meta - data. - title: >- - the order is going from the newest to oldest based on the - epoch number - pagination: - description: pagination defines the pagination in the response. + chain_info: + title: chain_info is the info of the CZ type: object properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: + chain_id: type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: >- - QueryRecentRawCheckpointListResponse is the response type for the - Query/RecentRawCheckpoints + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger - RPC method. - default: - description: An unexpected error response. 
- schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - parameters: - - name: from_epoch_num - description: from_epoch defines the start epoch of the query, which is inclusive - in: path - required: true - type: string - format: uint64 - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, - It is less efficient than using key. Only one of offset or key - should - - be set. - in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. - in: query - required: false - type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. 
- in: query - required: false - type: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - - - Since: cosmos-sdk 0.43 - in: query - required: false - type: boolean - tags: - - Query - /babylon/zoneconcierge/v1/chain_info/{chain_id}: - get: - summary: ChainInfo queries the latest info of a chain in Babylon's view - operationId: ChainInfo - responses: - '200': - description: A successful response. - schema: - type: object - properties: - chain_info: - title: chain_info is the info of the CZ - type: object - properties: - chain_id: - type: string - title: chain_id is the ID of the chain - latest_header: - title: >- - latest_header is the latest header in the canonical chain - of CZ - type: object - properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: - type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the - header on CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that - includes this CZ header - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for - processing a block in the blockchain, - - including all blockchain data structures and the - rules of the application's + including all blockchain data structures and the + rules of the application's state transition machine. chain_id: @@ -4956,6 +5530,12 @@ paths: the subsequent headers cannot be verified without knowing the validator set in the previous header. 
+ timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped + headers in CZ's canonical chain description: >- QueryChainInfoResponse is response type for the Query/ChainInfo RPC method. @@ -5158,279 +5738,66 @@ paths: type: string tags: - Query - /babylon/zoneconcierge/v1/chains: + /babylon/zoneconcierge/v1/chain_info/{chain_id}/epochs/{epoch_num}: get: - summary: ChainList queries the list of chains that checkpoint to Babylon - operationId: ChainList + summary: >- + EpochChainInfo queries the latest info of a chain in a given epoch of + Babylon's view + operationId: EpochChainInfo responses: '200': description: A successful response. schema: type: object properties: - chain_ids: - type: array - items: - type: string - title: >- - QueryChainListResponse is response type for the Query/ChainList - RPC method - default: - description: An unexpected error response. - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. 
- - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. 
+ chain_info: + title: chain_info is the info of the CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". 
- - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - tags: - - Query - /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}: - get: - summary: >- - FinalizedChainInfo queries the BTC-finalised info of a chain, with - proofs - operationId: FinalizedChainInfo - responses: - '200': - description: A successful response. 
- schema: - type: object - properties: - finalized_chain_info: - title: finalized_chain_info is the info of the CZ - type: object - properties: - chain_id: - type: string - title: chain_id is the ID of the chain - latest_header: - title: >- - latest_header is the latest header in the canonical chain - of CZ - type: object - properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: - type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the - header on CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that - includes this CZ header - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for - processing a block in the blockchain, - - including all blockchain data structures and the - rules of the application's + including all blockchain data structures and the + rules of the application's state transition machine. chain_id: @@ -5664,119 +6031,251 @@ paths: the subsequent headers cannot be verified without knowing the validator set in the previous header. - epoch_info: - title: epoch_info is the metadata of the last BTC-finalised epoch - type: object - properties: - epoch_number: - type: string - format: uint64 - current_epoch_interval: + timestamped_headers_count: type: string format: uint64 - first_block_height: - type: string - format: uint64 - last_block_header: - description: >- - last_block_header is the header of the last block in this - epoch. 
+ title: >- + timestamped_headers_count is the number of timestamped + headers in CZ's canonical chain + description: >- + QueryEpochChainInfoResponse is response type for the + Query/EpochChainInfo RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized - Babylon needs to remember the last header of each epoch to - complete unbonding validators/delegations when a previous - epoch's checkpoint is finalised. + protocol buffer message. This string must contain at + least - The last_block_header field is nil in the epoch's - beginning, and is set upon the end of this epoch. - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing - a block in the blockchain, + one "/" character. The last segment of the URL's path + must represent - including all blockchain data structures and the rules - of the application's + the fully qualified name of the type (as in - state transition machine. 
- chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - app_hash_root: + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. 
+ + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: chain_id + in: path + required: true + type: string + - name: epoch_num + in: path + required: true + type: string + format: uint64 + tags: + - Query + /babylon/zoneconcierge/v1/chain_info/{chain_id}/header/{height}: + get: + summary: Header queries the CZ header and fork headers at a given height. + operationId: Header + responses: + '200': + description: A successful response. + schema: + type: object + properties: + header: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: type: string format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 title: >- - app_hash_root is the Merkle root of all AppHashs in this - epoch + height is the height of this header on CZ ledger - It will be used for proving a block is in an epoch - sealer_header: + (hash, height) jointly provides the position of the header + on CZ ledger + babylon_header: title: >- - sealer_header is the 2nd header of the next epoch - - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header + babylon_header is the header of the babylon block that + includes this CZ header type: object properties: version: @@ -5853,258 +6352,176 @@ paths: type: string format: byte description: Header defines the structure of a Tendermint block header. 
- raw_checkpoint: - title: raw_checkpoint is the raw checkpoint of this epoch - type: object - properties: - epoch_num: + babylon_epoch: type: string format: uint64 - title: >- - epoch_num defines the epoch number the raw checkpoint is - for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of - the BLS multi sig - bls_multi_sig: + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string format: byte title: >- - bls_multi_sig defines the multi sig that is aggregated - from individual BLS sigs - btc_submission_key: - title: >- - btc_submission_key is position of two BTC txs that include the - raw checkpoint of this epoch + babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + fork_headers: type: object properties: - key: + headers: type: array items: type: object properties: - index: - type: integer - format: int64 + chain_id: + type: string + title: chain_id is the unique ID of the chain hash: type: string format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified - by hash of block in - - which transaction was included and transaction index in - the block - proof_tx_in_block: - title: >- - proof_tx_in_block is the proof that tx that carries the header - is included in a certain Babylon block - type: object - properties: - root_hash: - type: string - format: byte - data: - type: string - format: byte - proof: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - 
description: >- - TxProof represents a Merkle proof of the presence of a - transaction in the Merkle tree. - proof_header_in_epoch: - type: object - properties: - total: - type: string - format: int64 - index: - type: string - format: int64 - leaf_hash: - type: string - format: byte - aunts: - type: array - items: - type: string - format: byte - title: >- - proof_header_in_epoch is the proof that the Babylon header is - in a certain epoch - proof_epoch_sealed: - title: proof_epoch_sealed is the proof that the epoch is sealed - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: + title: hash is the hash of this header + height: type: string format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting - power, and its bls public key - title: >- - validator_set is the validator set of the sealed epoch + title: >- + height is the height of this header on CZ ledger - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's - metadata is committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block + that includes this CZ header type: object properties: - type: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: type: string - key: + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: type: string format: byte - data: + title: hashes of block data + data_hash: type: string format: byte - title: >- - ProofOp defines an operation used for calculating - Merkle root - - The data could be arbitrary format, providing - nessecary data - - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's - validator set is committed to `app_hash` of the sealer - header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: type: string - key: + format: byte + evidence_hash: type: string format: byte - data: + title: consensus info + proposer_address: type: string format: byte + description: >- + Header defines the structure of a Tendermint block + header. 
+ babylon_epoch: + type: string + format: uint64 title: >- - ProofOp defines an operation used for calculating - Merkle root - - The data could be arbitrary format, providing - nessecary data - - for example neighbouring node hash - proof_epoch_submitted: - type: array - items: - type: object - properties: - key: - type: object - properties: - index: - type: integer - format: int64 - hash: + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: type: string format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified - by hash of block in - - which transaction was included and transaction index in - the block - description: >- - key is the position (txIdx, blockHash) of this tx on BTC - blockchain - - Although it is already a part of SubmissionKey, we store - it here again + title: >- + babylon_tx_hash is the hash of the tx that includes + this header - to make TransactionInfo self-contained. + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon + ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the + same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same + height. - For example, storing the key allows TransactionInfo to - not relay on + For example, assuming the following blockchain - the fact that TransactionInfo will be ordered in the - same order as + ``` - TransactionKeys in SubmissionKey. - transaction: - type: string - format: byte - title: transaction is the full transaction in bytes - proof: - type: string - format: byte - title: >- - proof is the Merkle proof that this tx is included in - the position in `key` + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - TODO: maybe it could use here better format as we - already processed and + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - valideated the proof? 
- title: >- - TransactionInfo is the info of a tx that contains Babylon - checkpoint, including - - the position of the tx on BTC blockchain + Note that each `IndexedHeader` in the fork should have a valid + quorum certificate. - - the full tx content + Such forks exist since Babylon considers CZs might have + dishonest majority. - - the Merkle proof that this tx is on the above position - title: >- - proof_epoch_submitted is the proof that the epoch's checkpoint - is included in BTC ledger + Also note that the IBC-Go implementation will only consider + the first header in a fork valid, since - It is the two TransactionInfo in the best (i.e., earliest) - checkpoint submission + the subsequent headers cannot be verified without knowing the + validator set in the previous header. description: >- - QueryFinalizedChainInfoResponse is response type for the - Query/FinalizedChainInfo RPC method. + QueryParamsResponse is response type for the Query/Header RPC + method. default: description: An unexpected error response. schema: @@ -6299,35 +6716,65 @@ paths: } parameters: - name: chain_id - description: chain_id is the ID of the CZ in: path required: true type: string - - name: prove - description: >- - prove indicates whether the querier wants to get proofs of this - timestamp. - in: query - required: false - type: boolean + - name: height + in: path + required: true + type: string + format: uint64 tags: - Query - /babylon/zoneconcierge/v1/params: + /babylon/zoneconcierge/v1/chains: get: - summary: Parameters queries the parameters of the module. - operationId: ZoneConciergeParams + summary: ChainList queries the list of chains that checkpoint to Babylon + operationId: ChainList responses: '200': description: A successful response. schema: type: object properties: - params: - description: params holds all the parameters of this module. 
+ chain_ids: + type: array + items: + type: string + title: >- + chain_ids are IDs of the chains in ascending alphabetical + order + pagination: + title: pagination defines the pagination in the response type: object - description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QueryChainListResponse is response type for the Query/ChainList + RPC method default: description: An unexpected error response. schema: @@ -6520,230 +6967,3328 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - tags: - - Query -definitions: - babylon.btccheckpoint.v1.Params: - type: object - properties: - btc_confirmation_depth: - type: string - format: uint64 - title: >- - btc_confirmation_depth is the confirmation depth in BTC. - - A block is considered irreversible only when it is at least k-deep in - BTC - - (k in research paper) - checkpoint_finalization_timeout: - type: string - format: uint64 - title: >- - checkpoint_finalization_timeout is the maximum time window (measured - in BTC blocks) between a checkpoint + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. 
+ in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. - - being submitted to BTC, and + It is less efficient than using key. Only one of offset or key + should - - being reported back to BBN + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. - If a checkpoint has not been reported back within w BTC blocks, then - BBN has dishonest majority and is stalling checkpoints + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include - (w in research paper) - description: Params defines the parameters for the module. - babylon.btccheckpoint.v1.QueryBtcCheckpointHeightResponse: - type: object - properties: - earliest_btc_block_number: - type: string - format: uint64 - title: Earliest btc block number containing given raw checkpoint - title: >- - QueryCurrentEpochResponse is response type for the Query/CurrentEpoch RPC - method - babylon.btccheckpoint.v1.QueryEpochSubmissionsResponse: - type: object - properties: - keys: - type: array - items: - type: object - properties: - key: - type: array - items: + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}: + get: + summary: >- + FinalizedChainInfo queries the BTC-finalised info of a chain, with + proofs + operationId: FinalizedChainInfo + responses: + '200': + description: A successful response. + schema: + type: object + properties: + finalized_chain_info: + title: finalized_chain_info is the info of the CZ type: object properties: - index: - type: integer - format: int64 - hash: + chain_id: type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified by hash - of block in - - which transaction was included and transaction index in the - block - title: >- - Checkpoint can be composed from multiple transactions, so to - identify whole + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger - submission we need list of transaction keys. + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, - Each submission can generally be identified by this list of (txIdx, - blockHash) + including all blockchain data structures and the + rules of the application's - tuples. + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes + this header - Note: this could possibly be optimized as if transactions were in - one block + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger - they would have the same block hash and different indexes, but each - blockhash + (hash, 
height) jointly provides the position of + the header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon + block that includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, - is only 33 (1 byte for prefix encoding and 32 byte hash), so there - should + including all blockchain data structures and + the rules of the application's - be other strong arguments for this optimization - description: All submissions saved during an epoch. - pagination: - type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - babylon.btccheckpoint.v1.QueryParamsResponse: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - properties: - btc_confirmation_depth: - type: string - format: uint64 - title: >- - btc_confirmation_depth is the confirmation depth in BTC. - - A block is considered irreversible only when it is at least k-deep - in BTC + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on + Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that + includes this header - (k in research paper) - checkpoint_finalization_timeout: - type: string - format: uint64 - title: >- - checkpoint_finalization_timeout is the maximum time window - (measured in BTC blocks) between a checkpoint + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon + ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at + the same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the + same height. 
- - being submitted to BTC, and + For example, assuming the following blockchain - - being reported back to BBN + ``` - If a checkpoint has not been reported back within w BTC blocks, - then BBN has dishonest majority and is stalling checkpoints + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - (w in research paper) - description: QueryParamsResponse is response type for the Query/Params RPC method. - babylon.btccheckpoint.v1.SubmissionKey: - type: object - properties: - key: - type: array - items: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified by hash of - block in + Then the fork will be {[D1, D2]} where each item is in + struct `IndexedBlock`. - which transaction was included and transaction index in the block - title: >- - Checkpoint can be composed from multiple transactions, so to identify - whole - submission we need list of transaction keys. + Note that each `IndexedHeader` in the fork should have a + valid quorum certificate. - Each submission can generally be identified by this list of (txIdx, - blockHash) + Such forks exist since Babylon considers CZs might have + dishonest majority. - tuples. + Also note that the IBC-Go implementation will only + consider the first header in a fork valid, since - Note: this could possibly be optimized as if transactions were in one - block + the subsequent headers cannot be verified without knowing + the validator set in the previous header. 
+ timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped + headers in CZ's canonical chain + epoch_info: + title: epoch_info is the metadata of the last BTC-finalised epoch + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this + epoch. - they would have the same block hash and different indexes, but each - blockhash + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous + epoch's checkpoint is finalised. - is only 33 (1 byte for prefix encoding and 32 byte hash), so there should + The last_block_header field is nil in the epoch's + beginning, and is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, - be other strong arguments for this optimization - babylon.btccheckpoint.v1.TransactionKey: - type: object - properties: - index: - type: integer - format: int64 - hash: - type: string - format: byte - title: |- - Each provided OP_RETURN transaction can be idendtified by hash of block in - which transaction was included and transaction index in the block - cosmos.base.query.v1beta1.PageRequest: - type: object - properties: - key: - type: string - format: byte - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - offset: - type: string - format: uint64 - description: |- - offset is a numeric offset that can be used when key is unavailable. 
- It is less efficient than using key. Only one of offset or key should - be set. - limit: - type: string - format: uint64 - description: >- - limit is the total number of results to be returned in the result - page. + including all blockchain data structures and the rules + of the application's - If left empty it will default to a value to be set by each app. + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: >- + app_hash_root is the Merkle root of all AppHashs in this + epoch + + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + + including all blockchain data structures and the 
rules + of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ raw_checkpoint: + title: raw_checkpoint is the raw checkpoint of this epoch + type: object + properties: + epoch_num: + type: string + format: uint64 + title: >- + epoch_num defines the epoch number the raw checkpoint is + for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that + individual BLS sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of + the BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated + from individual BLS sigs + btc_submission_key: + title: >- + btc_submission_key is position of two BTC txs that include the + raw checkpoint of this epoch + type: object + properties: + key: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified + by hash of block in + + which transaction was included and transaction index in + the block + proof: + title: proof is the proof that the chain info is finalized + type: object + properties: + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the + header is included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a + transaction in the Merkle tree. 
+ proof_header_in_epoch: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + title: >- + proof_header_in_epoch is the proof that the Babylon header + is in a certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, + voting power, and its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + metadata is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for + calculating Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for + calculating Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: 
array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be + idendtified by hash of block in + + which transaction was included and transaction index + in the block + description: >- + key is the position (txIdx, blockHash) of this tx on + BTC blockchain + + Although it is already a part of SubmissionKey, we + store it here again + + to make TransactionInfo self-contained. + + For example, storing the key allows TransactionInfo + to not relay on + + the fact that TransactionInfo will be ordered in the + same order as + + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included + in the position in `key` + + TODO: maybe it could use here better format as we + already processed and + + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains + Babylon checkpoint, including + + - the position of the tx on BTC blockchain + + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's + checkpoint is included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission + description: >- + QueryFinalizedChainInfoResponse is response type for the + Query/FinalizedChainInfo RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. 
This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. 
+ + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: chain_id + description: chain_id is the ID of the CZ + in: path + required: true + type: string + - name: prove + description: >- + prove indicates whether the querier wants to get proofs of this + timestamp. 
+ in: query + required: false + type: boolean + tags: + - Query + /babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}/height/{height}: + get: + summary: >- + FinalizedChainInfoUntilHeight queries the BTC-finalised info no later + than the provided CZ height, with proofs + operationId: FinalizedChainInfoUntilHeight + responses: + '200': + description: A successful response. + schema: + type: object + properties: + finalized_chain_info: + title: finalized_chain_info is the info of the CZ + type: object + properties: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes + this header + + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of + the header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon + block that 
includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and + the rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. 
+ babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on + Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that + includes this header + + (babylon_block_height, babylon_tx_hash) jointly + provides the position of the header on Babylon + ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at + the same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the + same height. + + For example, assuming the following blockchain + + ``` + + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` + + Then the fork will be {[D1, D2]} where each item is in + struct `IndexedBlock`. + + + Note that each `IndexedHeader` in the fork should have a + valid quorum certificate. + + Such forks exist since Babylon considers CZs might have + dishonest majority. + + Also note that the IBC-Go implementation will only + consider the first header in a fork valid, since + + the subsequent headers cannot be verified without knowing + the validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped + headers in CZ's canonical chain + epoch_info: + title: epoch_info is the metadata of the last BTC-finalised epoch + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this + epoch. + + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous + epoch's checkpoint is finalised. + + The last_block_header field is nil in the epoch's + beginning, and is set upon the end of this epoch. 
+ type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + + including all blockchain data structures and the rules + of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: >- + app_hash_root is the Merkle root of all AppHashs in this + epoch + + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + + including all blockchain data structures and the rules 
+ of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ raw_checkpoint: + title: raw_checkpoint is the raw checkpoint of this epoch + type: object + properties: + epoch_num: + type: string + format: uint64 + title: >- + epoch_num defines the epoch number the raw checkpoint is + for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that + individual BLS sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of + the BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated + from individual BLS sigs + btc_submission_key: + title: >- + btc_submission_key is position of two BTC txs that include the + raw checkpoint of this epoch + type: object + properties: + key: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified + by hash of block in + + which transaction was included and transaction index in + the block + proof: + title: proof is the proof that the chain info is finalized + type: object + properties: + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the + header is included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a + transaction in the Merkle tree. 
+ proof_header_in_epoch: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + title: >- + proof_header_in_epoch is the proof that the Babylon header + is in a certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, + voting power, and its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + metadata is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for + calculating Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer + header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for + calculating Merkle root + + The data could be arbitrary format, providing + nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: 
array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be + idendtified by hash of block in + + which transaction was included and transaction index + in the block + description: >- + key is the position (txIdx, blockHash) of this tx on + BTC blockchain + + Although it is already a part of SubmissionKey, we + store it here again + + to make TransactionInfo self-contained. + + For example, storing the key allows TransactionInfo + to not relay on + + the fact that TransactionInfo will be ordered in the + same order as + + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included + in the position in `key` + + TODO: maybe it could use here better format as we + already processed and + + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains + Babylon checkpoint, including + + - the position of the tx on BTC blockchain + + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's + checkpoint is included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission + description: >- + QueryFinalizedChainInfoUntilHeightResponse is response type for + the Query/FinalizedChainInfoUntilHeight RPC method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: chain_id + description: chain_id is the ID of the CZ + in: path + required: true + type: string + - name: height + description: >- + height is the height of the CZ chain + + such that the returned finalised chain info will be no later than + this height + in: path + required: true + type: string + format: uint64 + - name: prove + description: >- + prove indicates whether the querier wants to get proofs of this + timestamp. + in: query + required: false + type: boolean + tags: + - Query + /babylon/zoneconcierge/v1/headers/{chain_id}: + get: + summary: >- + ListHeaders queries the headers of a chain in Babylon's view, with + pagination support + operationId: ListHeaders + responses: + '200': + description: A successful response. + schema: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: headers is the list of headers + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + description: >- + QueryListHeadersResponse is response type for the + Query/ListHeaders RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) 
+ + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: chain_id + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /babylon/zoneconcierge/v1/headers/{chain_id}/epochs/{epoch_num}: + get: + summary: >- + ListEpochHeaders queries the headers of a chain timestamped in a given + epoch of Babylon, with pagination support + operationId: ListEpochHeaders + responses: + '200': + description: A successful response. + schema: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: headers is the list of headers + description: >- + QueryListEpochHeadersResponse is response type for the + Query/ListEpochHeaders RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. 
This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. 
+ + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: chain_id + in: path + required: true + type: string + - name: epoch_num + in: path + required: true + type: string + format: uint64 + tags: + - Query + /babylon/zoneconcierge/v1/params: + get: + summary: Parameters queries the parameters of the module. + operationId: ZoneConciergeParams + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. 
+ value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + tags: + - Query +definitions: + babylon.btccheckpoint.v1.BTCCheckpointInfo: + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: height of earliest BTC block that includes this checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from the + checkpoint itself. + reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the submissions, + calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses + babylon.btccheckpoint.v1.CheckpointAddresses: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from the checkpoint + itself. 
+ reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the submissions, calculated + from + + submission message MsgInsertBTCSpvProof itself + babylon.btccheckpoint.v1.Params: + type: object + properties: + btc_confirmation_depth: + type: string + format: uint64 + title: >- + btc_confirmation_depth is the confirmation depth in BTC. + + A block is considered irreversible only when it is at least k-deep in + BTC + + (k in research paper) + checkpoint_finalization_timeout: + type: string + format: uint64 + title: >- + checkpoint_finalization_timeout is the maximum time window (measured + in BTC blocks) between a checkpoint + + - being submitted to BTC, and + + - being reported back to BBN + + If a checkpoint has not been reported back within w BTC blocks, then + BBN has dishonest majority and is stalling checkpoints + + (w in research paper) + description: Params defines the parameters for the module. + babylon.btccheckpoint.v1.QueryBtcCheckpointInfoResponse: + type: object + properties: + info: + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: height of earliest BTC block that includes this checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from the + checkpoint itself. 
+ reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the submissions, + calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses + title: >- + QueryBtcCheckpointInfoResponse is response type for the + Query/BtcCheckpointInfo RPC method + babylon.btccheckpoint.v1.QueryBtcCheckpointsInfoResponse: + type: object + properties: + info_list: + type: array + items: + type: object + properties: + epoch_number: + type: string + format: uint64 + title: epoch number of this checkpoint + earliest_btc_block_number: + type: string + format: uint64 + title: height of earliest BTC block that includes this checkpoint + earliest_btc_block_hash: + type: string + format: byte + title: hash of earliest BTC block that includes this checkpoint + vigilante_address_list: + type: array + items: + type: object + properties: + submitter: + type: string + format: byte + description: >- + TODO: this could probably be better typed + + Address of the checkpoint submitter, extracted from the + checkpoint itself. + reporter: + type: string + format: byte + title: >- + Address of the reporter which reported the submissions, + calculated from + + submission message MsgInsertBTCSpvProof itself + title: list of vigilantes' addresses + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. 
+ + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QueryBtcCheckpointsInfoResponse is response type for the + Query/BtcCheckpointsInfo RPC method + babylon.btccheckpoint.v1.QueryEpochSubmissionsResponse: + type: object + properties: + keys: + type: array + items: + type: object + properties: + key: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified by hash + of block in + + which transaction was included and transaction index in the + block + title: >- + Checkpoint can be composed from multiple transactions, so to + identify whole + + submission we need list of transaction keys. + + Each submission can generally be identified by this list of (txIdx, + blockHash) + + tuples. + + Note: this could possibly be optimized as if transactions were in + one block + + they would have the same block hash and different indexes, but each + blockhash + + is only 33 (1 byte for prefix encoding and 32 byte hash), so there + should + + be other strong arguments for this optimization + description: All submissions saved during an epoch. + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. 
+ + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + babylon.btccheckpoint.v1.QueryParamsResponse: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + properties: + btc_confirmation_depth: + type: string + format: uint64 + title: >- + btc_confirmation_depth is the confirmation depth in BTC. + + A block is considered irreversible only when it is at least k-deep + in BTC + + (k in research paper) + checkpoint_finalization_timeout: + type: string + format: uint64 + title: >- + checkpoint_finalization_timeout is the maximum time window + (measured in BTC blocks) between a checkpoint + + - being submitted to BTC, and + + - being reported back to BBN + + If a checkpoint has not been reported back within w BTC blocks, + then BBN has dishonest majority and is stalling checkpoints + + (w in research paper) + description: QueryParamsResponse is response type for the Query/Params RPC method. + babylon.btccheckpoint.v1.SubmissionKey: + type: object + properties: + key: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified by hash of + block in + + which transaction was included and transaction index in the block + title: >- + Checkpoint can be composed from multiple transactions, so to identify + whole + + submission we need list of transaction keys. + + Each submission can generally be identified by this list of (txIdx, + blockHash) + + tuples. 
+ + Note: this could possibly be optimized as if transactions were in one + block + + they would have the same block hash and different indexes, but each + blockhash + + is only 33 (1 byte for prefix encoding and 32 byte hash), so there should + + be other strong arguments for this optimization + babylon.btccheckpoint.v1.TransactionKey: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: |- + Each provided OP_RETURN transaction can be idendtified by hash of block in + which transaction was included and transaction index in the block + cosmos.base.query.v1beta1.PageRequest: + type: object + properties: + key: + type: string + format: byte + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + offset: + type: string + format: uint64 + description: |- + offset is a numeric offset that can be used when key is unavailable. + It is less efficient than using key. Only one of offset or key should + be set. + limit: + type: string + format: uint64 + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. count_total: type: boolean description: >- @@ -8213,6 +11758,223 @@ definitions: repeated Bar results = 1; PageResponse page = 2; } + babylon.epoching.v1.QueryEpochsInfoResponse: + type: object + properties: + epochs: + type: array + items: + type: object + properties: + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this epoch. + + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous + epoch's checkpoint is finalised. 
+ + The last_block_header field is nil in the epoch's beginning, and + is set upon the end of this epoch. + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: + type: string + format: byte + title: |- + app_hash_root is the Merkle root of all AppHashs in this epoch + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules 
for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. 
+ + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } babylon.epoching.v1.QueryLatestEpochMsgsResponse: type: object properties: @@ -8745,678 +12507,1659 @@ definitions: type: string validator_address: type: string - pubkey: + pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. 
+ description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + value: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: MsgCreateValidator defines a SDK message for creating a new validator. + msg_delegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: |- + MsgDelegate defines a SDK message for performing a delegation of coins + from a delegator to a validator. + msg_undelegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: >- + MsgUndelegate defines a SDK message for performing an undelegation + from a + + delegate and a validator. + msg_begin_redelegate: + type: object + properties: + delegator_address: + type: string + validator_src_address: + type: string + validator_dst_address: + type: string + amount: type: object properties: - type_url: + denom: type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. 
- protocol buffer message. This string must contain at least - one "/" character. The last segment of the URL's path must - represent + NOTE: The amount field is an Int which implements the custom + method - the fully qualified name of the type (as in + signatures required by gogoproto. + description: >- + MsgBeginRedelegate defines a SDK message for performing a redelegation - `path/google.protobuf.Duration`). The name should be in a - canonical form + of coins from a delegator and source validator to a destination + validator. + title: >- + QueuedMessage is a message that can change the validator set and is + delayed to the epoch boundary + babylon.epoching.v1.QueuedMessageList: + type: object + properties: + epoch_number: + type: string + format: uint64 + msgs: + type: array + items: + type: object + properties: + tx_id: + type: string + format: byte + title: tx_id is the ID of the tx that contains the message + msg_id: + type: string + format: byte + title: >- + msg_id is the original message ID, i.e., hash of the marshaled + message + block_height: + type: string + format: uint64 + title: block_height is the height when this msg is submitted to Babylon + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp when this msg is submitted to + Babylon + msg_create_validator: + type: object + properties: + description: + type: object + properties: + moniker: + type: string + description: moniker defines a human-readable name for the validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. + UPort or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for security + contact. + details: + type: string + description: details define other optional details. + description: Description defines a validator description. 
+ commission: + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to delegators, as a + fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which + validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of + the validator commission, as a fraction. + description: >- + CommissionRates defines the initial commission rates to be + used for creating - (e.g., leading "." is not accepted). + a validator. + min_self_delegation: + type: string + delegator_address: + type: string + validator_address: + type: string + pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + protocol buffer message. This string must contain at + least - In practice, teams usually precompile into the binary all - types that they + one "/" character. The last segment of the URL's path + must represent - expect it to use in the context of Any. However, for URLs - which use the + the fully qualified name of the type (as in - scheme `http`, `https`, or no scheme, one can optionally set - up a type + `path/google.protobuf.Duration`). The name should be in + a canonical form - server that maps type URLs to message definitions as follows: + (e.g., leading "." is not accepted). - * If no scheme is provided, `https` is assumed. + In practice, teams usually precompile into the binary + all types that they - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) 
+ expect it to use in the context of Any. However, for + URLs which use the - Note: this functionality is not currently available in the - official + scheme `http`, `https`, or no scheme, one can optionally + set up a type - protobuf release, and it is not used for type URLs beginning - with + server that maps type URLs to message definitions as + follows: - type.googleapis.com. + * If no scheme is provided, `https` is assumed. - Schemes other than `http`, `https` (or the empty scheme) might - be + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. 
+ + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use - URL that describes the type of the serialized message. + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + methods only use the fully qualified type name after the + last '/' - Protobuf library provides support to pack/unpack Any values in the - form + in the type URL, for example "foo.bar.com/x/y.z" will yield + type - of utility functions or additional generated methods of the Any - type. + name "y.z". - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + JSON - Example 2: Pack and unpack a message in Java. + ==== - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + The JSON representation of an `Any` value uses the regular - Example 3: Pack and unpack a message in Python. + representation of the deserialized, embedded message, with + an - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + additional field `@type` which contains the type URL. 
+ Example: - Example 4: Pack and unpack a message in Go + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... - } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - The pack methods provided by protobuf library will by default use + If the embedded message type is well-known and has a custom + JSON - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + representation, that representation will be embedded adding + a field - methods only use the fully qualified type name after the last '/' + `value` which holds the custom JSON in addition to the + `@type` - in the type URL, for example "foo.bar.com/x/y.z" will yield type + field. Example (for message [google.protobuf.Duration][]): - name "y.z". + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + value: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + NOTE: The amount field is an Int which implements the custom + method - JSON + signatures required by gogoproto. + description: >- + MsgCreateValidator defines a SDK message for creating a new + validator. + msg_delegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - ==== - The JSON representation of an `Any` value uses the regular + NOTE: The amount field is an Int which implements the custom + method - representation of the deserialized, embedded message, with an + signatures required by gogoproto. 
+ description: >- + MsgDelegate defines a SDK message for performing a delegation of + coins - additional field `@type` which contains the type URL. Example: + from a delegator to a validator. + msg_undelegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + NOTE: The amount field is an Int which implements the custom + method - If the embedded message type is well-known and has a custom JSON + signatures required by gogoproto. + description: >- + MsgUndelegate defines a SDK message for performing an + undelegation from a - representation, that representation will be embedded adding a - field + delegate and a validator. + msg_begin_redelegate: + type: object + properties: + delegator_address: + type: string + validator_src_address: + type: string + validator_dst_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - `value` which holds the custom JSON in addition to the `@type` - field. Example (for message [google.protobuf.Duration][]): + NOTE: The amount field is an Int which implements the custom + method - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - value: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. + signatures required by gogoproto. 
+ description: >- + MsgBeginRedelegate defines a SDK message for performing a + redelegation + of coins from a delegator and source validator to a destination + validator. + title: >- + QueuedMessage is a message that can change the validator set and is + delayed to the epoch boundary + babylon.epoching.v1.ValStateUpdate: + type: object + properties: + state: + type: string + enum: + - CREATED + - BONDED + - UNBONDING + - UNBONDED + - REMOVED + default: CREATED + block_height: + type: string + format: uint64 + block_time: + type: string + format: date-time + babylon.epoching.v1.Validator: + type: object + properties: + addr: + type: string + format: byte + title: addr is the validator's address (in sdk.ValAddress) + power: + type: string + format: int64 + title: power is the validator's voting power + babylon.epoching.v1.ValidatorLifecycle: + type: object + properties: + val_addr: + type: string + val_life: + type: array + items: + type: object + properties: + state: + type: string + enum: + - CREATED + - BONDED + - UNBONDING + - UNBONDED + - REMOVED + default: CREATED + block_height: + type: string + format: uint64 + block_time: + type: string + format: date-time + cosmos.base.v1beta1.Coin: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. - NOTE: The amount field is an Int which implements the custom - method + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + cosmos.staking.v1beta1.CommissionRates: + type: object + properties: + rate: + type: string + description: rate is the commission rate charged to delegators, as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which validator can ever + charge, as a fraction. 
+ max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of the validator + commission, as a fraction. + description: >- + CommissionRates defines the initial commission rates to be used for + creating - signatures required by gogoproto. - description: MsgCreateValidator defines a SDK message for creating a new validator. - msg_delegate: + a validator. + cosmos.staking.v1beta1.Description: + type: object + properties: + moniker: + type: string + description: moniker defines a human-readable name for the validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. UPort or + Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: security_contact defines an optional email for security contact. + details: + type: string + description: details define other optional details. + description: Description defines a validator description. + cosmos.staking.v1beta1.MsgBeginRedelegate: + type: object + properties: + delegator_address: + type: string + validator_src_address: + type: string + validator_dst_address: + type: string + amount: type: object properties: - delegator_address: - type: string - validator_address: + denom: type: string amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. + type: string description: |- - MsgDelegate defines a SDK message for performing a delegation of coins - from a delegator to a validator. - msg_undelegate: + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. 
+ description: |- + MsgBeginRedelegate defines a SDK message for performing a redelegation + of coins from a delegator and source validator to a destination validator. + cosmos.staking.v1beta1.MsgCreateValidator: + type: object + properties: + description: type: object properties: - delegator_address: + moniker: type: string - validator_address: + description: moniker defines a human-readable name for the validator. + identity: type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - description: >- - MsgUndelegate defines a SDK message for performing an undelegation - from a - - delegate and a validator. - msg_begin_redelegate: + identity defines an optional identity signature (ex. UPort or + Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: security_contact defines an optional email for security contact. + details: + type: string + description: details define other optional details. + description: Description defines a validator description. + commission: type: object properties: - delegator_address: + rate: type: string - validator_src_address: + description: rate is the commission rate charged to delegators, as a fraction. + max_rate: type: string - validator_dst_address: + description: >- + max_rate defines the maximum commission rate which validator can + ever charge, as a fraction. + max_change_rate: type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. 
+ max_change_rate defines the maximum daily increase of the + validator commission, as a fraction. description: >- - MsgBeginRedelegate defines a SDK message for performing a redelegation + CommissionRates defines the initial commission rates to be used for + creating - of coins from a delegator and source validator to a destination - validator. - title: >- - QueuedMessage is a message that can change the validator set and is - delayed to the epoch boundary - babylon.epoching.v1.QueuedMessageList: - type: object - properties: - epoch_number: + a validator. + min_self_delegation: type: string - format: uint64 - msgs: - type: array - items: - type: object - properties: - tx_id: - type: string - format: byte - title: tx_id is the ID of the tx that contains the message - msg_id: - type: string - format: byte - title: >- - msg_id is the original message ID, i.e., hash of the marshaled - message - block_height: - type: string - format: uint64 - title: block_height is the height when this msg is submitted to Babylon - block_time: - type: string - format: date-time - title: >- - block_time is the timestamp when this msg is submitted to - Babylon - msg_create_validator: - type: object - properties: - description: - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. - UPort or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for security - contact. - details: - type: string - description: details define other optional details. - description: Description defines a validator description. - commission: - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, as a - fraction. 
- max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which - validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase of - the validator commission, as a fraction. - description: >- - CommissionRates defines the initial commission rates to be - used for creating + delegator_address: + type: string + validator_address: + type: string + pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - a validator. - min_self_delegation: - type: string - delegator_address: - type: string - validator_address: - type: string - pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized + protocol buffer message. This string must contain at least - protocol buffer message. This string must contain at - least + one "/" character. The last segment of the URL's path must + represent - one "/" character. The last segment of the URL's path - must represent + the fully qualified name of the type (as in - the fully qualified name of the type (as in + `path/google.protobuf.Duration`). The name should be in a + canonical form - `path/google.protobuf.Duration`). The name should be in - a canonical form + (e.g., leading "." is not accepted). - (e.g., leading "." is not accepted). + In practice, teams usually precompile into the binary all types + that they - In practice, teams usually precompile into the binary - all types that they + expect it to use in the context of Any. However, for URLs which + use the - expect it to use in the context of Any. 
However, for - URLs which use the + scheme `http`, `https`, or no scheme, one can optionally set up a + type - scheme `http`, `https`, or no scheme, one can optionally - set up a type + server that maps type URLs to message definitions as follows: - server that maps type URLs to message definitions as - follows: + * If no scheme is provided, `https` is assumed. - * If no scheme is provided, `https` is assumed. + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + Note: this functionality is not currently available in the + official - Note: this functionality is not currently available in - the official + protobuf release, and it is not used for type URLs beginning with - protobuf release, and it is not used for type URLs - beginning with + type.googleapis.com. - type.googleapis.com. + Schemes other than `http`, `https` (or the empty scheme) might be - Schemes other than `http`, `https` (or the empty scheme) - might be + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a - used with implementation specific semantics. 
- value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a + URL that describes the type of the serialized message. - URL that describes the type of the serialized message. + Protobuf library provides support to pack/unpack Any values in the + form - Protobuf library provides support to pack/unpack Any values - in the form + of utility functions or additional generated methods of the Any type. - of utility functions or additional generated methods of the - Any type. + Example 1: Pack and unpack a message in C++. - Example 1: Pack and unpack a message in C++. + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Example 2: Pack and unpack a message in Java. - Example 2: Pack and unpack a message in Java. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Example 3: Pack and unpack a message in Python. - Example 3: Pack and unpack a message in Python. + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + Example 4: Pack and unpack a message in Go - Example 4: Pack and unpack a message in Go + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... 
- } + The pack methods provided by protobuf library will by default use - The pack methods provided by protobuf library will by - default use + 'type.googleapis.com/full.type.name' as the type URL and the unpack - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + methods only use the fully qualified type name after the last '/' - methods only use the fully qualified type name after the - last '/' + in the type URL, for example "foo.bar.com/x/y.z" will yield type - in the type URL, for example "foo.bar.com/x/y.z" will yield - type + name "y.z". - name "y.z". + JSON - JSON + ==== - ==== + The JSON representation of an `Any` value uses the regular - The JSON representation of an `Any` value uses the regular + representation of the deserialized, embedded message, with an - representation of the deserialized, embedded message, with - an + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + value: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: MsgCreateValidator defines a SDK message for creating a new validator. 
+ cosmos.staking.v1beta1.MsgDelegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. - additional field `@type` which contains the type URL. - Example: + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: |- + MsgDelegate defines a SDK message for performing a delegation of coins + from a delegator to a validator. + cosmos.staking.v1beta1.MsgUndelegate: + type: object + properties: + delegator_address: + type: string + validator_address: + type: string + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: |- + MsgUndelegate defines a SDK message for performing an undelegation from a + delegate and a validator. 
+ tendermint.types.BlockID: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + tendermint.types.Header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in the + blockchain, - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + including all blockchain data structures and the rules of the + application's - If the embedded message type is well-known and has a custom - JSON + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ tendermint.types.PartSetHeader: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + tendermint.version.Consensus: + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in the + blockchain, - representation, that representation will be embedded adding - a field + including all blockchain data structures and the rules of the + application's - `value` which holds the custom JSON in addition to the - `@type` + state transition machine. + babylon.checkpointing.v1.CheckpointStateUpdate: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. - field. Example (for message [google.protobuf.Duration][]): + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. 
+ title: state defines the event of a state transition towards this state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block that triggers the + state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block that triggers the + state update + babylon.checkpointing.v1.CheckpointStatus: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - value: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + babylon.checkpointing.v1.Params: + type: object + description: Params defines the parameters for the module. + babylon.checkpointing.v1.QueryBlsPublicKeyListResponse: + type: object + properties: + validator_with_bls_keys: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, and its + bls public key + pagination: + description: pagination defines the pagination in the response. 
+ type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + was set, its value is undefined otherwise + description: >- + QueryBlsPublicKeyListResponse is the response type for the + Query/BlsPublicKeys - NOTE: The amount field is an Int which implements the custom - method + RPC method. + babylon.checkpointing.v1.QueryEpochStatusResponse: + type: object + properties: + status: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. - signatures required by gogoproto. - description: >- - MsgCreateValidator defines a SDK message for creating a new - validator. - msg_delegate: + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + description: |- + QueryEpochStatusResponse is the response type for the Query/EpochStatus + RPC method. 
+ babylon.checkpointing.v1.QueryLastCheckpointWithStatusResponse: + type: object + properties: + raw_checkpoint: + type: object + properties: + epoch_num: + type: string + format: uint64 + title: epoch_num defines the epoch number the raw checkpoint is for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that individual BLS + sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of the BLS + multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated from + individual BLS sigs + title: RawCheckpoint wraps the BLS multi sig with meta data + babylon.checkpointing.v1.QueryParamsResponse: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + description: QueryParamsResponse is response type for the Query/Params RPC method. + babylon.checkpointing.v1.QueryRawCheckpointListResponse: + type: object + properties: + raw_checkpoints: + type: array + items: + type: object + properties: + ckpt: type: object properties: - delegator_address: + epoch_num: type: string - validator_address: + format: uint64 + title: epoch_num defines the epoch number the raw checkpoint is for + last_commit_hash: type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. 
- + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that + individual BLS sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of the + BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated from + individual BLS sigs + title: RawCheckpoint wraps the BLS multi sig with meta data + status: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. - NOTE: The amount field is an Int which implements the custom - method + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: status defines the status of the checkpoint + bls_aggr_pk: + type: string + format: byte + title: bls_aggr_pk defines the aggregated BLS public key + power_sum: + type: string + format: uint64 + title: >- + power_sum defines the accumulated voting power for the + checkpoint + lifecycle: + type: array + items: + type: object + properties: + state: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. - signatures required by gogoproto. + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. 
+ - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: >- + state defines the event of a state transition towards this + state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block that + triggers the state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block that + triggers the state update description: >- - MsgDelegate defines a SDK message for performing a delegation of - coins - - from a delegator to a validator. - msg_undelegate: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. + lifecycle defines the lifecycle of this checkpoint, i.e., each + state transition and + the time (in both timestamp and block height) of this + transition. + description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. + title: the order is going from the newest to oldest based on the epoch number + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - NOTE: The amount field is an Int which implements the custom - method + was set, its value is undefined otherwise + description: >- + QueryRawCheckpointListResponse is the response type for the + Query/RawCheckpoints - signatures required by gogoproto. - description: >- - MsgUndelegate defines a SDK message for performing an - undelegation from a + RPC method. + babylon.checkpointing.v1.QueryRawCheckpointResponse: + type: object + properties: + raw_checkpoint: + type: object + properties: + ckpt: + type: object + properties: + epoch_num: + type: string + format: uint64 + title: epoch_num defines the epoch number the raw checkpoint is for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that individual + BLS sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of the + BLS multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated from + individual BLS sigs + title: RawCheckpoint wraps the BLS multi sig with meta data + status: + type: string + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. - delegate and a validator. - msg_begin_redelegate: + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. 
+ - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: status defines the status of the checkpoint + bls_aggr_pk: + type: string + format: byte + title: bls_aggr_pk defines the aggregated BLS public key + power_sum: + type: string + format: uint64 + title: power_sum defines the accumulated voting power for the checkpoint + lifecycle: + type: array + items: type: object properties: - delegator_address: + state: type: string - validator_src_address: + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: >- + state defines the event of a state transition towards this + state + block_height: type: string - validator_dst_address: + format: uint64 + title: >- + block_height is the height of the Babylon block that + triggers the state update + block_time: type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method + format: date-time + title: >- + block_time is the timestamp in the Babylon block that + triggers the state update + description: >- + lifecycle defines the lifecycle of this checkpoint, i.e., each + state transition and - signatures required by gogoproto. 
- description: >- - MsgBeginRedelegate defines a SDK message for performing a - redelegation + the time (in both timestamp and block height) of this transition. + description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. + description: >- + QueryRawCheckpointResponse is the response type for the + Query/RawCheckpoint - of coins from a delegator and source validator to a destination - validator. - title: >- - QueuedMessage is a message that can change the validator set and is - delayed to the epoch boundary - babylon.epoching.v1.ValStateUpdate: + RPC method. + babylon.checkpointing.v1.QueryRecentEpochStatusCountResponse: type: object properties: - state: - type: string - enum: - - CREATED - - BONDED - - UNBONDING - - UNBONDED - - REMOVED - default: CREATED - block_height: + tip_epoch: type: string format: uint64 - block_time: + epoch_count: type: string - format: date-time - babylon.epoching.v1.Validator: + format: uint64 + status_count: + type: object + additionalProperties: + type: string + format: uint64 + description: >- + QueryRecentEpochStatusCountResponse is the response type for the + Query/EpochStatusCount + + RPC method. 
+ babylon.checkpointing.v1.RawCheckpoint: type: object properties: - addr: + epoch_num: + type: string + format: uint64 + title: epoch_num defines the epoch number the raw checkpoint is for + last_commit_hash: type: string format: byte - title: addr is the validator's address (in sdk.ValAddress) - power: + title: >- + last_commit_hash defines the 'LastCommitHash' that individual BLS sigs + are signed on + bitmap: type: string - format: int64 - title: power is the validator's voting power - babylon.epoching.v1.ValidatorLifecycle: + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of the BLS multi + sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated from individual + BLS sigs + title: RawCheckpoint wraps the BLS multi sig with meta data + babylon.checkpointing.v1.RawCheckpointWithMeta: type: object properties: - val_addr: + ckpt: + type: object + properties: + epoch_num: + type: string + format: uint64 + title: epoch_num defines the epoch number the raw checkpoint is for + last_commit_hash: + type: string + format: byte + title: >- + last_commit_hash defines the 'LastCommitHash' that individual BLS + sigs are signed on + bitmap: + type: string + format: byte + title: >- + bitmap defines the bitmap that indicates the signers of the BLS + multi sig + bls_multi_sig: + type: string + format: byte + title: >- + bls_multi_sig defines the multi sig that is aggregated from + individual BLS sigs + title: RawCheckpoint wraps the BLS multi sig with meta data + status: type: string - val_life: + enum: + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. + + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. 
+ - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: status defines the status of the checkpoint + bls_aggr_pk: + type: string + format: byte + title: bls_aggr_pk defines the aggregated BLS public key + power_sum: + type: string + format: uint64 + title: power_sum defines the accumulated voting power for the checkpoint + lifecycle: type: array items: type: object @@ -9424,511 +14167,637 @@ definitions: state: type: string enum: - - CREATED - - BONDED - - UNBONDING - - UNBONDED - - REMOVED - default: CREATED - block_height: - type: string - format: uint64 - block_time: - type: string - format: date-time - cosmos.base.v1beta1.Coin: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. - - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - cosmos.staking.v1beta1.CommissionRates: - type: object - properties: - rate: - type: string - description: rate is the commission rate charged to delegators, as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which validator can ever - charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase of the validator - commission, as a fraction. - description: >- - CommissionRates defines the initial commission rates to be used for - creating + - CKPT_STATUS_ACCUMULATING + - CKPT_STATUS_SEALED + - CKPT_STATUS_SUBMITTED + - CKPT_STATUS_CONFIRMED + - CKPT_STATUS_FINALIZED + default: CKPT_STATUS_ACCUMULATING + description: |- + CkptStatus is the status of a checkpoint. 
- a validator. - cosmos.staking.v1beta1.Description: - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. UPort or - Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: security_contact defines an optional email for security contact. - details: - type: string - description: details define other optional details. - description: Description defines a validator description. - cosmos.staking.v1beta1.MsgBeginRedelegate: + - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. + - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. + - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. + - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. + - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. + title: state defines the event of a state transition towards this state + block_height: + type: string + format: uint64 + title: >- + block_height is the height of the Babylon block that triggers + the state update + block_time: + type: string + format: date-time + title: >- + block_time is the timestamp in the Babylon block that triggers + the state update + description: >- + lifecycle defines the lifecycle of this checkpoint, i.e., each state + transition and + + the time (in both timestamp and block height) of this transition. + description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. 
+ babylon.checkpointing.v1.ValidatorWithBlsKey: type: object properties: - delegator_address: + validator_address: type: string - validator_src_address: + bls_pub_key: type: string - validator_dst_address: + format: byte + voting_power: type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. - - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - description: |- - MsgBeginRedelegate defines a SDK message for performing a redelegation - of coins from a delegator and source validator to a destination validator. - cosmos.staking.v1beta1.MsgCreateValidator: + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, and its bls + public key + babylon.btccheckpoint.v1.TransactionInfo: type: object properties: - description: - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. UPort or - Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: security_contact defines an optional email for security contact. - details: - type: string - description: details define other optional details. - description: Description defines a validator description. - commission: + key: type: object properties: - rate: - type: string - description: rate is the commission rate charged to delegators, as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which validator can - ever charge, as a fraction. 
- max_change_rate: + index: + type: integer + format: int64 + hash: type: string - description: >- - max_change_rate defines the maximum daily increase of the - validator commission, as a fraction. - description: >- - CommissionRates defines the initial commission rates to be used for - creating + format: byte + title: >- + Each provided OP_RETURN transaction can be idendtified by hash of + block in - a validator. - min_self_delegation: - type: string - delegator_address: + which transaction was included and transaction index in the block + description: |- + key is the position (txIdx, blockHash) of this tx on BTC blockchain + Although it is already a part of SubmissionKey, we store it here again + to make TransactionInfo self-contained. + For example, storing the key allows TransactionInfo to not relay on + the fact that TransactionInfo will be ordered in the same order as + TransactionKeys in SubmissionKey. + transaction: type: string - validator_address: + format: byte + title: transaction is the full transaction in bytes + proof: type: string - pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary all types - that they - - expect it to use in the context of Any. However, for URLs which - use the - - scheme `http`, `https`, or no scheme, one can optionally set up a - type - - server that maps type URLs to message definitions as follows: - + format: byte + title: >- + proof is the Merkle proof that this tx is included in the position in + `key` - * If no scheme is provided, `https` is assumed. 
+ TODO: maybe it could use here better format as we already processed + and - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains Babylon checkpoint, + including - Note: this functionality is not currently available in the - official + - the position of the tx on BTC blockchain - protobuf release, and it is not used for type URLs beginning with + - the full tx content - type.googleapis.com. + - the Merkle proof that this tx is on the above position + babylon.zoneconcierge.v1.ChainInfo: + type: object + properties: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes + this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, - Schemes other than `http`, `https` (or the empty scheme) might be + including all blockchain data structures and the rules of the + application's - used 
with implementation specific semantics. - value: + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string format: byte - description: >- - Must be a valid serialized protocol buffer of the above specified - type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values in the - form - - of utility functions or additional generated methods of the Any type. - - - Example 1: Pack and unpack a message in C++. + title: >- + babylon_tx_hash is the hash of the tx that includes this header - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... 
- } + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of IndexedHeader + (from low to high) + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger - Example 2: Pack and unpack a message in Java. + (hash, height) jointly provides the position of the header + on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + including all blockchain data structures and the rules + of the application's - Example 3: Pack and unpack a message in Python. + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the same + height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same height. - Example 4: Pack and unpack a message in Go + For example, assuming the following blockchain - foo := &pb.Foo{...} - any, err := anypb.New(foo) - if err != nil { - ... - } - ... - foo := &pb.Foo{} - if err := any.UnmarshalTo(foo); err != nil { - ... 
- } + ``` - The pack methods provided by protobuf library will by default use + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - 'type.googleapis.com/full.type.name' as the type URL and the unpack + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - methods only use the fully qualified type name after the last '/' - in the type URL, for example "foo.bar.com/x/y.z" will yield type + Note that each `IndexedHeader` in the fork should have a valid quorum + certificate. - name "y.z". + Such forks exist since Babylon considers CZs might have dishonest + majority. + Also note that the IBC-Go implementation will only consider the first + header in a fork valid, since + the subsequent headers cannot be verified without knowing the + validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in CZ's + canonical chain + title: ChainInfo is the information of a CZ + babylon.zoneconcierge.v1.Forks: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger - JSON + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes + this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, - ==== + including all blockchain data structures and the rules of + the application's - The JSON representation of an `Any` 
value uses the regular + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this header - representation of the deserialized, embedded message, with an + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: blocks is the list of non-canonical indexed headers at the same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same height. - additional field `@type` which contains the type URL. 
Example: + For example, assuming the following blockchain - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + ``` - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - If the embedded message type is well-known and has a custom JSON + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - representation, that representation will be embedded adding a field - `value` which holds the custom JSON in addition to the `@type` + Note that each `IndexedHeader` in the fork should have a valid quorum + certificate. - field. Example (for message [google.protobuf.Duration][]): + Such forks exist since Babylon considers CZs might have dishonest + majority. - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - value: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. + Also note that the IBC-Go implementation will only consider the first + header in a fork valid, since - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - description: MsgCreateValidator defines a SDK message for creating a new validator. - cosmos.staking.v1beta1.MsgDelegate: + the subsequent headers cannot be verified without knowing the validator + set in the previous header. + babylon.zoneconcierge.v1.IndexedHeader: type: object properties: - delegator_address: - type: string - validator_address: + chain_id: type: string - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. - - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. 
- description: |- - MsgDelegate defines a SDK message for performing a delegation of coins - from a delegator to a validator. - cosmos.staking.v1beta1.MsgUndelegate: - type: object - properties: - delegator_address: + title: chain_id is the unique ID of the chain + hash: type: string - validator_address: + format: byte + title: hash is the hash of this header + height: type: string - amount: + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes this + CZ header type: object properties: - denom: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in + the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: type: string - amount: + height: type: string - description: |- - Coin defines a token with a denomination and an amount. - - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - description: |- - MsgUndelegate defines a SDK message for performing an undelegation from a - delegate and a validator. 
- tendermint.types.BlockID: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer format: int64 - hash: + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: type: string format: byte - title: PartsetHeader - title: BlockID - tendermint.types.Header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: + title: hashes of block data + data_hash: type: string - format: uint64 - app: + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block in the - blockchain, - - including all blockchain data structures and the rules of the - application's - - state transition machine. 
- chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: + format: byte + title: consensus info + proposer_address: type: string format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - tendermint.types.PartSetHeader: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - tendermint.version.Consensus: - type: object - properties: - block: + description: Header defines the structure of a Tendermint block header. + babylon_epoch: type: string format: uint64 - app: + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block in the - blockchain, - - including all blockchain data structures and the rules of the - application's - - state transition machine. 
- babylon.checkpointing.v1.CheckpointStatus: - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this header - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - babylon.checkpointing.v1.Params: + (babylon_block_height, babylon_tx_hash) jointly provides the position + of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + babylon.zoneconcierge.v1.Params: type: object description: Params defines the parameters for the module. - babylon.checkpointing.v1.QueryBlsPublicKeyListResponse: + babylon.zoneconcierge.v1.ProofEpochSealed: type: object properties: - validator_with_bls_keys: + validator_set: type: array items: type: object @@ -9944,361 +14813,574 @@ definitions: title: >- ValidatorWithBlsKey couples validator address, voting power, and its bls public key - pagination: - description: pagination defines the pagination in the response. 
+ title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on `last_commit_hash` + of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata is + committed to `app_hash` of the sealer header type: object properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: |- + ProofOp defines an operation used for calculating Merkle root + The data could be arbitrary format, providing nessecary data + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's validator set is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: |- + ProofOp defines an operation used for calculating Merkle root + The data could be arbitrary format, providing nessecary data + for example neighbouring node hash + title: >- + ProofEpochSealed is the proof that an epoch is sealed by the sealer + header, i.e., the 2nd header of the next epoch - was set, its value is undefined otherwise - description: >- - QueryBlsPublicKeyListResponse is the response type for the - Query/BlsPublicKeys + With the access of metadata - RPC method. 
- babylon.checkpointing.v1.QueryEpochStatusResponse: - type: object - properties: - status: - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. + - Metadata of this epoch, which includes the sealer header - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - description: |- - QueryEpochStatusResponse is the response type for the Query/EpochStatus - RPC method. - babylon.checkpointing.v1.QueryLatestCheckpointResponse: + - Raw checkpoint of this epoch + + The verifier can perform the following verification rules: + + - The raw checkpoint's `last_commit_hash` is same as in the sealer header + + - More than 1/3 (in voting power) validators in the validator set of this + epoch have signed `last_commit_hash` of the sealer header + + - The epoch medatata is committed to the `app_hash` of the sealer header + + - The validator set is committed to the `app_hash` of the sealer header + babylon.zoneconcierge.v1.ProofFinalizedChainInfo: type: object properties: - latest_checkpoint: + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header is + included in a certain Babylon block type: object properties: - ckpt: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: epoch_num defines the epoch number the raw checkpoint is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 
'LastCommitHash' that individual - BLS sigs are signed on - bitmap: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of the - BLS multi sig - bls_multi_sig: + format: int64 + index: + type: string + format: int64 + leaf_hash: type: string format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated from - individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a transaction in + the Merkle tree. + proof_header_in_epoch: + type: object + properties: + total: type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. - - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - title: status defines the status of the checkpoint - bls_aggr_pk: + format: int64 + index: type: string - format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: + format: int64 + leaf_hash: type: string - format: uint64 - title: power_sum defines the accumulated voting power for the checkpoint - description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. 
- description: >- - QueryLatestCheckpointResponse is the response type for the - Query/LatestCheckpoint - - RPC method. - babylon.checkpointing.v1.QueryParamsResponse: - type: object - properties: - params: - description: params holds all the parameters of this module. + format: byte + aunts: + type: array + items: + type: string + format: byte + title: >- + proof_header_in_epoch is the proof that the Babylon header is in a + certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed type: object - description: QueryParamsResponse is response type for the Query/Params RPC method. - babylon.checkpointing.v1.QueryRawCheckpointListResponse: - type: object - properties: - raw_checkpoints: - type: array - items: - type: object - properties: - ckpt: + properties: + validator_set: + type: array + items: type: object properties: - epoch_num: + validator_address: type: string - format: uint64 - title: epoch_num defines the epoch number the raw checkpoint is for - last_commit_hash: + bls_pub_key: type: string format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: + voting_power: type: string - format: byte + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, and + its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata is + committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte title: >- - bitmap defines the bitmap that indicates the signers of the - BLS multi sig - bls_multi_sig: + ProofOp defines an operation used for calculating 
Merkle + root + + The data could be arbitrary format, providing nessecary data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's validator + set is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: type: string format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated from - individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. + title: >- + Each provided OP_RETURN transaction can be idendtified by hash + of block in - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. 
- title: status defines the status of the checkpoint - bls_aggr_pk: + which transaction was included and transaction index in the + block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain + + Although it is already a part of SubmissionKey, we store it here + again + + to make TransactionInfo self-contained. + + For example, storing the key allows TransactionInfo to not relay + on + + the fact that TransactionInfo will be ordered in the same order + as + + TransactionKeys in SubmissionKey. + transaction: type: string format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: + title: transaction is the full transaction in bytes + proof: type: string - format: uint64 + format: byte title: >- - power_sum defines the accumulated voting power for the - checkpoint - description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. - title: the order is going from the newest to oldest based on the epoch number - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - description: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently. It will be empty if - there are no more results. - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + proof is the Merkle proof that this tx is included in the + position in `key` + + TODO: maybe it could use here better format as we already + processed and + + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including + + - the position of the tx on BTC blockchain - was set, its value is undefined otherwise - description: >- - QueryRawCheckpointListResponse is the response type for the - Query/RawCheckpoints + - the full tx content - RPC method. 
- babylon.checkpointing.v1.QueryRawCheckpointResponse: + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint is + included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) checkpoint + submission + title: >- + ProofFinalizedChainInfo is a set of proofs that attest a chain info is + BTC-finalised + babylon.zoneconcierge.v1.QueryChainInfoResponse: type: object properties: - raw_checkpoint: + chain_info: + title: chain_info is the info of the CZ type: object properties: - ckpt: + chain_id: + type: string + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain type: object properties: - epoch_num: + chain_id: type: string - format: uint64 - title: epoch_num defines the epoch number the raw checkpoint is for - last_commit_hash: + title: chain_id is the unique ID of the chain + hash: type: string format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that individual - BLS sigs are signed on - bitmap: + title: hash is the hash of this header + height: type: string - format: byte + format: uint64 title: >- - bitmap defines the bitmap that indicates the signers of the - BLS multi sig - bls_multi_sig: + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on + CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string format: byte title: >- - bls_multi_sig defines the multi sig that is aggregated from - individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. 
+ babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the + same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same + height. - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. 
- - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - title: status defines the status of the checkpoint - bls_aggr_pk: - type: string - format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: + For example, assuming the following blockchain + + ``` + + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` + + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. + + + Note that each `IndexedHeader` in the fork should have a valid + quorum certificate. + + Such forks exist since Babylon considers CZs might have dishonest + majority. + + Also note that the IBC-Go implementation will only consider the + first header in a fork valid, since + + the subsequent headers cannot be verified without knowing the + validator set in the previous header. + timestamped_headers_count: type: string format: uint64 - title: power_sum defines the accumulated voting power for the checkpoint - description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. - description: >- - QueryRawCheckpointResponse is the response type for the - Query/RawCheckpoint - - RPC method. - babylon.checkpointing.v1.QueryRecentEpochStatusCountResponse: - type: object - properties: - tip_epoch: - type: string - format: uint64 - epoch_count: - type: string - format: uint64 - status_count: - type: object - additionalProperties: - type: string - format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in + CZ's canonical chain description: >- - QueryRecentEpochStatusCountResponse is the response type for the - Query/EpochStatusCount - - RPC method. - babylon.checkpointing.v1.QueryRecentRawCheckpointListResponse: + QueryChainInfoResponse is response type for the Query/ChainInfo RPC + method. 
+ babylon.zoneconcierge.v1.QueryChainListResponse: type: object properties: - raw_checkpoints: + chain_ids: type: array items: - type: object - properties: - ckpt: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: epoch_num defines the epoch number the raw checkpoint is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that - individual BLS sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of the - BLS multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated from - individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: - type: string - enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING - description: |- - CkptStatus is the status of a checkpoint. - - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - title: status defines the status of the checkpoint - bls_aggr_pk: - type: string - format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: - type: string - format: uint64 - title: >- - power_sum defines the accumulated voting power for the - checkpoint - description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. 
- title: the order is going from the newest to oldest based on the epoch number + type: string + title: chain_ids are IDs of the chains in ascending alphabetical order pagination: - description: pagination defines the pagination in the response. + title: pagination defines the pagination in the response type: object properties: next_key: @@ -10316,1060 +15398,1024 @@ definitions: PageRequest.count_total was set, its value is undefined otherwise - description: >- - QueryRecentRawCheckpointListResponse is the response type for the - Query/RecentRawCheckpoints - - RPC method. - babylon.checkpointing.v1.RawCheckpoint: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: epoch_num defines the epoch number the raw checkpoint is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that individual BLS sigs - are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of the BLS multi - sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated from individual - BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - babylon.checkpointing.v1.RawCheckpointWithMeta: - type: object - properties: - ckpt: - type: object - properties: - epoch_num: - type: string - format: uint64 - title: epoch_num defines the epoch number the raw checkpoint is for - last_commit_hash: - type: string - format: byte - title: >- - last_commit_hash defines the 'LastCommitHash' that individual BLS - sigs are signed on - bitmap: - type: string - format: byte - title: >- - bitmap defines the bitmap that indicates the signers of the BLS - multi sig - bls_multi_sig: - type: string - format: byte - title: >- - bls_multi_sig defines the multi sig that is aggregated from - individual BLS sigs - title: RawCheckpoint wraps the BLS multi sig with meta data - status: - type: string - 
enum: - - CKPT_STATUS_ACCUMULATING - - CKPT_STATUS_SEALED - - CKPT_STATUS_SUBMITTED - - CKPT_STATUS_CONFIRMED - - CKPT_STATUS_FINALIZED - default: CKPT_STATUS_ACCUMULATING description: |- - CkptStatus is the status of a checkpoint. + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - - CKPT_STATUS_ACCUMULATING: ACCUMULATING defines a checkpoint that is awaiting for BLS signatures. - - CKPT_STATUS_SEALED: SEALED defines a checkpoint that has accumulated sufficient BLS signatures. - - CKPT_STATUS_SUBMITTED: SUBMITTED defines a checkpoint that is included on BTC. - - CKPT_STATUS_CONFIRMED: CONFIRMED defines a checkpoint that is k-deep on BTC. - - CKPT_STATUS_FINALIZED: FINALIZED defines a checkpoint that is w-deep on BTC. - title: status defines the status of the checkpoint - bls_aggr_pk: - type: string - format: byte - title: bls_aggr_pk defines the aggregated BLS public key - power_sum: - type: string - format: uint64 - title: power_sum defines the accumulated voting power for the checkpoint - description: RawCheckpointWithMeta wraps the raw checkpoint with meta data. 
- babylon.checkpointing.v1.ValidatorWithBlsKey: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting power, and its bls - public key - babylon.btccheckpoint.v1.TransactionInfo: + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: QueryChainListResponse is response type for the Query/ChainList RPC method + babylon.zoneconcierge.v1.QueryEpochChainInfoResponse: type: object properties: - key: + chain_info: + title: chain_info is the info of the CZ type: object properties: - index: - type: integer - format: int64 - hash: + chain_id: type: string - format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified by hash of - block in + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on + CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks is the latest forks, formed as a series of + IndexedHeader (from low to high) + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ 
header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the + same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same + height. 
+ + For example, assuming the following blockchain - which transaction was included and transaction index in the block - description: |- - key is the position (txIdx, blockHash) of this tx on BTC blockchain - Although it is already a part of SubmissionKey, we store it here again - to make TransactionInfo self-contained. - For example, storing the key allows TransactionInfo to not relay on - the fact that TransactionInfo will be ordered in the same order as - TransactionKeys in SubmissionKey. - transaction: - type: string - format: byte - title: transaction is the full transaction in bytes - proof: - type: string - format: byte - title: >- - proof is the Merkle proof that this tx is included in the position in - `key` + ``` - TODO: maybe it could use here better format as we already processed - and + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` - valideated the proof? - title: >- - TransactionInfo is the info of a tx that contains Babylon checkpoint, - including + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - - the position of the tx on BTC blockchain - - the full tx content + Note that each `IndexedHeader` in the fork should have a valid + quorum certificate. - - the Merkle proof that this tx is on the above position - babylon.zoneconcierge.v1.ChainInfo: + Such forks exist since Babylon considers CZs might have dishonest + majority. + + Also note that the IBC-Go implementation will only consider the + first header in a fork valid, since + + the subsequent headers cannot be verified without knowing the + validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in + CZ's canonical chain + description: >- + QueryEpochChainInfoResponse is response type for the Query/EpochChainInfo + RPC method. 
+ babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse: type: object properties: - chain_id: - type: string - title: chain_id is the ID of the chain - latest_header: - title: latest_header is the latest header in the canonical chain of CZ + finalized_chain_info: + title: finalized_chain_info is the info of the CZ type: object properties: chain_id: type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: - type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the header on CZ - ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that includes - this CZ header + title: chain_id is the ID of the chain + latest_header: + title: latest_header is the latest header in CZ's canonical chain type: object properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block - in the blockchain, - - including all blockchain data structures and the rules of the - application's - - state transition machine. 
chain_id: type: string - height: + title: chain_id is the unique ID of the chain + hash: type: string - format: int64 - time: + format: byte + title: hash is the hash of this header + height: type: string - format: date-time - last_block_id: - title: prev block info + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on + CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header type: object properties: - hash: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. + chain_id: type: string - format: byte - part_set_header: + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info type: object properties: - total: - type: integer - format: int64 hash: type: string format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte 
+ title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: type: string - format: byte - title: consensus info - proposer_address: + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string - format: byte - description: Header defines the structure of a Tendermint block header. - babylon_epoch: - type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: - type: string - format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this header - - (babylon_block_height, babylon_tx_hash) jointly provides the - position of the header on Babylon ledger - latest_forks: - title: >- - latest_forks is the latest forks, formed as a series of IndexedHeader - (from low to high) - type: object - properties: - headers: - type: array - items: - type: object - properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: - type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - (hash, height) jointly provides the position of the header - on CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that - includes this CZ header + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + latest_forks: + title: >- + latest_forks 
is the latest forks, formed as a series of + IndexedHeader (from low to high) + type: object + properties: + headers: + type: array + items: type: object properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a - block in the blockchain, - - including all blockchain data structures and the rules - of the application's - - state transition machine. chain_id: type: string - height: + title: chain_id is the unique ID of the chain + hash: type: string - format: int64 - time: + format: byte + title: hash is the hash of this header + height: type: string - format: date-time - last_block_id: - title: prev block info + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the + header on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header type: object properties: - hash: - type: string - format: byte - part_set_header: + version: + title: basic block info type: object properties: - total: - type: integer - format: int64 - hash: + block: type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. 
- babylon_epoch: - type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: - type: string - format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this - header - - (babylon_block_height, babylon_tx_hash) jointly provides the - position of the header on Babylon ledger - title: IndexedHeader is the metadata of a CZ header - title: >- - blocks is the list of non-canonical indexed headers at the same - height - description: >- - Forks is a list of non-canonical `IndexedHeader`s at the same height. - - For example, assuming the following blockchain - - ``` - - A <- B <- C <- D <- E - \ -- D1 - \ -- D2 - ``` - - Then the fork will be {[D1, D2]} where each item is in struct - `IndexedBlock`. - - - Note that each `IndexedHeader` in the fork should have a valid quorum - certificate. - - Such forks exist since Babylon considers CZs might have dishonest - majority. - - Also note that the IBC-Go implementation will only consider the first - header in a fork valid, since - - the subsequent headers cannot be verified without knowing the - validator set in the previous header. 
- title: ChainInfo is the information of a CZ - babylon.zoneconcierge.v1.Forks: - type: object - properties: - headers: - type: array - items: - type: object - properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: - type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the header on CZ - ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that includes - this CZ header - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a - block in the blockchain, + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, - including all blockchain data structures and the rules of - the application's + including all blockchain data structures and the + rules of the application's - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer + state transition machine. 
+ chain_id: + type: string + height: + type: string format: int64 - hash: + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: type: string format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - babylon_epoch: - type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: - type: string - format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this header - - (babylon_block_height, babylon_tx_hash) jointly provides the - position of the header on Babylon ledger - title: IndexedHeader is the metadata of a CZ header - title: blocks is the list of non-canonical indexed headers at the same height - description: >- - Forks is a list of non-canonical `IndexedHeader`s at the same height. 
+ title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + babylon_epoch: + type: string + format: uint64 + title: >- + epoch is the epoch number of this header on Babylon + ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header - For example, assuming the following blockchain + (babylon_block_height, babylon_tx_hash) jointly provides + the position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the + same height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same + height. - ``` + For example, assuming the following blockchain - A <- B <- C <- D <- E - \ -- D1 - \ -- D2 - ``` + ``` - Then the fork will be {[D1, D2]} where each item is in struct - `IndexedBlock`. + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. - Note that each `IndexedHeader` in the fork should have a valid quorum - certificate. - Such forks exist since Babylon considers CZs might have dishonest - majority. + Note that each `IndexedHeader` in the fork should have a valid + quorum certificate. - Also note that the IBC-Go implementation will only consider the first - header in a fork valid, since + Such forks exist since Babylon considers CZs might have dishonest + majority. - the subsequent headers cannot be verified without knowing the validator - set in the previous header. 
- babylon.zoneconcierge.v1.IndexedHeader: - type: object - properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: - type: string - format: byte - title: hash is the hash of this header - height: - type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger + Also note that the IBC-Go implementation will only consider the + first header in a fork valid, since - (hash, height) jointly provides the position of the header on CZ - ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that includes this - CZ header + the subsequent headers cannot be verified without knowing the + validator set in the previous header. + timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in + CZ's canonical chain + epoch_info: + title: epoch_info is the metadata of the last BTC-finalised epoch type: object properties: - version: - title: basic block info + epoch_number: + type: string + format: uint64 + current_epoch_interval: + type: string + format: uint64 + first_block_height: + type: string + format: uint64 + last_block_header: + description: >- + last_block_header is the header of the last block in this epoch. + + Babylon needs to remember the last header of each epoch to + complete unbonding validators/delegations when a previous epoch's + checkpoint is finalised. + + The last_block_header field is nil in the epoch's beginning, and + is set upon the end of this epoch. 
type: object properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block in - the blockchain, + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, - including all blockchain data structures and the rules of the - application's + including all blockchain data structures and the rules of the + application's - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + app_hash_root: type: string - format: date-time - last_block_id: - title: prev block info + format: byte + title: |- + app_hash_root is the Merkle root of all AppHashs in this epoch + It will be used for proving a block is in an epoch + sealer_header: + title: >- + sealer_header is the 2nd header of the next epoch + + This validator 
set has generated a BLS multisig on + `last_commit_hash` of the sealer header type: object properties: - hash: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: type: string - format: byte - part_set_header: + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info type: object properties: - total: - type: integer - format: int64 hash: type: string format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ raw_checkpoint: + title: raw_checkpoint is the raw checkpoint of this epoch + type: object + properties: + epoch_num: type: string - format: byte - last_results_hash: + format: uint64 + title: epoch_num defines the epoch number the raw checkpoint is for + last_commit_hash: type: string format: byte - evidence_hash: + title: >- + last_commit_hash defines the 'LastCommitHash' that individual BLS + sigs are signed on + bitmap: type: string format: byte - title: consensus info - proposer_address: + title: >- + bitmap defines the bitmap that indicates the signers of the BLS + multi sig + bls_multi_sig: type: string format: byte - description: Header defines the structure of a Tendermint block header. - babylon_epoch: - type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: - type: string - format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this header - - (babylon_block_height, babylon_tx_hash) jointly provides the position - of the header on Babylon ledger - title: IndexedHeader is the metadata of a CZ header - babylon.zoneconcierge.v1.Params: - type: object - description: Params defines the parameters for the module. 
- babylon.zoneconcierge.v1.ProofEpochSealed: - type: object - properties: - validator_set: - type: array - items: - type: object - properties: - validator_address: - type: string - bls_pub_key: - type: string - format: byte - voting_power: - type: string - format: uint64 - title: >- - ValidatorWithBlsKey couples validator address, voting power, and its - bls public key - title: >- - validator_set is the validator set of the sealed epoch - - This validator set has generated a BLS multisig on `last_commit_hash` - of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's metadata is - committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: - type: object - properties: - type: - type: string - key: - type: string - format: byte - data: - type: string - format: byte - title: |- - ProofOp defines an operation used for calculating Merkle root - The data could be arbitrary format, providing nessecary data - for example neighbouring node hash - proof_epoch_val_set: + title: >- + bls_multi_sig defines the multi sig that is aggregated from + individual BLS sigs + btc_submission_key: title: >- - proof_epoch_info is the Merkle proof that the epoch's validator set is - committed to `app_hash` of the sealer header + btc_submission_key is position of two BTC txs that include the raw + checkpoint of this epoch type: object properties: - ops: + key: type: array items: type: object properties: - type: - type: string - key: - type: string - format: byte - data: + index: + type: integer + format: int64 + hash: type: string format: byte - title: |- - ProofOp defines an operation used for calculating Merkle root - The data could be arbitrary format, providing nessecary data - for example neighbouring node hash - title: >- - ProofEpochSealed is the proof that an epoch is sealed by the sealer - header, i.e., the 2nd header of the next epoch - - With the access of metadata - - - Metadata of 
this epoch, which includes the sealer header - - - Raw checkpoint of this epoch - - The verifier can perform the following verification rules: - - - The raw checkpoint's `last_commit_hash` is same as in the sealer header - - - More than 1/3 (in voting power) validators in the validator set of this - epoch have signed `last_commit_hash` of the sealer header - - - The epoch medatata is committed to the `app_hash` of the sealer header + title: >- + Each provided OP_RETURN transaction can be idendtified by hash + of block in - - The validator set is committed to the `app_hash` of the sealer header - babylon.zoneconcierge.v1.QueryChainInfoResponse: - type: object - properties: - chain_info: - title: chain_info is the info of the CZ + which transaction was included and transaction index in the + block + proof: + title: proof is the proof that the chain info is finalized type: object properties: - chain_id: - type: string - title: chain_id is the ID of the chain - latest_header: - title: latest_header is the latest header in the canonical chain of CZ + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header is + included in a certain Babylon block type: object properties: - chain_id: - type: string - title: chain_id is the unique ID of the chain - hash: + root_hash: type: string format: byte - title: hash is the hash of this header - height: + data: type: string - format: uint64 - title: >- - height is the height of this header on CZ ledger - - (hash, height) jointly provides the position of the header on - CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that - includes this CZ header + format: byte + proof: type: object properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a - block in the blockchain, - - 
including all blockchain data structures and the rules of - the application's - - state transition machine. - chain_id: - type: string - height: + total: type: string format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: + index: type: string - format: byte - title: consensus info - proposer_address: + format: int64 + leaf_hash: type: string format: byte - description: Header defines the structure of a Tendermint block header. - babylon_epoch: + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a transaction + in the Merkle tree. 
+ proof_header_in_epoch: + type: object + properties: + total: type: string - format: uint64 - title: epoch is the epoch number of this header on Babylon ledger - babylon_tx_hash: + format: int64 + index: + type: string + format: int64 + leaf_hash: type: string format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this - header - - (babylon_block_height, babylon_tx_hash) jointly provides the - position of the header on Babylon ledger - latest_forks: + aunts: + type: array + items: + type: string + format: byte title: >- - latest_forks is the latest forks, formed as a series of - IndexedHeader (from low to high) + proof_header_in_epoch is the proof that the Babylon header is in a + certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed type: object properties: - headers: + validator_set: type: array items: type: object properties: - chain_id: + validator_address: type: string - title: chain_id is the unique ID of the chain - hash: + bls_pub_key: type: string format: byte - title: hash is the hash of this header - height: + voting_power: type: string format: uint64 - title: >- - height is the height of this header on CZ ledger + title: >- + ValidatorWithBlsKey couples validator address, voting power, + and its bls public key + title: >- + validator_set is the validator set of the sealed epoch - (hash, height) jointly provides the position of the - header on CZ ledger - babylon_header: - title: >- - babylon_header is the header of the babylon block that - includes this CZ header + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata + is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: type: object properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: 
uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for - processing a block in the blockchain, - - including all blockchain data structures and the - rules of the application's - - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: + type: type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: + key: type: string format: byte - app_hash: + data: type: string format: byte - last_results_hash: + title: >- + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary + data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: type: string - format: byte - evidence_hash: + key: type: string format: byte - title: consensus info - proposer_address: + data: type: string format: byte - description: >- - Header defines the structure of a Tendermint block - header. 
- babylon_epoch: - type: string - format: uint64 title: >- - epoch is the epoch number of this header on Babylon - ledger - babylon_tx_hash: + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary + data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: type: string format: byte - title: >- - babylon_tx_hash is the hash of the tx that includes this - header + title: >- + Each provided OP_RETURN transaction can be idendtified by + hash of block in - (babylon_block_height, babylon_tx_hash) jointly provides - the position of the header on Babylon ledger - title: IndexedHeader is the metadata of a CZ header - title: >- - blocks is the list of non-canonical indexed headers at the - same height - description: >- - Forks is a list of non-canonical `IndexedHeader`s at the same - height. + which transaction was included and transaction index in the + block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain - For example, assuming the following blockchain + Although it is already a part of SubmissionKey, we store it + here again - ``` + to make TransactionInfo self-contained. - A <- B <- C <- D <- E - \ -- D1 - \ -- D2 - ``` + For example, storing the key allows TransactionInfo to not + relay on - Then the fork will be {[D1, D2]} where each item is in struct - `IndexedBlock`. + the fact that TransactionInfo will be ordered in the same + order as + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in the + position in `key` - Note that each `IndexedHeader` in the fork should have a valid - quorum certificate. 
+ TODO: maybe it could use here better format as we already + processed and - Such forks exist since Babylon considers CZs might have dishonest - majority. + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including - Also note that the IBC-Go implementation will only consider the - first header in a fork valid, since + - the position of the tx on BTC blockchain - the subsequent headers cannot be verified without knowing the - validator set in the previous header. + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint is + included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission description: >- - QueryChainInfoResponse is response type for the Query/ChainInfo RPC - method. - babylon.zoneconcierge.v1.QueryChainListResponse: - type: object - properties: - chain_ids: - type: array - items: - type: string - title: QueryChainListResponse is response type for the Query/ChainList RPC method - babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse: + QueryFinalizedChainInfoResponse is response type for the + Query/FinalizedChainInfo RPC method. + babylon.zoneconcierge.v1.QueryFinalizedChainInfoUntilHeightResponse: type: object properties: finalized_chain_info: @@ -11380,7 +16426,7 @@ definitions: type: string title: chain_id is the ID of the chain latest_header: - title: latest_header is the latest header in the canonical chain of CZ + title: latest_header is the latest header in CZ's canonical chain type: object properties: chain_id: @@ -11646,6 +16692,12 @@ definitions: the subsequent headers cannot be verified without knowing the validator set in the previous header. 
+ timestamped_headers_count: + type: string + format: uint64 + title: >- + timestamped_headers_count is the number of timestamped headers in + CZ's canonical chain epoch_info: title: epoch_info is the metadata of the last BTC-finalised epoch type: object @@ -11881,19 +16933,43 @@ definitions: which transaction was included and transaction index in the block - proof_tx_in_block: - title: >- - proof_tx_in_block is the proof that tx that carries the header is - included in a certain Babylon block + proof: + title: proof is the proof that the chain info is finalized type: object properties: - root_hash: - type: string - format: byte - data: - type: string - format: byte - proof: + proof_tx_in_block: + title: >- + proof_tx_in_block is the proof that tx that carries the header is + included in a certain Babylon block + type: object + properties: + root_hash: + type: string + format: byte + data: + type: string + format: byte + proof: + type: object + properties: + total: + type: string + format: int64 + index: + type: string + format: int64 + leaf_hash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte + description: >- + TxProof represents a Merkle proof of the presence of a transaction + in the Merkle tree. + proof_header_in_epoch: type: object properties: total: @@ -11910,175 +16986,686 @@ definitions: items: type: string format: byte - description: >- - TxProof represents a Merkle proof of the presence of a transaction in - the Merkle tree. 
- proof_header_in_epoch: + title: >- + proof_header_in_epoch is the proof that the Babylon header is in a + certain epoch + proof_epoch_sealed: + title: proof_epoch_sealed is the proof that the epoch is sealed + type: object + properties: + validator_set: + type: array + items: + type: object + properties: + validator_address: + type: string + bls_pub_key: + type: string + format: byte + voting_power: + type: string + format: uint64 + title: >- + ValidatorWithBlsKey couples validator address, voting power, + and its bls public key + title: >- + validator_set is the validator set of the sealed epoch + + This validator set has generated a BLS multisig on + `last_commit_hash` of the sealer header + proof_epoch_info: + title: >- + proof_epoch_info is the Merkle proof that the epoch's metadata + is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary + data + + for example neighbouring node hash + proof_epoch_val_set: + title: >- + proof_epoch_info is the Merkle proof that the epoch's + validator set is committed to `app_hash` of the sealer header + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + title: >- + ProofOp defines an operation used for calculating Merkle + root + + The data could be arbitrary format, providing nessecary + data + + for example neighbouring node hash + proof_epoch_submitted: + type: array + items: + type: object + properties: + key: + type: object + properties: + index: + type: integer + format: int64 + hash: + type: string + format: byte + title: >- + Each provided OP_RETURN transaction can be 
idendtified by + hash of block in + + which transaction was included and transaction index in the + block + description: >- + key is the position (txIdx, blockHash) of this tx on BTC + blockchain + + Although it is already a part of SubmissionKey, we store it + here again + + to make TransactionInfo self-contained. + + For example, storing the key allows TransactionInfo to not + relay on + + the fact that TransactionInfo will be ordered in the same + order as + + TransactionKeys in SubmissionKey. + transaction: + type: string + format: byte + title: transaction is the full transaction in bytes + proof: + type: string + format: byte + title: >- + proof is the Merkle proof that this tx is included in the + position in `key` + + TODO: maybe it could use here better format as we already + processed and + + valideated the proof? + title: >- + TransactionInfo is the info of a tx that contains Babylon + checkpoint, including + + - the position of the tx on BTC blockchain + + - the full tx content + + - the Merkle proof that this tx is on the above position + title: >- + proof_epoch_submitted is the proof that the epoch's checkpoint is + included in BTC ledger + + It is the two TransactionInfo in the best (i.e., earliest) + checkpoint submission + description: >- + QueryFinalizedChainInfoUntilHeightResponse is response type for the + Query/FinalizedChainInfoUntilHeight RPC method. 
+ babylon.zoneconcierge.v1.QueryHeaderResponse: + type: object + properties: + header: type: object properties: - total: + chain_id: type: string - format: int64 - index: + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes + this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ babylon_epoch: type: string - format: int64 - leaf_hash: + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string format: byte - aunts: - type: array - items: - type: string - format: byte - title: >- - proof_header_in_epoch is the proof that the Babylon header is in a - certain epoch - proof_epoch_sealed: - title: proof_epoch_sealed is the proof that the epoch is sealed + title: >- + babylon_tx_hash is the hash of the tx that includes this header + + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + fork_headers: type: object properties: - validator_set: + headers: type: array items: type: object properties: - validator_address: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header + on CZ ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that + includes this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules + of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte + title: >- + babylon_tx_hash is the hash of the tx that includes this + header + + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: >- + blocks is the list of non-canonical indexed headers at the same + height + description: >- + Forks is a list of non-canonical `IndexedHeader`s at the same height. + + For example, assuming the following blockchain + + ``` + + A <- B <- C <- D <- E + \ -- D1 + \ -- D2 + ``` + + Then the fork will be {[D1, D2]} where each item is in struct + `IndexedBlock`. + + + Note that each `IndexedHeader` in the fork should have a valid quorum + certificate. + + Such forks exist since Babylon considers CZs might have dishonest + majority. 
+ + Also note that the IBC-Go implementation will only consider the first + header in a fork valid, since + + the subsequent headers cannot be verified without knowing the + validator set in the previous header. + description: QueryParamsResponse is response type for the Query/Header RPC method. + babylon.zoneconcierge.v1.QueryListEpochHeadersResponse: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes + this CZ header + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: type: string - bls_pub_key: + format: byte + app_hash: type: string format: byte - voting_power: + last_results_hash: type: string - format: uint64 + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ babylon_epoch: + type: string + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: + type: string + format: byte title: >- - ValidatorWithBlsKey couples validator address, voting power, and - its bls public key - title: >- - validator_set is the validator set of the sealed epoch + babylon_tx_hash is the hash of the tx that includes this header - This validator set has generated a BLS multisig on - `last_commit_hash` of the sealer header - proof_epoch_info: - title: >- - proof_epoch_info is the Merkle proof that the epoch's metadata is - committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: headers is the list of headers + description: >- + QueryListEpochHeadersResponse is response type for the + Query/ListEpochHeaders RPC method. 
+ babylon.zoneconcierge.v1.QueryListHeadersResponse: + type: object + properties: + headers: + type: array + items: + type: object + properties: + chain_id: + type: string + title: chain_id is the unique ID of the chain + hash: + type: string + format: byte + title: hash is the hash of this header + height: + type: string + format: uint64 + title: >- + height is the height of this header on CZ ledger + + (hash, height) jointly provides the position of the header on CZ + ledger + babylon_header: + title: >- + babylon_header is the header of the babylon block that includes + this CZ header + type: object + properties: + version: + title: basic block info type: object properties: - type: - type: string - key: + block: type: string - format: byte - data: + format: uint64 + app: type: string - format: byte - title: >- - ProofOp defines an operation used for calculating Merkle - root + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, - The data could be arbitrary format, providing nessecary data + including all blockchain data structures and the rules of + the application's - for example neighbouring node hash - proof_epoch_val_set: - title: >- - proof_epoch_info is the Merkle proof that the epoch's validator - set is committed to `app_hash` of the sealer header - type: object - properties: - ops: - type: array - items: + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info type: object properties: - type: - type: string - key: - type: string - format: byte - data: + hash: type: string format: byte - title: >- - ProofOp defines an operation used for calculating Merkle - root - - The data could be arbitrary format, providing nessecary data - - for example neighbouring node hash - proof_epoch_submitted: - type: array - items: - type: object - properties: - key: - type: object - properties: - index: - type: integer - format: int64 - hash: + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: type: string format: byte - title: >- - Each provided OP_RETURN transaction can be idendtified by hash - of block in - - which transaction was included and transaction index in the - block - description: >- - key is the position (txIdx, blockHash) of this tx on BTC - blockchain - - Although it is already a part of SubmissionKey, we store it here - again - - to make TransactionInfo self-contained. - - For example, storing the key allows TransactionInfo to not relay - on - - the fact that TransactionInfo will be ordered in the same order - as - - TransactionKeys in SubmissionKey. - transaction: + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ babylon_epoch: type: string - format: byte - title: transaction is the full transaction in bytes - proof: + format: uint64 + title: epoch is the epoch number of this header on Babylon ledger + babylon_tx_hash: type: string format: byte title: >- - proof is the Merkle proof that this tx is included in the - position in `key` - - TODO: maybe it could use here better format as we already - processed and - - valideated the proof? - title: >- - TransactionInfo is the info of a tx that contains Babylon - checkpoint, including - - - the position of the tx on BTC blockchain + babylon_tx_hash is the hash of the tx that includes this header - - the full tx content + (babylon_block_height, babylon_tx_hash) jointly provides the + position of the header on Babylon ledger + title: IndexedHeader is the metadata of a CZ header + title: headers is the list of headers + pagination: + title: pagination defines the pagination in the response + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - - the Merkle proof that this tx is on the above position - title: >- - proof_epoch_submitted is the proof that the epoch's checkpoint is - included in BTC ledger + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. - It is the two TransactionInfo in the best (i.e., earliest) checkpoint - submission + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } description: >- - QueryFinalizedChainInfoResponse is response type for the - Query/FinalizedChainInfo RPC method. 
+ QueryListHeadersResponse is response type for the Query/ListHeaders RPC + method. babylon.zoneconcierge.v1.QueryParamsResponse: type: object properties: diff --git a/contrib/images/babylond-dlv/Dockerfile b/contrib/images/babylond-dlv/Dockerfile index 6291ca776..79f1cfaba 100644 --- a/contrib/images/babylond-dlv/Dockerfile +++ b/contrib/images/babylond-dlv/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18-alpine AS build +FROM golang:1.19-alpine AS build RUN apk add build-base git linux-headers libc-dev RUN go install github.com/go-delve/delve/cmd/dlv@latest WORKDIR /work @@ -16,7 +16,7 @@ COPY --from=build /work/build/babylond /babylond/ COPY --from=build /go/bin/dlv /usr/local/bin WORKDIR /babylond -EXPOSE 26656 26657 2345 +EXPOSE 26656 26657 2345 1317 ENTRYPOINT ["/usr/bin/wrapper.sh"] CMD ["start", "--log_format", "plain"] STOPSIGNAL SIGTERM diff --git a/contrib/images/babylond-env/Dockerfile b/contrib/images/babylond-env/Dockerfile index 5bc6299b4..9fe29b87c 100644 --- a/contrib/images/babylond-env/Dockerfile +++ b/contrib/images/babylond-env/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18-alpine AS build +FROM golang:1.19-alpine AS build RUN apk add build-base git linux-headers WORKDIR /work COPY go.mod go.sum /work/ @@ -15,7 +15,7 @@ VOLUME /babylond COPY --from=build /work/build/babylond /babylond/ WORKDIR /babylond -EXPOSE 26656 26657 +EXPOSE 26656 26657 1317 ENTRYPOINT ["/usr/bin/wrapper.sh"] CMD ["start", "--log_format", "plain"] STOPSIGNAL SIGTERM diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..4ac29268d --- /dev/null +++ b/docs/README.md @@ -0,0 +1,10 @@ +# Babylon Developer Docs + +This page contains documentation targeted at individuals that want to contribute to the Babylon repository. + +For user-facing docs, visit the [Babylon documentation page](https://docs.babylonchain.io). 
+
+## Contents
+
+- [Development Requirements](./dev-reqs.md)
+- [Running a node for testing purposes](./run-node.md)
diff --git a/docs/dev-reqs.md b/docs/dev-reqs.md
new file mode 100644
index 000000000..0e04112f7
--- /dev/null
+++ b/docs/dev-reqs.md
@@ -0,0 +1,5 @@
+## Development Requirements
+
+To develop the Babylon repository, the following requirements are recommended:
+- Golang version 1.19
+- Docker
diff --git a/docs/run-node.md b/docs/run-node.md
new file mode 100644
index 000000000..705bb72f1
--- /dev/null
+++ b/docs/run-node.md
@@ -0,0 +1,135 @@
+## Running a node
+
+The following commands assume that the `babylond` executable has been
+installed. If the repository was only built, then `./build/babylond` should be
+used in its place.
+
+### Generating the node configuration
+The configuration for a single node can be created through the `testnet`
+command. While the `testnet` command can create an arbitrary number of nodes that
+communicate on a testnet, here we focus on the setup of a single node.
+```console
+babylond testnet \
+    --v 1 \
+    --output-dir ./.testnet \
+    --starting-ip-address 192.168.10.2 \
+    --keyring-backend test \
+    --chain-id chain-test
+```
+
+The flags specify the following:
+- `--output-dir <dir>`: Specifies that the testnet files should
+  reside under this directory.
+- `--v <N>`: Leads to the creation of `N` nodes, each one residing under the
+  `<dir>/node{i}`. In this case `i={0..N-1}`.
+- `--starting-ip-address <IP>`: Specifies the IP address for the nodes. For example,
+  `192.168.10.2` leads to the first node running on `192.168.10.2:46656`, the
+  second one on `192.168.10.3:46656` etc.
+- `--keyring-backend {os,file,test}`: Specifies the backend to use for the keyring. Available
+  choices include `os`, `file`, and `test`. We use `test` for convenience.
+- `--chain-id`: An identifier for the chain. Useful when performing operations
+  later.
+
+In this case, we generated a single node.
If we take a look under `.testnet`: +```console +$ ls .testnet +gentxs node0 +``` + +The `gentxs` directory contains the genesis transactions. It contains +transactions that assign bbn tokens to a single address that is defined for each +node. + +The `node0` directory contains the following, +```console +$ ls .testnet/node0/babylond +config data key_seed.json keyring-test +``` + +A brief description of the contents: +- `config`: Contains the configuration files for the node. +- `data`: Contains the database storage for the node. +- `key_seed.json`: Seed to generate the keys maintained by the keyring. +- `keyring-test`: Contains the test keyring. This directory was created because + we provided the `--keyring-backend test` flag. The `testnet` command creates + a validator node named `node{i}` (depends on the node name), and assigns + bbn tokens to it through a transaction written to `.testnet/gentxs/node{i}.json`. + The keys for this node can be pointed to by the `node{i}` name. + +### Running the node +```console +babylond start --home ./.testnet/node0/babylond +``` + +### Logs + +The logs for a particular node can be found under +`.testnet/node{i}/babylond/babylond.log`. + +### Performing queries + +After building a node and starting it, you can perform queries. 
+```console +babylond --home .testnet/node{i}/babylond/ --chain-id \ + query +``` + +For example, in order to get the hashes maintained by the `btclightclient` +module: +```console +$ babylond --home .testnet/node0/babylond/ --chain-id chain-test query btclightclient hashes + +hashes: +- 00000000000000000002bf1c218853bc920f41f74491e6c92c6bc6fdc881ab47 +pagination: + next_key: null + total: "1" +``` + +### Submitting transactions + +After building a node and running it, one can send transactions as follows: +```console +babylond --home .testnet/node{i}/babylond --chain-id \ + --keyring-backend {os,file,test} --fees \ + --from --broadcast-mode {sync,async,block} \ + tx [data] +``` + +The `--fees` flag specifies the amount of fees that we are willing to pay and +the denomination and the `--from` flag denotes the name of the key that we want +to use to sign the transaction (i.e. from which account we want this +transaction to happen). The `--broadcast-mode` specifies how long we want to +wait until we receive a response from the CLI: `async` means immediately, +`sync` means after the transaction has been validated through `CheckTx`, +and `block` means after the transaction has been processed by the next block. + +For example, in the `btclightclient` module, in order +to submit a header, one should: +```console +babylond --home .testnet/node0/babylond --chain-id chain-test \ + --keyring-backend test --fees 100bbn \ + --from node0 --broadcast-mode block \ + tx btclightclient insert-header +``` + +## Running a multi-node testnet + +We provide support for running a multi-node testnet using Docker. 
To build it + +```console +make localnet-start +``` + +The corresponding node directories can be found under `.testnets` +```console +$ ls .testnets +gentxs node0 node1 node2 node3 +``` + +## Testing + +```console +make test +``` + diff --git a/go.mod b/go.mod index 27d75ae63..2c7954cc8 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ -go 1.18 +go 1.19 module github.com/babylonchain/babylon require ( - github.com/btcsuite/btcd v0.22.1 + github.com/btcsuite/btcd v0.22.3 github.com/cosmos/cosmos-sdk v0.46.6 github.com/gogo/protobuf v1.3.3 github.com/golang/protobuf v1.5.2 diff --git a/go.sum b/go.sum index 4b479a7ca..5aec91ca3 100644 --- a/go.sum +++ b/go.sum @@ -158,8 +158,9 @@ github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BR github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401/go.mod h1:Sv4JPQ3/M+teHz9Bo5jBpkNcP0x6r7rdihlNL/7tTAs= -github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd v0.22.3 h1:kYNaWFvOw6xvqP0vR20RP1Zq1DVMBxEO8QN5d1/EfNg= +github.com/btcsuite/btcd v0.22.3/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= diff --git a/proto/babylon/btccheckpoint/btccheckpoint.proto b/proto/babylon/btccheckpoint/btccheckpoint.proto index bdb44326f..84067295c 100644 --- a/proto/babylon/btccheckpoint/btccheckpoint.proto +++ b/proto/babylon/btccheckpoint/btccheckpoint.proto @@ 
-81,14 +81,14 @@ message TransactionInfo { // key is the position (txIdx, blockHash) of this tx on BTC blockchain // Although it is already a part of SubmissionKey, we store it here again // to make TransactionInfo self-contained. - // For example, storing the key allows TransactionInfo to not relay on - // the fact that TransactionInfo will be ordered in the same order as + // For example, storing the key allows TransactionInfo to not rely on + // the fact that TransactionInfo will be ordered in the same order as // TransactionKeys in SubmissionKey. TransactionKey key = 1; // transaction is the full transaction in bytes bytes transaction = 2; // proof is the Merkle proof that this tx is included in the position in `key` - // TODO: maybe it could use here better format as we already processed and + // TODO: maybe it could use here better format as we already processed and // valideated the proof? bytes proof = 3; } @@ -98,10 +98,8 @@ message TransactionInfo { // depth/block number info, without context (i.e info about chain) is pretty useless // and blockshash in enough to retrieve is from lightclient message SubmissionData { - // TODO: this could probably be better typed - // Address of submitter of given checkpoint. Required to payup the reward to - // submitter of given checkpoint - bytes submitter = 1; + // address of the submitter and reporter + CheckpointAddresses vigilante_addresses = 1; // txs_info is the two `TransactionInfo`s corresponding to the submission // It is used for // - recovering address of sender of btc transction to payup the reward. @@ -119,8 +117,24 @@ message EpochData { // Current btc status of the epoch BtcStatus status = 2; +} - // Required to comunicate with checkpoint module about checkpoint status - bytes raw_checkpoint = 3; +message CheckpointAddresses { + // TODO: this could probably be better typed + // Address of the checkpoint submitter, extracted from the checkpoint itself. 
+ bytes submitter = 1; + // Address of the reporter which reported the submissions, calculated from + // submission message MsgInsertBTCSpvProof itself + bytes reporter = 2; } +message BTCCheckpointInfo { + // epoch number of this checkpoint + uint64 epoch_number = 1; + // height of earliest BTC block that includes this checkpoint + uint64 earliest_btc_block_number = 2; + // hash of earliest BTC block that includes this checkpoint + bytes earliest_btc_block_hash = 3; + // list of vigilantes' addresses + repeated CheckpointAddresses vigilante_address_list = 4; +} diff --git a/proto/babylon/btccheckpoint/query.proto b/proto/babylon/btccheckpoint/query.proto index dfaeb091e..6cad44bab 100644 --- a/proto/babylon/btccheckpoint/query.proto +++ b/proto/babylon/btccheckpoint/query.proto @@ -16,11 +16,16 @@ service Query { option (google.api.http).get = "/babylon/btccheckpoint/v1/params"; } - // BtcCheckpointHeight returns earliest block height for given rawcheckpoint - rpc BtcCheckpointHeight(QueryBtcCheckpointHeightRequest) returns (QueryBtcCheckpointHeightResponse) { + // BtcCheckpointInfo returns checkpoint info for a given epoch + rpc BtcCheckpointInfo(QueryBtcCheckpointInfoRequest) returns (QueryBtcCheckpointInfoResponse) { option (google.api.http).get = "/babylon/btccheckpoint/v1/{epoch_num}"; } + // BtcCheckpointsInfo returns checkpoint info for a range of epochs + rpc BtcCheckpointsInfo(QueryBtcCheckpointsInfoRequest) returns (QueryBtcCheckpointsInfoResponse) { + option (google.api.http).get = "/babylon/btccheckpoint/v1"; + } + rpc EpochSubmissions(QueryEpochSubmissionsRequest) returns (QueryEpochSubmissionsResponse) { option (google.api.http).get = "/babylon/btccheckpoint/v1/{epoch_num}/submissions"; } @@ -35,15 +40,30 @@ message QueryParamsResponse { Params params = 1 [ (gogoproto.nullable) = false ]; } -message QueryBtcCheckpointHeightRequest { +message QueryBtcCheckpointInfoRequest { // Number of epoch for which the earliest checkpointing btc height is 
requested uint64 epoch_num = 1; } -// QueryCurrentEpochResponse is response type for the Query/CurrentEpoch RPC method -message QueryBtcCheckpointHeightResponse { - // Earliest btc block number containing given raw checkpoint - uint64 earliest_btc_block_number = 1; +// QueryBtcCheckpointInfoResponse is response type for the Query/BtcCheckpointInfo RPC method +message QueryBtcCheckpointInfoResponse { + BTCCheckpointInfo info = 1; +} + +// QueryBtcCheckpointsInfoRequest is request type for the Query/BtcCheckpointsInfo RPC method +message QueryBtcCheckpointsInfoRequest { + uint64 start_epoch = 1; + uint64 end_epoch = 2; + + // pagination defines whether to have the pagination in the request + cosmos.base.query.v1beta1.PageRequest pagination = 3; +} + +// QueryBtcCheckpointsInfoResponse is response type for the Query/BtcCheckpointsInfo RPC method +message QueryBtcCheckpointsInfoResponse { + repeated BTCCheckpointInfo info_list = 1; + // pagination defines the pagination in the response + cosmos.base.query.v1beta1.PageResponse pagination = 2; } message QueryEpochSubmissionsRequest { diff --git a/proto/babylon/checkpointing/checkpoint.proto b/proto/babylon/checkpointing/checkpoint.proto index e314c5d07..ccbac63c4 100644 --- a/proto/babylon/checkpointing/checkpoint.proto +++ b/proto/babylon/checkpointing/checkpoint.proto @@ -1,6 +1,7 @@ syntax = "proto3"; package babylon.checkpointing.v1; +import "google/protobuf/timestamp.proto"; import "cosmos_proto/cosmos.proto"; import "gogoproto/gogo.proto"; @@ -37,6 +38,9 @@ message RawCheckpointWithMeta { ]; // power_sum defines the accumulated voting power for the checkpoint uint64 power_sum = 4; + // lifecycle defines the lifecycle of this checkpoint, i.e., each state transition and + // the time (in both timestamp and block height) of this transition. + repeated CheckpointStateUpdate lifecycle = 5; } // CkptStatus is the status of a checkpoint. 
@@ -55,6 +59,17 @@ enum CheckpointStatus { CKPT_STATUS_FINALIZED = 4 [(gogoproto.enumvalue_customname) = "Finalized"]; } +message CheckpointStateUpdate { + option (gogoproto.equal) = true; + + // state defines the event of a state transition towards this state + CheckpointStatus state = 1; + // block_height is the height of the Babylon block that triggers the state update + uint64 block_height = 2; + // block_time is the timestamp in the Babylon block that triggers the state update + google.protobuf.Timestamp block_time = 3 [(gogoproto.stdtime) = true]; +} + // BlsSig wraps the BLS sig with meta data. message BlsSig { option (gogoproto.equal) = false; diff --git a/proto/babylon/checkpointing/query.proto b/proto/babylon/checkpointing/query.proto index 0fb338e3a..3accee792 100644 --- a/proto/babylon/checkpointing/query.proto +++ b/proto/babylon/checkpointing/query.proto @@ -16,21 +16,12 @@ service Query { rpc RawCheckpointList(QueryRawCheckpointListRequest) returns (QueryRawCheckpointListResponse) { option (google.api.http).get = "/babylon/checkpointing/v1/raw_checkpoints/{status}"; } - // RawCheckpointList queries a list of checkpoints starting from a given epoch number to the current epoch number. - rpc RecentRawCheckpointList(QueryRecentRawCheckpointListRequest) returns (QueryRecentRawCheckpointListResponse) { - option (google.api.http).get = "/babylon/checkpointing/v1/recent_raw_checkpoints/{from_epoch_num}"; - } // RawCheckpoint queries a checkpoints at a given epoch number. rpc RawCheckpoint(QueryRawCheckpointRequest) returns (QueryRawCheckpointResponse) { option (google.api.http).get = "/babylon/checkpointing/v1/raw_checkpoint/{epoch_num}"; } - // LatestCheckpoint queries the checkpoint with the highest epoch num. 
- rpc LatestCheckpoint(QueryLatestCheckpointRequest) returns (QueryLatestCheckpointResponse) { - option (google.api.http).get = "/babylon/checkpointing/v1/latest_checkpoint"; - } - // BlsPublicKeyList queries a list of bls public keys of the validators at a given epoch number. rpc BlsPublicKeyList(QueryBlsPublicKeyListRequest) returns (QueryBlsPublicKeyListResponse) { option (google.api.http).get = "/babylon/checkpointing/v1/bls_public_keys/{epoch_num}"; @@ -46,6 +37,11 @@ service Query { option (google.api.http).get = "/babylon/checkpointing/v1/epochs:status_count"; } + // LastCheckpointWithStatus queries the last checkpoint with a given status or a more matured status + rpc LastCheckpointWithStatus(QueryLastCheckpointWithStatusRequest) returns (QueryLastCheckpointWithStatusResponse) { + option (google.api.http).get = "/babylon/checkpointing/v1/last_raw_checkpoint/{status}"; + } + // Parameters queries the parameters of the module. rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { option (google.api.http).get = "/babylon/checkpointing/v1/params"; @@ -72,26 +68,6 @@ message QueryRawCheckpointListResponse { cosmos.base.query.v1beta1.PageResponse pagination = 2; } -// QueryRecentRawCheckpointListRequest is the request type for the Query/RecentRawCheckpoints -// RPC method. -message QueryRecentRawCheckpointListRequest { - // from_epoch defines the start epoch of the query, which is inclusive - uint64 from_epoch_num = 1; - - // pagination defines an optional pagination for the request. - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryRecentRawCheckpointListResponse is the response type for the Query/RecentRawCheckpoints -// RPC method. -message QueryRecentRawCheckpointListResponse { - // the order is going from the newest to oldest based on the epoch number - repeated RawCheckpointWithMeta raw_checkpoints = 1; - - // pagination defines the pagination in the response. 
- cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - // QueryRawCheckpointRequest is the request type for the Query/RawCheckpoint // RPC method. message QueryRawCheckpointRequest { @@ -105,16 +81,6 @@ message QueryRawCheckpointResponse { RawCheckpointWithMeta raw_checkpoint = 1; } -// QueryLatestCheckpointRequest is the request type for the Query/LatestCheckpoint -// RPC method. -message QueryLatestCheckpointRequest {} - -// QueryLatestCheckpointResponse is the response type for the Query/LatestCheckpoint -// RPC method. -message QueryLatestCheckpointResponse { - RawCheckpointWithMeta latest_checkpoint = 1; -} - // QueryBlsPublicKeyListRequest is the request type for the Query/BlsPublicKeys // RPC method. message QueryBlsPublicKeyListRequest { @@ -161,6 +127,14 @@ message QueryRecentEpochStatusCountResponse { map status_count = 3; } +message QueryLastCheckpointWithStatusRequest { + CheckpointStatus status = 1; +} + +message QueryLastCheckpointWithStatusResponse { + RawCheckpoint raw_checkpoint = 1; +} + // QueryParamsRequest is request type for the Query/Params RPC method. message QueryParamsRequest {} diff --git a/proto/babylon/epoching/v1/query.proto b/proto/babylon/epoching/v1/query.proto index 3333d5676..18fcead52 100644 --- a/proto/babylon/epoching/v1/query.proto +++ b/proto/babylon/epoching/v1/query.proto @@ -21,6 +21,13 @@ service Query { option (google.api.http).get = "/babylon/epoching/v1/epochs/{epoch_num=*}"; } + // EpochsInfo queries the metadata of epochs in a given range, depending on the + // parameters in the pagination request. The main use case will be querying the + // latest epochs in time order. 
+ rpc EpochsInfo(QueryEpochsInfoRequest) returns (QueryEpochsInfoResponse) { + option (google.api.http).get = "/babylon/epoching/v1/epochs"; + } + // CurrentEpoch queries the current epoch rpc CurrentEpoch(QueryCurrentEpochRequest) returns (QueryCurrentEpochResponse) { option (google.api.http).get = "/babylon/epoching/v1/current_epoch"; @@ -69,6 +76,21 @@ message QueryEpochInfoResponse { babylon.epoching.v1.Epoch epoch = 1; } +message QueryEpochsInfoRequest { + uint64 start_epoch = 1; + uint64 end_epoch = 2; + + // pagination defines whether to have the pagination in the request + cosmos.base.query.v1beta1.PageRequest pagination = 3; +} + +message QueryEpochsInfoResponse { + repeated babylon.epoching.v1.Epoch epochs = 1; + + // pagination defines the pagination in the response + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + // QueryCurrentEpochRequest is the request type for the Query/CurrentEpoch RPC method message QueryCurrentEpochRequest {} @@ -85,7 +107,7 @@ message QueryEpochMsgsRequest { // epoch_num is the number of epoch of the requested msg queue uint64 epoch_num = 1; - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request cosmos.base.query.v1beta1.PageRequest pagination = 2; } diff --git a/proto/babylon/monitor/genesis.proto b/proto/babylon/monitor/genesis.proto new file mode 100644 index 000000000..fb79098d0 --- /dev/null +++ b/proto/babylon/monitor/genesis.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package babylon.monitor.v1; + +import "gogoproto/gogo.proto"; +import "babylon/monitor/params.proto"; + +option go_package = "github.com/babylonchain/babylon/x/monitor/types"; + +// GenesisState defines the monitor module's genesis state. 
+message GenesisState { Params params = 1 [ (gogoproto.nullable) = false ]; } diff --git a/proto/babylon/monitor/params.proto b/proto/babylon/monitor/params.proto new file mode 100644 index 000000000..9ffe19c63 --- /dev/null +++ b/proto/babylon/monitor/params.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package babylon.monitor.v1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/babylonchain/babylon/x/monitor/types"; + +// Params defines the parameters for the module. +message Params { + option (gogoproto.equal) = true; +} diff --git a/proto/babylon/monitor/query.proto b/proto/babylon/monitor/query.proto new file mode 100644 index 000000000..ae2128f31 --- /dev/null +++ b/proto/babylon/monitor/query.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; +package babylon.monitor.v1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "babylon/monitor/params.proto"; + +option go_package = "github.com/babylonchain/babylon/x/monitor/types"; + +// Query defines the gRPC querier service. +service Query { + // Parameters queries the parameters of the module. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/babylon/monitor/v1/params"; + } + + // EndedEpochBtcHeight returns the BTC light client height at provided epoch finish + rpc EndedEpochBtcHeight(QueryEndedEpochBtcHeightRequest) returns (QueryEndedEpochBtcHeightResponse) { + option (google.api.http).get = "/babylon/monitor/v1/epochs/{epoch_num}"; + } + + // ReportedCheckpointBtcHeight returns the BTC light client height at which the checkpoint with the given hash is reported back to Babylon + rpc ReportedCheckpointBtcHeight(QueryReportedCheckpointBtcHeightRequest) returns (QueryReportedCheckpointBtcHeightResponse) { + option (google.api.http).get = "/babylon/monitor/v1/checkpoints/{ckpt_hash}"; + } +} + +// QueryParamsRequest is request type for the Query/Params RPC method. 
+message QueryParamsRequest {} + +// QueryParamsResponse is response type for the Query/Params RPC method. +message QueryParamsResponse { + // params holds all the parameters of this module. + Params params = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryEndedEpochBtcHeightRequest { + uint64 epoch_num = 1; +} + +message QueryEndedEpochBtcHeightResponse { + // height of btc light client when epoch ended + uint64 btc_light_client_height = 1; +} + +message QueryReportedCheckpointBtcHeightRequest { + // ckpt_hash is hex encoded byte string of the hash of the checkpoint + string ckpt_hash = 1; +} + +message QueryReportedCheckpointBtcHeightResponse { + // height of btc light client when checkpoint is reported + uint64 btc_light_client_height = 1; +} diff --git a/proto/babylon/zoneconcierge/query.proto b/proto/babylon/zoneconcierge/query.proto index 275c8a112..92d755eb6 100644 --- a/proto/babylon/zoneconcierge/query.proto +++ b/proto/babylon/zoneconcierge/query.proto @@ -3,8 +3,6 @@ package babylon.zoneconcierge.v1; import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; -import "tendermint/types/types.proto"; -import "tendermint/crypto/proof.proto"; import "cosmos/base/query/v1beta1/pagination.proto"; import "babylon/btccheckpoint/tx.proto"; import "babylon/btccheckpoint/btccheckpoint.proto"; @@ -22,6 +20,10 @@ service Query { rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/params"; } + // Header queries the CZ header and fork headers at a given height. 
+ rpc Header(QueryHeaderRequest) returns (QueryHeaderResponse) { + option (google.api.http).get = "/babylon/zoneconcierge/v1/chain_info/{chain_id}/header/{height}"; + } // ChainList queries the list of chains that checkpoint to Babylon rpc ChainList(QueryChainListRequest) returns (QueryChainListResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/chains"; @@ -30,10 +32,26 @@ service Query { rpc ChainInfo(QueryChainInfoRequest) returns (QueryChainInfoResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/chain_info/{chain_id}"; } + // EpochChainInfo queries the latest info of a chain in a given epoch of Babylon's view + rpc EpochChainInfo(QueryEpochChainInfoRequest) returns (QueryEpochChainInfoResponse) { + option (google.api.http).get = "/babylon/zoneconcierge/v1/chain_info/{chain_id}/epochs/{epoch_num}"; + } + // ListHeaders queries the headers of a chain in Babylon's view, with pagination support + rpc ListHeaders(QueryListHeadersRequest) returns (QueryListHeadersResponse) { + option (google.api.http).get = "/babylon/zoneconcierge/v1/headers/{chain_id}"; + } + // ListEpochHeaders queries the headers of a chain timestamped in a given epoch of Babylon, with pagination support + rpc ListEpochHeaders(QueryListEpochHeadersRequest) returns (QueryListEpochHeadersResponse) { + option (google.api.http).get = "/babylon/zoneconcierge/v1/headers/{chain_id}/epochs/{epoch_num}"; + } // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs rpc FinalizedChainInfo(QueryFinalizedChainInfoRequest) returns (QueryFinalizedChainInfoResponse) { option (google.api.http).get = "/babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}"; } + // FinalizedChainInfoUntilHeight queries the BTC-finalised info no later than the provided CZ height, with proofs + rpc FinalizedChainInfoUntilHeight(QueryFinalizedChainInfoUntilHeightRequest) returns (QueryFinalizedChainInfoUntilHeightResponse) { + option (google.api.http).get = 
"/babylon/zoneconcierge/v1/finalized_chain_info/{chain_id}/height/{height}"; + } } // QueryParamsRequest is request type for the Query/Params RPC method. @@ -45,12 +63,30 @@ message QueryParamsResponse { Params params = 1 [(gogoproto.nullable) = false]; } +// QueryHeaderRequest is request type for the Query/Header RPC method. +message QueryHeaderRequest { + string chain_id = 1; + uint64 height = 2; +} + +// QueryParamsResponse is response type for the Query/Header RPC method. +message QueryHeaderResponse { + babylon.zoneconcierge.v1.IndexedHeader header = 1; + babylon.zoneconcierge.v1.Forks fork_headers = 2; +} + // QueryChainListRequest is request type for the Query/ChainList RPC method -message QueryChainListRequest {} +message QueryChainListRequest { + // pagination defines whether to have the pagination in the request + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} // QueryChainListResponse is response type for the Query/ChainList RPC method message QueryChainListResponse { + // chain_ids are IDs of the chains in ascending alphabetical order repeated string chain_ids = 1; + // pagination defines the pagination in the response + cosmos.base.query.v1beta1.PageResponse pagination = 2; } // QueryChainInfoRequest is request type for the Query/ChainInfo RPC method. @@ -64,6 +100,45 @@ message QueryChainInfoResponse { babylon.zoneconcierge.v1.ChainInfo chain_info = 1; } +// QueryEpochChainInfoRequest is request type for the Query/EpochChainInfo RPC method. +message QueryEpochChainInfoRequest { + uint64 epoch_num = 1; + string chain_id = 2; +} + +// QueryEpochChainInfoResponse is response type for the Query/EpochChainInfo RPC method. +message QueryEpochChainInfoResponse { + // chain_info is the info of the CZ + babylon.zoneconcierge.v1.ChainInfo chain_info = 1; +} + +// QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. 
+message QueryListHeadersRequest { + string chain_id = 1; + // pagination defines whether to have the pagination in the request + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryListHeadersResponse is response type for the Query/ListHeaders RPC method. +message QueryListHeadersResponse { + // headers is the list of headers + repeated babylon.zoneconcierge.v1.IndexedHeader headers = 1; + // pagination defines the pagination in the response + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryListEpochHeadersRequest is request type for the Query/ListEpochHeaders RPC method. +message QueryListEpochHeadersRequest { + uint64 epoch_num = 1; + string chain_id = 2; +} + +// QueryListEpochHeadersResponse is response type for the Query/ListEpochHeaders RPC method. +message QueryListEpochHeadersResponse { + // headers is the list of headers + repeated babylon.zoneconcierge.v1.IndexedHeader headers = 1; +} + // QueryFinalizedChainInfoRequest is request type for the Query/FinalizedChainInfo RPC method. 
message QueryFinalizedChainInfoRequest { // chain_id is the ID of the CZ @@ -77,9 +152,6 @@ message QueryFinalizedChainInfoResponse { // finalized_chain_info is the info of the CZ babylon.zoneconcierge.v1.ChainInfo finalized_chain_info = 1; - /* - The following fields include metadata related to this chain info - */ // epoch_info is the metadata of the last BTC-finalised epoch babylon.epoching.v1.Epoch epoch_info = 2; // raw_checkpoint is the raw checkpoint of this epoch @@ -87,16 +159,33 @@ message QueryFinalizedChainInfoResponse { // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch babylon.btccheckpoint.v1.SubmissionKey btc_submission_key = 4; - /* - The following fields include proofs that attest the chain info is BTC-finalised - */ - // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block - tendermint.types.TxProof proof_tx_in_block = 5; - // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch - tendermint.crypto.Proof proof_header_in_epoch = 6; - // proof_epoch_sealed is the proof that the epoch is sealed - babylon.zoneconcierge.v1.ProofEpochSealed proof_epoch_sealed = 7; - // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger - // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission - repeated babylon.btccheckpoint.v1.TransactionInfo proof_epoch_submitted = 8; + // proof is the proof that the chain info is finalized + babylon.zoneconcierge.v1.ProofFinalizedChainInfo proof = 5; +} + +// QueryFinalizedChainInfoUntilHeightRequest is request type for the Query/FinalizedChainInfoUntilHeight RPC method. 
+message QueryFinalizedChainInfoUntilHeightRequest { + // chain_id is the ID of the CZ + string chain_id = 1; + // height is the height of the CZ chain + // such that the returned finalised chain info will be no later than this height + uint64 height = 2; + // prove indicates whether the querier wants to get proofs of this timestamp + bool prove = 3; +} + +// QueryFinalizedChainInfoUntilHeightResponse is response type for the Query/FinalizedChainInfoUntilHeight RPC method. +message QueryFinalizedChainInfoUntilHeightResponse { + // finalized_chain_info is the info of the CZ + babylon.zoneconcierge.v1.ChainInfo finalized_chain_info = 1; + + // epoch_info is the metadata of the last BTC-finalised epoch + babylon.epoching.v1.Epoch epoch_info = 2; + // raw_checkpoint is the raw checkpoint of this epoch + babylon.checkpointing.v1.RawCheckpoint raw_checkpoint = 3; + // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch + babylon.btccheckpoint.v1.SubmissionKey btc_submission_key = 4; + + // proof is the proof that the chain info is finalized + babylon.zoneconcierge.v1.ProofFinalizedChainInfo proof = 5; } diff --git a/proto/babylon/zoneconcierge/zoneconcierge.proto b/proto/babylon/zoneconcierge/zoneconcierge.proto index 5d89a43c8..07f219530 100644 --- a/proto/babylon/zoneconcierge/zoneconcierge.proto +++ b/proto/babylon/zoneconcierge/zoneconcierge.proto @@ -1,10 +1,12 @@ syntax = "proto3"; package babylon.zoneconcierge.v1; -import "babylon/checkpointing/bls_key.proto"; -import "babylon/checkpointing/checkpoint.proto"; import "tendermint/types/types.proto"; import "tendermint/crypto/proof.proto"; +import "babylon/btccheckpoint/btccheckpoint.proto"; +import "babylon/checkpointing/bls_key.proto"; +import "babylon/checkpointing/query.proto"; +import "babylon/checkpointing/checkpoint.proto"; option go_package = "github.com/babylonchain/babylon/x/zoneconcierge/types"; @@ -48,10 +50,12 @@ message Forks { message ChainInfo { // chain_id is 
the ID of the chain string chain_id = 1; - // latest_header is the latest header in the canonical chain of CZ + // latest_header is the latest header in CZ's canonical chain IndexedHeader latest_header = 2; // latest_forks is the latest forks, formed as a series of IndexedHeader (from low to high) Forks latest_forks = 3; + // timestamped_headers_count is the number of timestamped headers in CZ's canonical chain + uint64 timestamped_headers_count = 4; } // ProofEpochSealed is the proof that an epoch is sealed by the sealer header, i.e., the 2nd header of the next epoch @@ -72,3 +76,19 @@ message ProofEpochSealed { // proof_epoch_info is the Merkle proof that the epoch's validator set is committed to `app_hash` of the sealer header tendermint.crypto.ProofOps proof_epoch_val_set = 3; } + +// ProofFinalizedChainInfo is a set of proofs that attest a chain info is BTC-finalised +message ProofFinalizedChainInfo { + /* + The following fields include proofs that attest the chain info is BTC-finalised + */ + // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block + tendermint.types.TxProof proof_tx_in_block = 4; + // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch + tendermint.crypto.Proof proof_header_in_epoch = 5; + // proof_epoch_sealed is the proof that the epoch is sealed + babylon.zoneconcierge.v1.ProofEpochSealed proof_epoch_sealed = 6; + // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger + // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission + repeated babylon.btccheckpoint.v1.TransactionInfo proof_epoch_submitted = 7; +} diff --git a/proto/scripts/protocgen.sh b/proto/scripts/protocgen.sh index d4ff67a7b..84e793d13 100755 --- a/proto/scripts/protocgen.sh +++ b/proto/scripts/protocgen.sh @@ -24,4 +24,4 @@ cd .. 
cp -r github.com/babylonchain/babylon/* ./ rm -rf github.com -go mod tidy -compat=1.18 +go mod tidy -compat=1.19 diff --git a/test/e2e/configurer/chain/chain.go b/test/e2e/configurer/chain/chain.go index 8b792f597..fd0d5d437 100644 --- a/test/e2e/configurer/chain/chain.go +++ b/test/e2e/configurer/chain/chain.go @@ -151,7 +151,7 @@ func (c *Config) SendIBC(dstChain *Config, recipient string, token sdk.Coin) { // The default node is the first one created. Returns error if no // ndoes created. func (c *Config) GetDefaultNode() (*NodeConfig, error) { - return c.getNodeAtIndex(defaultNodeIndex) + return c.GetNodeAtIndex(defaultNodeIndex) } // GetPersistentPeers returns persistent peers from every node @@ -164,7 +164,7 @@ func (c *Config) GetPersistentPeers() []string { return peers } -func (c *Config) getNodeAtIndex(nodeIndex int) (*NodeConfig, error) { +func (c *Config) GetNodeAtIndex(nodeIndex int) (*NodeConfig, error) { if nodeIndex > len(c.NodeConfigs) { return nil, fmt.Errorf("node index (%d) is greter than the number of nodes available (%d)", nodeIndex, len(c.NodeConfigs)) } diff --git a/test/e2e/configurer/chain/commands.go b/test/e2e/configurer/chain/commands.go index 87e27f02c..c55d6f843 100644 --- a/test/e2e/configurer/chain/commands.go +++ b/test/e2e/configurer/chain/commands.go @@ -1,9 +1,22 @@ package chain import ( + "encoding/hex" "encoding/json" "fmt" + btccheckpointtypes "github.com/babylonchain/babylon/x/btccheckpoint/types" + cttypes "github.com/babylonchain/babylon/x/checkpointing/types" + "github.com/cosmos/cosmos-sdk/types/bech32" + + txformat "github.com/babylonchain/babylon/btctxformatter" + bbn "github.com/babylonchain/babylon/types" + + "github.com/babylonchain/babylon/test/e2e/initialization" + "github.com/babylonchain/babylon/test/e2e/util" + "github.com/babylonchain/babylon/testutil/datagen" + blc "github.com/babylonchain/babylon/x/btclightclient/types" + "github.com/stretchr/testify/require" ) @@ -38,3 +51,114 @@ func (n *NodeConfig) 
BankSend(amount string, sendAddress string, receiveAddress require.NoError(n.t, err) n.LogActionF("successfully sent bank sent %s from address %s to %s", amount, sendAddress, receiveAddress) } + +func (n *NodeConfig) SendHeaderHex(headerHex string) { + n.LogActionF("btclightclient sending header %s", headerHex) + cmd := []string{"./babylond", "tx", "btclightclient", "insert-header", headerHex, "--from=val", "--gas=500000"} + _, _, err := n.containerManager.ExecTxCmd(n.t, n.chainId, n.Name, cmd) + require.NoError(n.t, err) + n.LogActionF("successfully inserted header %s", headerHex) +} + +func (n *NodeConfig) InsertNewEmptyBtcHeader() *blc.BTCHeaderInfo { + tip, err := n.QueryTip() + require.NoError(n.t, err) + n.t.Logf("Retrieved current tip of btc headerchain. Height: %d", tip.Height) + child := datagen.GenRandomValidBTCHeaderInfoWithParent(*tip) + n.SendHeaderHex(child.Header.MarshalHex()) + n.WaitUntilBtcHeight(tip.Height + 1) + return child +} + +func (n *NodeConfig) InsertHeader(h *bbn.BTCHeaderBytes) { + tip, err := n.QueryTip() + require.NoError(n.t, err) + n.t.Logf("Retrieved current tip of btc headerchain. 
Height: %d", tip.Height) + n.SendHeaderHex(h.MarshalHex()) + n.WaitUntilBtcHeight(tip.Height + 1) +} + +func (n *NodeConfig) InsertProofs(p1 *btccheckpointtypes.BTCSpvProof, p2 *btccheckpointtypes.BTCSpvProof) { + n.LogActionF("btccheckpoint sending proofs") + + p1bytes, err := util.Cdc.Marshal(p1) + require.NoError(n.t, err) + p2bytes, err := util.Cdc.Marshal(p2) + require.NoError(n.t, err) + + p1HexBytes := hex.EncodeToString(p1bytes) + p2HexBytes := hex.EncodeToString(p2bytes) + + cmd := []string{"./babylond", "tx", "btccheckpoint", "insert-proofs", p1HexBytes, p2HexBytes, "--from=val"} + _, _, err = n.containerManager.ExecTxCmd(n.t, n.chainId, n.Name, cmd) + require.NoError(n.t, err) + n.LogActionF("successfully inserted btc spv proofs") +} + +func (n *NodeConfig) FinalizeSealedEpochs(startingEpoch uint64, lastEpoch uint64) { + n.LogActionF("start finalizing epoch starting from %d", startingEpoch) + + madeProgress := false + currEpoch := startingEpoch + for { + if currEpoch > lastEpoch { + break + } + + checkpoint, err := n.QueryCheckpointForEpoch(currEpoch) + + require.NoError(n.t, err) + + // can only finalize sealed checkpoints + if checkpoint.Status != cttypes.Sealed { + return + } + + currentBtcTip, err := n.QueryTip() + + require.NoError(n.t, err) + + _, c, err := bech32.DecodeAndConvert(n.PublicAddress) + + require.NoError(n.t, err) + + btcCheckpoint, err := cttypes.FromRawCkptToBTCCkpt(checkpoint.Ckpt, c) + + require.NoError(n.t, err) + + p1, p2, err := txformat.EncodeCheckpointData( + txformat.BabylonTag(initialization.BabylonOpReturnTag), + txformat.CurrentVersion, + btcCheckpoint, + ) + + require.NoError(n.t, err) + + opReturn1 := datagen.CreateBlockWithTransaction(currentBtcTip.Header.ToBlockHeader(), p1) + + opReturn2 := datagen.CreateBlockWithTransaction(opReturn1.HeaderBytes.ToBlockHeader(), p2) + + n.InsertHeader(&opReturn1.HeaderBytes) + n.InsertHeader(&opReturn2.HeaderBytes) + n.InsertProofs(opReturn1.SpvProof, opReturn2.SpvProof) + + 
n.WaitForCondition(func() bool { + ckpt, err := n.QueryCheckpointForEpoch(currEpoch) + require.NoError(n.t, err) + return ckpt.Status == cttypes.Submitted + }, "Checkpoint should be submitted ") + + madeProgress = true + currEpoch++ + } + + if madeProgress { + // we made progress in above loop, which means the last header of btc chain is + // valid op return header, by finalizing it, we will also finalize all older + // checkpoints + + for i := 0; i < initialization.BabylonBtcFinalizationPeriod; i++ { + n.InsertNewEmptyBtcHeader() + } + } +} diff --git a/test/e2e/configurer/chain/node.go b/test/e2e/configurer/chain/node.go index a98e3989c..9f133091a 100644 --- a/test/e2e/configurer/chain/node.go +++ b/test/e2e/configurer/chain/node.go @@ -111,6 +111,34 @@ func (n *NodeConfig) WaitUntil(doneCondition func(syncInfo coretypes.SyncInfo) b n.t.Errorf("node %s timed out waiting for condition, latest block height was %d", n.Name, latestBlockHeight) } +func (n *NodeConfig) LatestBlockNumber() uint64 { + status, err := n.rpcClient.Status(context.Background()) + require.NoError(n.t, err) + return uint64(status.SyncInfo.LatestBlockHeight) +} + +func (n *NodeConfig) WaitForCondition(doneCondition func() bool, errormsg string) { + for i := 0; i < waitUntilrepeatMax; i++ { + if !doneCondition() { + time.Sleep(waitUntilRepeatPauseTime) + continue + } + return + } + n.t.Errorf("node %s timed out waiting for condition. 
Msg: %s", n.Name, errormsg) +} + +func (n *NodeConfig) WaitUntilBtcHeight(height uint64) { + var latestBlockHeight uint64 + n.WaitForCondition(func() bool { + btcTip, err := n.QueryTip() + require.NoError(n.t, err) + latestBlockHeight = btcTip.Height + + return latestBlockHeight >= height + }, fmt.Sprintf("Timed out waiting for btc height %d", height)) +} + func (n *NodeConfig) extractOperatorAddressIfValidator() error { if !n.IsValidator { n.t.Logf("node (%s) is not a validator, skipping", n.Name) diff --git a/test/e2e/configurer/chain/queries.go b/test/e2e/configurer/chain/queries.go index ab869a686..3144ca161 100644 --- a/test/e2e/configurer/chain/queries.go +++ b/test/e2e/configurer/chain/queries.go @@ -16,6 +16,11 @@ import ( tmabcitypes "github.com/tendermint/tendermint/abci/types" "github.com/babylonchain/babylon/test/e2e/util" + blc "github.com/babylonchain/babylon/x/btclightclient/types" + ct "github.com/babylonchain/babylon/x/checkpointing/types" + etypes "github.com/babylonchain/babylon/x/epoching/types" + mtypes "github.com/babylonchain/babylon/x/monitor/types" + zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types" ) func (n *NodeConfig) QueryGRPCGateway(path string, parameters ...string) ([]byte, error) { @@ -129,3 +134,120 @@ func (n *NodeConfig) QueryListSnapshots() ([]*tmabcitypes.Snapshot, error) { return listSnapshots.Snapshots, nil } + +// func (n *NodeConfig) QueryContractsFromId(codeId int) ([]string, error) { +// path := fmt.Sprintf("/cosmwasm/wasm/v1/code/%d/contracts", codeId) +// bz, err := n.QueryGRPCGateway(path) + +// require.NoError(n.t, err) + +// var contractsResponse wasmtypes.QueryContractsByCodeResponse +// if err := util.Cdc.UnmarshalJSON(bz, &contractsResponse); err != nil { +// return nil, err +// } + +// return contractsResponse.Contracts, nil +// } + +func (n *NodeConfig) QueryCheckpointForEpoch(epoch uint64) (*ct.RawCheckpointWithMeta, error) { + path := fmt.Sprintf("babylon/checkpointing/v1/raw_checkpoint/%d", 
epoch) + bz, err := n.QueryGRPCGateway(path) + require.NoError(n.t, err) + + var checkpointingResponse ct.QueryRawCheckpointResponse + if err := util.Cdc.UnmarshalJSON(bz, &checkpointingResponse); err != nil { + return nil, err + } + + return checkpointingResponse.RawCheckpoint, nil +} + +func (n *NodeConfig) QueryBtcBaseHeader() (*blc.BTCHeaderInfo, error) { + bz, err := n.QueryGRPCGateway("babylon/btclightclient/v1/baseheader") + require.NoError(n.t, err) + + var blcResponse blc.QueryBaseHeaderResponse + if err := util.Cdc.UnmarshalJSON(bz, &blcResponse); err != nil { + return nil, err + } + + return blcResponse.Header, nil +} + +func (n *NodeConfig) QueryTip() (*blc.BTCHeaderInfo, error) { + bz, err := n.QueryGRPCGateway("babylon/btclightclient/v1/tip") + require.NoError(n.t, err) + + var blcResponse blc.QueryTipResponse + if err := util.Cdc.UnmarshalJSON(bz, &blcResponse); err != nil { + return nil, err + } + + return blcResponse.Header, nil +} + +func (n *NodeConfig) QueryFinalizedChainInfo(chainId string) (*zctypes.QueryFinalizedChainInfoResponse, error) { + finalizedPath := fmt.Sprintf("babylon/zoneconcierge/v1/finalized_chain_info/%s", chainId) + bz, err := n.QueryGRPCGateway(finalizedPath) + require.NoError(n.t, err) + + var finalizedResponse zctypes.QueryFinalizedChainInfoResponse + if err := util.Cdc.UnmarshalJSON(bz, &finalizedResponse); err != nil { + return nil, err + } + + return &finalizedResponse, nil +} + +func (n *NodeConfig) QueryCheckpointChains() (*[]string, error) { + bz, err := n.QueryGRPCGateway("babylon/zoneconcierge/v1/chains") + require.NoError(n.t, err) + var chainsResponse zctypes.QueryChainListResponse + if err := util.Cdc.UnmarshalJSON(bz, &chainsResponse); err != nil { + return nil, err + } + return &chainsResponse.ChainIds, nil +} + +func (n *NodeConfig) QueryCheckpointChainInfo(chainId string) (*zctypes.ChainInfo, error) { + infoPath := fmt.Sprintf("/babylon/zoneconcierge/v1/chain_info/%s", chainId) + bz, err := 
n.QueryGRPCGateway(infoPath) + require.NoError(n.t, err) + var infoResponse zctypes.QueryChainInfoResponse + if err := util.Cdc.UnmarshalJSON(bz, &infoResponse); err != nil { + return nil, err + } + return infoResponse.ChainInfo, nil +} + +func (n *NodeConfig) QueryCurrentEpoch() (uint64, error) { + bz, err := n.QueryGRPCGateway("/babylon/epoching/v1/current_epoch") + require.NoError(n.t, err) + var epochResponse etypes.QueryCurrentEpochResponse + if err := util.Cdc.UnmarshalJSON(bz, &epochResponse); err != nil { + return 0, err + } + return epochResponse.CurrentEpoch, nil +} + +func (n *NodeConfig) QueryLightClientHeightEpochEnd(epoch uint64) (uint64, error) { + monitorPath := fmt.Sprintf("/babylon/monitor/v1/epochs/%d", epoch) + bz, err := n.QueryGRPCGateway(monitorPath) + require.NoError(n.t, err) + var mResponse mtypes.QueryEndedEpochBtcHeightResponse + if err := util.Cdc.UnmarshalJSON(bz, &mResponse); err != nil { + return 0, err + } + return mResponse.BtcLightClientHeight, nil +} + +func (n *NodeConfig) QueryLightClientHeightCheckpointReported(ckptHash []byte) (uint64, error) { + monitorPath := fmt.Sprintf("/babylon/monitor/v1/checkpoints/%x", ckptHash) + bz, err := n.QueryGRPCGateway(monitorPath) + require.NoError(n.t, err) + var mResponse mtypes.QueryReportedCheckpointBtcHeightResponse + if err := util.Cdc.UnmarshalJSON(bz, &mResponse); err != nil { + return 0, err + } + return mResponse.BtcLightClientHeight, nil +} diff --git a/test/e2e/containers/containers.go b/test/e2e/containers/containers.go index 622326f67..e97fba0db 100644 --- a/test/e2e/containers/containers.go +++ b/test/e2e/containers/containers.go @@ -61,7 +61,7 @@ func (m *Manager) ExecTxCmd(t *testing.T, chainId string, containerName string, // namely adding flags `--chain-id={chain-id} -b=block --yes --keyring-backend=test "--log_format=json"`, // and searching for `successStr` func (m *Manager) ExecTxCmdWithSuccessString(t *testing.T, chainId string, containerName string, command []string, 
successStr string) (bytes.Buffer, bytes.Buffer, error) { - allTxArgs := []string{fmt.Sprintf("--chain-id=%s", chainId), "-b=block", "--yes", "--keyring-backend=test", "--log_format=json"} + allTxArgs := []string{fmt.Sprintf("--chain-id=%s", chainId), "-b=block", "--yes", "--keyring-backend=test", "--log_format=json", "--home=/babylondata"} txCommand := append(command, allTxArgs...) return m.ExecCmd(t, containerName, txCommand, successStr) } @@ -206,8 +206,11 @@ func (m *Manager) RunNodeResource(chainId string, containerName, valCondifDir st NetworkID: m.network.Network.ID, User: "root:root", Cmd: []string{"start"}, + Env: []string{ + "HOME=/babylondata", + }, Mounts: []string{ - fmt.Sprintf("%s/:/data/node0/babylond", valCondifDir), + fmt.Sprintf("%s/:/babylondata", valCondifDir), }, } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index d9473d884..182430580 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -3,6 +3,13 @@ package e2e +import ( + "fmt" + + "github.com/babylonchain/babylon/test/e2e/initialization" + ct "github.com/babylonchain/babylon/x/checkpointing/types" +) + // Most simple test, just checking that two chains are up and connected through // ibc func (s *IntegrationTestSuite) TestConnectIbc() { @@ -13,3 +20,44 @@ func (s *IntegrationTestSuite) TestConnectIbc() { _, err = chainB.GetDefaultNode() s.NoError(err) } + +func (s *IntegrationTestSuite) TestIbcCheckpointing() { + chainA := s.configurer.GetChainConfig(0) + + chainA.WaitUntilHeight(25) + + nonValidatorNode, err := chainA.GetNodeAtIndex(2) + s.NoError(err) + + // Finalize epoch 1 and 2, as first headers of opposing chain are in epoch 2 + nonValidatorNode.FinalizeSealedEpochs(1, 2) + + epoch2, err := nonValidatorNode.QueryCheckpointForEpoch(2) + s.NoError(err) + + if epoch2.Status != ct.Finalized { + s.FailNow("Epoch 2 should be finalized") + } + + // Check we have finalized epoch info for opposing chain and some basic assertions + fininfo, err := 
nonValidatorNode.QueryFinalizedChainInfo(initialization.ChainBID) + s.NoError(err) + // TODO Add more assertion here. Maybe check proofs ? + s.Equal(fininfo.FinalizedChainInfo.ChainId, initialization.ChainBID) + s.Equal(fininfo.EpochInfo.EpochNumber, uint64(2)) + + currEpoch, err := nonValidatorNode.QueryCurrentEpoch() + s.NoError(err) + + heightAtEndedEpoch, err := nonValidatorNode.QueryLightClientHeightEpochEnd(currEpoch - 1) + s.NoError(err) + + if heightAtEndedEpoch == 0 { + // we can only assert, that btc lc height is larger than 0. + s.FailNow(fmt.Sprintf("Light client height should be > 0 on epoch %d", currEpoch-1)) + } + + chainB := s.configurer.GetChainConfig(1) + _, err = chainB.GetDefaultNode() + s.NoError(err) +} diff --git a/test/e2e/initialization/config.go b/test/e2e/initialization/config.go index 7ed392896..77b69f11d 100644 --- a/test/e2e/initialization/config.go +++ b/test/e2e/initialization/config.go @@ -7,6 +7,9 @@ import ( "time" "github.com/babylonchain/babylon/privval" + bbn "github.com/babylonchain/babylon/types" + btccheckpointtypes "github.com/babylonchain/babylon/x/btccheckpoint/types" + blctypes "github.com/babylonchain/babylon/x/btclightclient/types" checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" ed25519 "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" @@ -40,9 +43,12 @@ type NodeConfig struct { const ( // common - BabylonDenom = "ubbn" - MinGasPrice = "0.000" - ValidatorWalletName = "val" + BabylonDenom = "ubbn" + MinGasPrice = "0.000" + ValidatorWalletName = "val" + BabylonOpReturnTag = "bbni" + BabylonBtcConfirmationPeriod = 2 + BabylonBtcFinalizationPeriod = 4 // chainA ChainAID = "bbn-test-a" BabylonBalanceA = 200000000000 @@ -226,6 +232,16 @@ func initGenesis(chain *internalChain, votingPeriod, expeditedVotingPeriod time. 
return err } + err = updateModuleGenesis(appGenState, blctypes.ModuleName, blctypes.DefaultGenesis(), updateBtcLightClientGenesis) + if err != nil { + return err + } + + err = updateModuleGenesis(appGenState, btccheckpointtypes.ModuleName, btccheckpointtypes.DefaultGenesis(), updateBtccheckpointGenesis) + if err != nil { + return err + } + bz, err := json.MarshalIndent(appGenState, "", " ") if err != nil { return err @@ -278,6 +294,23 @@ func updateCrisisGenesis(crisisGenState *crisistypes.GenesisState) { crisisGenState.ConstantFee.Denom = BabylonDenom } +func updateBtcLightClientGenesis(blcGenState *blctypes.GenesisState) { + blcGenState.Params = blctypes.DefaultParams() + btcSimnetGenesisHex := "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a45068653ffff7f2002000000" + baseBtcHeader, err := bbn.NewBTCHeaderBytesFromHex(btcSimnetGenesisHex) + if err != nil { + panic(err) + } + work := blctypes.CalcWork(&baseBtcHeader) + blcGenState.BaseBtcHeader = *blctypes.NewBTCHeaderInfo(&baseBtcHeader, baseBtcHeader.Hash(), 0, &work) +} + +func updateBtccheckpointGenesis(btccheckpointGenState *btccheckpointtypes.GenesisState) { + btccheckpointGenState.Params = btccheckpointtypes.DefaultParams() + btccheckpointGenState.Params.BtcConfirmationDepth = BabylonBtcConfirmationPeriod + btccheckpointGenState.Params.CheckpointFinalizationTimeout = BabylonBtcFinalizationPeriod +} + func updateGenUtilGenesis(c *internalChain) func(*genutiltypes.GenesisState) { return func(genUtilGenState *genutiltypes.GenesisState) { // generate genesis txs diff --git a/test/e2e/initialization/node.go b/test/e2e/initialization/node.go index 617f81a71..2d990ed9c 100644 --- a/test/e2e/initialization/node.go +++ b/test/e2e/initialization/node.go @@ -9,9 +9,8 @@ import ( "strings" "github.com/babylonchain/babylon/crypto/bls12381" - tmed25519 "github.com/tendermint/tendermint/crypto/ed25519" - 
"github.com/babylonchain/babylon/privval" + bbn "github.com/babylonchain/babylon/types" sdkcrypto "github.com/cosmos/cosmos-sdk/crypto" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" "github.com/cosmos/cosmos-sdk/crypto/hd" @@ -28,6 +27,7 @@ import ( "github.com/cosmos/go-bip39" "github.com/spf13/viper" tmconfig "github.com/tendermint/tendermint/config" + tmed25519 "github.com/tendermint/tendermint/crypto/ed25519" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/p2p" tmtypes "github.com/tendermint/tendermint/types" @@ -129,6 +129,8 @@ func (n *internalNode) createAppConfig(nodeConfig *NodeConfig) { appConfig.StateSync.SnapshotInterval = nodeConfig.SnapshotInterval appConfig.StateSync.SnapshotKeepRecent = nodeConfig.SnapshotKeepRecent appConfig.SignerConfig.KeyName = ValidatorWalletName + appConfig.BtcConfig.Network = string(bbn.BtcSimnet) + appConfig.BtcConfig.CheckpointTag = BabylonOpReturnTag customTemplate := cmd.DefaultBabylonTemplate() diff --git a/testutil/datagen/btc_header_info.go b/testutil/datagen/btc_header_info.go index 137c25c37..2a705ca98 100644 --- a/testutil/datagen/btc_header_info.go +++ b/testutil/datagen/btc_header_info.go @@ -140,6 +140,33 @@ func GenRandomBTCHeaderInfoWithParent(parent *btclightclienttypes.BTCHeaderInfo) return GenRandomBTCHeaderInfoWithParentAndBits(parent, nil) } +// GenRandomValidBTCHeaderInfoWithParent generates random BTCHeaderInfo object +// with valid proof of work. 
+// WARNING: if parent is from network with a lot of work (mainnet) it may never finish +// use only with simnet headers +func GenRandomValidBTCHeaderInfoWithParent(parent btclightclienttypes.BTCHeaderInfo) *btclightclienttypes.BTCHeaderInfo { + randHeader := GenRandomBtcdHeader() + parentHeader := parent.Header.ToBlockHeader() + + randHeader.Version = parentHeader.Version + randHeader.PrevBlock = parentHeader.BlockHash() + randHeader.Bits = parentHeader.Bits + randHeader.Timestamp = parentHeader.Timestamp.Add(50 * time.Second) + SolveBlock(randHeader) + + headerBytes := bbn.NewBTCHeaderBytesFromBlockHeader(randHeader) + + accumulatedWork := btclightclienttypes.CalcWork(&headerBytes) + accumulatedWork = btclightclienttypes.CumulativeWork(accumulatedWork, *parent.Work) + + return &btclightclienttypes.BTCHeaderInfo{ + Header: &headerBytes, + Hash: headerBytes.Hash(), + Height: parent.Height + 1, + Work: &accumulatedWork, + } +} + func GenRandomBTCHeaderInfoWithBits(bits *sdk.Uint) *btclightclienttypes.BTCHeaderInfo { return GenRandomBTCHeaderInfoWithParentAndBits(nil, bits) } diff --git a/testutil/datagen/btc_transaction.go b/testutil/datagen/btc_transaction.go index b3ea03b1f..19496ba06 100644 --- a/testutil/datagen/btc_transaction.go +++ b/testutil/datagen/btc_transaction.go @@ -501,7 +501,7 @@ func getExpectedOpReturn(tag txformat.BabylonTag, f []byte, s []byte) []byte { return connected } -func RandomRawCheckpointDataForEpoch(e uint64) *TestRawCheckpointData { +func RandomRawCheckpointDataForEpoch(e uint64) (*TestRawCheckpointData, *txformat.RawBtcCheckpoint) { checkpointData := getRandomCheckpointDataForEpoch(e) rawBTCCkpt := &txformat.RawBtcCheckpoint{ Epoch: checkpointData.epoch, @@ -510,7 +510,7 @@ func RandomRawCheckpointDataForEpoch(e uint64) *TestRawCheckpointData { SubmitterAddress: checkpointData.submitterAddress, BlsSig: checkpointData.blsSig, } - return EncodeRawCkptToTestData(rawBTCCkpt) + return EncodeRawCkptToTestData(rawBTCCkpt), rawBTCCkpt } func 
EncodeRawCkptToTestData(rawBTCCkpt *txformat.RawBtcCheckpoint) *TestRawCheckpointData { @@ -545,7 +545,7 @@ func GenerateMessageWithRandomSubmitterForEpoch(epoch uint64) *btcctypes.MsgInse tx2 := numInRange(1, 99) // in those tests epoch is not important - raw := RandomRawCheckpointDataForEpoch(epoch) + raw, _ := RandomRawCheckpointDataForEpoch(epoch) blck1 := CreateBlock(0, uint32(numTransactions), uint32(tx1), raw.FirstPart) diff --git a/testutil/datagen/raw_checkpoint.go b/testutil/datagen/raw_checkpoint.go index 8707ac35f..a798171c5 100644 --- a/testutil/datagen/raw_checkpoint.go +++ b/testutil/datagen/raw_checkpoint.go @@ -82,6 +82,16 @@ func GenRandomSequenceRawCheckpointsWithMeta() []*types.RawCheckpointWithMeta { return checkpoints } +func GenSequenceRawCheckpointsWithMeta(tipEpoch uint64) []*types.RawCheckpointWithMeta { + ckpts := make([]*types.RawCheckpointWithMeta, int(tipEpoch)+1) + for e := uint64(0); e <= tipEpoch; e++ { + ckpt := GenRandomRawCheckpointWithMeta() + ckpt.Ckpt.EpochNum = e + ckpts[int(e)] = ckpt + } + return ckpts +} + func GenerateBLSSigs(keys []bls12381.PrivateKey, msg []byte) []bls12381.Signature { var sigs []bls12381.Signature for _, privkey := range keys { diff --git a/testutil/datagen/tendermint.go b/testutil/datagen/tendermint.go index 722b2b5d5..0a1193d31 100644 --- a/testutil/datagen/tendermint.go +++ b/testutil/datagen/tendermint.go @@ -1,6 +1,8 @@ package datagen import ( + "time" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -9,6 +11,7 @@ func GenRandomTMHeader(chainID string, height uint64) *tmproto.Header { return &tmproto.Header{ ChainID: chainID, Height: int64(height), + Time: time.Now(), LastCommitHash: GenRandomByteArray(32), } } diff --git a/x/btccheckpoint/client/cli/query.go b/x/btccheckpoint/client/cli/query.go index 4ecbd7963..73696536e 100644 --- a/x/btccheckpoint/client/cli/query.go +++ 
b/x/btccheckpoint/client/cli/query.go @@ -26,36 +26,36 @@ func GetQueryCmd(queryRoute string) *cobra.Command { cmd.AddCommand(CmdQueryParams()) - cmd.AddCommand(CmdBtcCheckpointHeight()) + cmd.AddCommand(CmdBtcCheckpointHeightAndHash()) cmd.AddCommand(CmdEpochSubmissions()) return cmd } -func CmdBtcCheckpointHeight() *cobra.Command { +func CmdBtcCheckpointHeightAndHash() *cobra.Command { cmd := &cobra.Command{ - Use: "btc-height ", - Short: "retrieve earliest btc height for given epoch", + Use: "btc-height-hash ", + Short: "retrieve earliest btc height and hash for given epoch", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { clientCtx := client.GetClientContextFromCmd(cmd) queryClient := types.NewQueryClient(clientCtx) - epoch_num, err := strconv.ParseUint(args[0], 10, 64) + epochNum, err := strconv.ParseUint(args[0], 10, 64) if err != nil { return err } - params := types.QueryBtcCheckpointHeightRequest{EpochNum: epoch_num} + req := types.QueryBtcCheckpointInfoRequest{EpochNum: epochNum} - res, err := queryClient.BtcCheckpointHeight(context.Background(), &params) + resp, err := queryClient.BtcCheckpointInfo(context.Background(), &req) if err != nil { return err } - return clientCtx.PrintProto(res) + return clientCtx.PrintProto(resp) }, } @@ -65,7 +65,7 @@ func CmdBtcCheckpointHeight() *cobra.Command { func CmdEpochSubmissions() *cobra.Command { cmd := &cobra.Command{ - Use: "epoch-submissions ", + Use: "epoch-submissions ", Short: "all checkpoint submissions for given epoch", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -73,7 +73,7 @@ func CmdEpochSubmissions() *cobra.Command { queryClient := types.NewQueryClient(clientCtx) - epoch_num, err := strconv.ParseUint(args[0], 10, 64) + epochNum, err := strconv.ParseUint(args[0], 10, 64) if err != nil { return err @@ -84,7 +84,7 @@ func CmdEpochSubmissions() *cobra.Command { return err } - params := types.QueryEpochSubmissionsRequest{EpochNum: epoch_num, 
Pagination: pageReq} + params := types.QueryEpochSubmissionsRequest{EpochNum: epochNum, Pagination: pageReq} res, err := queryClient.EpochSubmissions(context.Background(), &params) diff --git a/x/btccheckpoint/client/cli/tx.go b/x/btccheckpoint/client/cli/tx.go index c6358bc6b..752ae5679 100644 --- a/x/btccheckpoint/client/cli/tx.go +++ b/x/btccheckpoint/client/cli/tx.go @@ -7,6 +7,9 @@ import ( "github.com/spf13/cobra" "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + // "github.com/cosmos/cosmos-sdk/client/flags" "github.com/babylonchain/babylon/x/btccheckpoint/types" ) @@ -31,5 +34,47 @@ func GetTxCmd() *cobra.Command { RunE: client.ValidateCmd, } + cmd.AddCommand(CmdTxInsertSpvProofs()) + + return cmd +} + +// TODO this api is not super friendly i.e it is not easy to provide hex encoded +// proto serialized blobs. It would be good to have version which takes some +// other format like json or maybe path to file +func CmdTxInsertSpvProofs() *cobra.Command { + cmd := &cobra.Command{ + Use: "insert-proofs [proof-hex-string] [proof-hex-string]", + Short: "submit proof bytes", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + proof1, err := types.NewSpvProofFromHexBytes(clientCtx.Codec, args[0]) + + if err != nil { + return err + } + + proof2, err := types.NewSpvProofFromHexBytes(clientCtx.Codec, args[1]) + + if err != nil { + return err + } + + msg := &types.MsgInsertBTCSpvProof{ + Submitter: clientCtx.GetFromAddress().String(), + Proofs: []*types.BTCSpvProof{proof1, proof2}, + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + return cmd } diff --git a/x/btccheckpoint/keeper/grpc_query.go b/x/btccheckpoint/keeper/grpc_query.go index 2648777c0..3b1834d51 100644 --- a/x/btccheckpoint/keeper/grpc_query.go +++ 
b/x/btccheckpoint/keeper/grpc_query.go @@ -3,9 +3,11 @@ package keeper import ( "context" "errors" + "fmt" "math" "github.com/babylonchain/babylon/x/btccheckpoint/types" + "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/query" "google.golang.org/grpc/codes" @@ -14,14 +16,15 @@ import ( var _ types.QueryServer = Keeper{} -func (k Keeper) lowestBtcHeight(ctx sdk.Context, subKey *types.SubmissionKey) (uint64, error) { - // initializing to max, as then every header number will be smaller +func (k Keeper) lowestBtcHeightAndHash(ctx sdk.Context, subKey *types.SubmissionKey) (uint64, []byte, error) { + // initializing to max, as then every header height will be smaller var lowestHeaderNumber uint64 = math.MaxUint64 + var lowestHeaderHash []byte for _, tk := range subKey.Key { if !k.CheckHeaderIsOnMainChain(ctx, tk.Hash) { - return 0, errors.New("one of submission headers not on main chain") + return 0, nil, errors.New("one of submission headers not on main chain") } headerNumber, err := k.GetBlockHeight(ctx, tk.Hash) @@ -35,13 +38,56 @@ func (k Keeper) lowestBtcHeight(ctx sdk.Context, subKey *types.SubmissionKey) (u if headerNumber < lowestHeaderNumber { lowestHeaderNumber = headerNumber + lowestHeaderHash = *tk.Hash } } - return lowestHeaderNumber, nil + return lowestHeaderNumber, lowestHeaderHash, nil } -func (k Keeper) BtcCheckpointHeight(c context.Context, req *types.QueryBtcCheckpointHeightRequest) (*types.QueryBtcCheckpointHeightResponse, error) { +func (k Keeper) getCheckpointInfo(ctx sdk.Context, epochNum uint64, subKeys []*types.SubmissionKey) (*types.BTCCheckpointInfo, error) { + if len(subKeys) == 0 { + return nil, errors.New("empty subKeys") + } + + info := types.BTCCheckpointInfo{ + EpochNumber: epochNum, + EarliestBtcBlockNumber: math.MaxUint64, // initializing to max, as then every header height will be smaller + VigilanteAddressList: []*types.CheckpointAddresses{}, + } + + for _, 
subKey := range subKeys { + headerNumber, headerHash, err := k.lowestBtcHeightAndHash(ctx, subKey) + if err != nil { + // submission is not valid for some reason, ignore it + continue + } + + // get vigilante address + sd := k.GetSubmissionData(ctx, *subKey) + if sd == nil { + // submission is not valid for some reason, ignore it + continue + } + + // ensure lowest header number and hash + if headerNumber < info.EarliestBtcBlockNumber { + info.EarliestBtcBlockNumber = headerNumber + info.EarliestBtcBlockHash = headerHash + } + // append vigilante addresses + vAddrs := *sd.VigilanteAddresses // make a new copy + info.VigilanteAddressList = append(info.VigilanteAddressList, &vAddrs) + } + + if info.EarliestBtcBlockNumber == math.MaxUint64 { + return nil, errors.New("there is no valid submission for given raw checkpoint") + } + + return &info, nil +} + +func (k Keeper) BtcCheckpointInfo(c context.Context, req *types.QueryBtcCheckpointInfoRequest) (*types.QueryBtcCheckpointInfoResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") } @@ -57,28 +103,71 @@ func (k Keeper) BtcCheckpointHeight(c context.Context, req *types.QueryBtcCheckp return nil, errors.New("checkpoint for given epoch not yet submitted") } - var lowestHeaderNumber uint64 = math.MaxUint64 + ckptInfo, err := k.getCheckpointInfo(ctx, checkpointEpoch, epochData.Key) + if err != nil { + return nil, fmt.Errorf("failed to get lowest BTC height and hash in keys of epoch %d: %w", req.EpochNum, err) + } - // we need to go for each submission in given epoch - for _, submissionKey := range epochData.Key { + resp := &types.QueryBtcCheckpointInfoResponse{ + Info: ckptInfo, + } + return resp, nil +} - headerNumber, err := k.lowestBtcHeight(ctx, submissionKey) +func (k Keeper) BtcCheckpointsInfo(c context.Context, req *types.QueryBtcCheckpointsInfoRequest) (*types.QueryBtcCheckpointsInfoResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, 
"invalid request") + } - if err != nil { - // submission is not valid for some reason, ignore it - continue - } + ctx := sdk.UnwrapSDKContext(c) - if headerNumber < lowestHeaderNumber { - lowestHeaderNumber = headerNumber + // parse start_epoch and end_epoch and forward to the pagination request + if req.EndEpoch > 0 { + // this query uses start_epoch and end_epoch to specify range + if req.StartEpoch > req.EndEpoch { + return nil, fmt.Errorf("StartEpoch (%d) should not be larger than EndEpoch (%d)", req.StartEpoch, req.EndEpoch) + } + req.Pagination = &query.PageRequest{ + Key: sdk.Uint64ToBigEndian(req.StartEpoch), + Limit: req.EndEpoch - req.StartEpoch + 1, + Reverse: false, } } - if lowestHeaderNumber == math.MaxUint64 { - return nil, errors.New("there is no valid submission for given raw checkpoint") + store := ctx.KVStore(k.storeKey) + epochDataStore := prefix.NewStore(store, types.EpochDataPrefix) + + ckptInfoList := []*types.BTCCheckpointInfo{} + // iterate over epochDataStore, where key is the epoch number and value is the epoch data + pageRes, err := query.Paginate(epochDataStore, req.Pagination, func(key, value []byte) error { + epochNum := sdk.BigEndianToUint64(key) + var epochData types.EpochData + k.cdc.MustUnmarshal(value, &epochData) + + // Check if we have any submission for given epoch + if len(epochData.Key) == 0 { + return errors.New("checkpoint for given epoch not yet submitted") + } + + ckptInfo, err := k.getCheckpointInfo(ctx, epochNum, epochData.Key) + if err != nil { + return fmt.Errorf("failed to get lowest BTC height and hash in keys of epoch %d: %w", epochNum, err) + } + + // append ckpt info + ckptInfoList = append(ckptInfoList, ckptInfo) + + return nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) } - return &types.QueryBtcCheckpointHeightResponse{EarliestBtcBlockNumber: lowestHeaderNumber}, nil + resp := &types.QueryBtcCheckpointsInfoResponse{ + InfoList: ckptInfoList, + Pagination: pageRes, + } + 
return resp, nil } func getOffset(pageReq *query.PageRequest) uint64 { diff --git a/x/btccheckpoint/keeper/keeper.go b/x/btccheckpoint/keeper/keeper.go index 3c5630121..d49b87b1c 100644 --- a/x/btccheckpoint/keeper/keeper.go +++ b/x/btccheckpoint/keeper/keeper.go @@ -165,20 +165,18 @@ func (k Keeper) GetSubmissionBtcInfo(ctx sdk.Context, sk types.SubmissionKey) (* }, nil } -func (k Keeper) GetCheckpointEpoch(ctx sdk.Context, c []byte) (uint64, error) { - return k.checkpointingKeeper.CheckpointEpoch(ctx, c) -} - func (k Keeper) SubmissionExists(ctx sdk.Context, sk types.SubmissionKey) bool { return k.GetSubmissionData(ctx, sk) != nil } -// Return epoch data for given epoch, if there is not epoch data yet returns nil +// GetEpochData returns epoch data for given epoch, if there is not epoch data yet returns nil func (k Keeper) GetEpochData(ctx sdk.Context, e uint64) *types.EpochData { store := ctx.KVStore(k.storeKey) bytes := store.Get(types.GetEpochIndexKey(e)) - if len(bytes) == 0 { + // note: Cannot check len(bytes) == 0, as empty bytes encoding of types.EpochData + // is epoch data with Status == Submitted and no valid submissions + if bytes == nil { return nil } @@ -187,6 +185,24 @@ func (k Keeper) GetEpochData(ctx sdk.Context, e uint64) *types.EpochData { return ed } +// GetBestSubmission gets the status and the best submission of a given finalized epoch +func (k Keeper) GetBestSubmission(ctx sdk.Context, epochNumber uint64) (types.BtcStatus, *types.SubmissionKey, error) { + // find the btc checkpoint tx index of this epoch + ed := k.GetEpochData(ctx, epochNumber) + if ed == nil { + return 0, nil, types.ErrNoCheckpointsForPreviousEpoch + } + if ed.Status != types.Finalized { + return 0, nil, fmt.Errorf("epoch %d has not been finalized yet", epochNumber) + } + if len(ed.Key) == 0 { + return 0, nil, types.ErrNoCheckpointsForPreviousEpoch + } + bestSubmissionKey := ed.Key[0] // index of checkpoint tx on BTC + + return ed.Status, bestSubmissionKey, nil +} + // 
checkAncestors checks if there is at least one ancestor in previous epoch submissions // previous epoch submission is considered ancestor when: // - it is on main chain @@ -262,7 +278,6 @@ func (k Keeper) addEpochSubmission( epochNum uint64, sk types.SubmissionKey, sd types.SubmissionData, - epochRawCheckpoint []byte, ) error { ed := k.GetEpochData(ctx, epochNum) @@ -275,7 +290,7 @@ func (k Keeper) addEpochSubmission( // if ed is nil, it means it is our first submission for this epoch if ed == nil { // we do not have any data saved yet - newEd := types.NewEmptyEpochData(epochRawCheckpoint) + newEd := types.NewEmptyEpochData() ed = &newEd } @@ -411,7 +426,6 @@ func (k Keeper) clearEpochData( epoch []byte, epochDataStore prefix.Store, currentEpoch *types.EpochData) { - for _, sk := range currentEpoch.Key { k.deleteSubmission(ctx, *sk) } diff --git a/x/btccheckpoint/keeper/msg_server.go b/x/btccheckpoint/keeper/msg_server.go index 79c37bbe6..69ae6e0d3 100644 --- a/x/btccheckpoint/keeper/msg_server.go +++ b/x/btccheckpoint/keeper/msg_server.go @@ -44,19 +44,22 @@ func (m msgServer) InsertBTCSpvProof(ctx context.Context, req *types.MsgInsertBT return nil, types.ErrInvalidHeader.Wrap(err.Error()) } - rawCheckpointBytes := rawSubmission.GetRawCheckPointBytes() // At this point: // - every proof of inclusion is valid i.e every transaction is proved to be // part of provided block and contains some OP_RETURN data // - header is proved to be part of the chain we know about through BTCLightClient // - this is new checkpoint submission - // Get info about this checkpoints epoch - epochNum, err := m.k.GetCheckpointEpoch(sdkCtx, rawCheckpointBytes) + // Verify if this is expected checkpoint + err = m.k.checkpointingKeeper.VerifyCheckpoint(sdkCtx, rawSubmission.CheckpointData) if err != nil { return nil, err } + // At this point we know this is a valid checkpoint for this epoch as this was validated + // by checkpointing module + epochNum := rawSubmission.CheckpointData.Epoch + 
err = m.k.checkAncestors(sdkCtx, epochNum, newSubmissionOldestHeaderDepth) if err != nil { @@ -80,7 +83,6 @@ func (m msgServer) InsertBTCSpvProof(ctx context.Context, req *types.MsgInsertBT epochNum, submissionKey, submissionData, - rawCheckpointBytes, ) if err != nil { diff --git a/x/btccheckpoint/keeper/msg_server_test.go b/x/btccheckpoint/keeper/msg_server_test.go index 406bd6044..d473af083 100644 --- a/x/btccheckpoint/keeper/msg_server_test.go +++ b/x/btccheckpoint/keeper/msg_server_test.go @@ -45,11 +45,10 @@ func b2TxIdx(m *btcctypes.MsgInsertBTCSpvProof) uint32 { func InitTestKeepers( t *testing.T, - epoch uint64, ) *TestKeepers { lc := btcctypes.NewMockBTCLightClientKeeper() - cc := btcctypes.NewMockCheckpointingKeeper(epoch) + cc := btcctypes.NewMockCheckpointingKeeper() k, ctx := keepertest.NewBTCCheckpointKeeper(t, lc, cc, chaincfg.SimNetParams.PowLimit) @@ -69,11 +68,7 @@ func (k *TestKeepers) insertProofMsg(msg *btcctypes.MsgInsertBTCSpvProof) (*btcc return k.MsgSrv.InsertBTCSpvProof(k.Ctx, msg) } -func (k *TestKeepers) setEpoch(epoch uint64) { - k.Checkpointing.SetEpoch(epoch) -} - -func (k *TestKeepers) getEpochData(e uint64) *btcctypes.EpochData { +func (k *TestKeepers) GetEpochData(e uint64) *btcctypes.EpochData { return k.BTCCheckpoint.GetEpochData(k.SdkCtx, e) } @@ -88,13 +83,13 @@ func (k *TestKeepers) onTipChange() { func TestRejectDuplicatedSubmission(t *testing.T) { rand.Seed(time.Now().Unix()) epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) @@ -123,12 +118,12 @@ func TestRejectDuplicatedSubmission(t *testing.T) { func TestRejectUnknownToBtcLightClient(t *testing.T) { rand.Seed(time.Now().Unix()) epoch := uint64(1) - raw := 
dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) @@ -147,12 +142,12 @@ func TestRejectUnknownToBtcLightClient(t *testing.T) { func TestRejectSubmissionsNotOnMainchain(t *testing.T) { rand.Seed(time.Now().Unix()) epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) @@ -184,12 +179,12 @@ func TestRejectSubmissionsNotOnMainchain(t *testing.T) { func TestSubmitValidNewCheckpoint(t *testing.T) { rand.Seed(time.Now().Unix()) epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, rawBtcCheckpoint := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) // here we will only have valid unconfirmed submissions - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) @@ -201,7 +196,7 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { require.NoErrorf(t, err, "Unexpected message processing error: %v", err) - ed := tk.getEpochData(epoch) + ed := tk.GetEpochData(epoch) if len(ed.Key) == 0 { t.Errorf("There should be at least one key in epoch %d", epoch) @@ -211,10 +206,6 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { t.Errorf("Epoch should be in submitted state after processing message") } - if !bytes.Equal(raw.ExpectedOpReturn, ed.RawCheckpoint) { - t.Errorf("Epoch does not 
contain expected op return data") - } - submissionKey := ed.Key[0] submissionData := tk.getSubmissionData(*submissionKey) @@ -231,6 +222,10 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { t.Errorf("Submission data with invalid TransactionInfo") } + if !bytes.Equal(rawBtcCheckpoint.SubmitterAddress, submissionData.VigilanteAddresses.Submitter) { + t.Errorf("Submission data does not contain expected submitter address") + } + for i, txInfo := range submissionData.TxsInfo { require.Equal(t, submissionKey.Key[i].Index, txInfo.Key.Index) require.True(t, submissionKey.Key[i].Hash.Eq(txInfo.Key.Hash)) @@ -238,7 +233,7 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { require.Equal(t, msg.Proofs[i].MerkleNodes, txInfo.Proof) } - ed1 := tk.getEpochData(epoch) + ed1 := tk.GetEpochData(epoch) // TODO Add custom equal fo submission key and transaction key to check // it is expected key @@ -249,20 +244,19 @@ func TestSubmitValidNewCheckpoint(t *testing.T) { func TestRejectSubmissionWithoutSubmissionsForPreviousEpoch(t *testing.T) { rand.Seed(time.Now().Unix()) - epoch := uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + epoch := uint64(2) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) // here we will only have valid unconfirmed submissions - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) // Now we will return depth enough for moving submission to be submitted tk.BTCLightClient.SetDepth(blck1.HeaderBytes.Hash(), int64(0)) tk.BTCLightClient.SetDepth(blck2.HeaderBytes.Hash(), int64(1)) - tk.Checkpointing.SetEpoch(2) _, err := tk.insertProofMsg(msg) @@ -277,12 +271,12 @@ func TestRejectSubmissionWithoutSubmissionsForPreviousEpoch(t *testing.T) { func TestRejectSubmissionWithoutAncestorsOnMainchainInPreviousEpoch(t *testing.T) { rand.Seed(time.Now().Unix()) epoch 
:= uint64(1) - raw := dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) epoch1Block1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) epoch1Block2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) // here we will only have valid unconfirmed submissions - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{epoch1Block1, epoch1Block2}) // Now we will return depth enough for moving submission to be submitted @@ -294,12 +288,9 @@ func TestRejectSubmissionWithoutAncestorsOnMainchainInPreviousEpoch(t *testing.T require.NoErrorf(t, err, "Unexpected message processing error: %v", err) epoch2 := uint64(2) - raw2 := dg.RandomRawCheckpointDataForEpoch(epoch2) + raw2, _ := dg.RandomRawCheckpointDataForEpoch(epoch2) epoch2Block1 := dg.CreateBlock(1, 19, 2, raw2.FirstPart) epoch2Block2 := dg.CreateBlock(2, 14, 7, raw2.SecondPart) - // Submitting checkpoints for epoch 2, there should be at least one submission - // for epoch 1, with headers deeper in chain that in this new submission - tk.Checkpointing.SetEpoch(epoch2) msg2 := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{epoch2Block1, epoch2Block2}) // Both headers are deeper than epoch 1 submission, fail @@ -353,7 +344,7 @@ func TestRejectSubmissionWithoutAncestorsOnMainchainInPreviousEpoch(t *testing.T func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { rand.Seed(time.Now().Unix()) - tk := InitTestKeepers(t, uint64(1)) + tk := InitTestKeepers(t) msg1 := dg.GenerateMessageWithRandomSubmitterForEpoch(1) tk.BTCLightClient.SetDepth(b1Hash(msg1), int64(5)) @@ -367,27 +358,25 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { _, err = tk.insertProofMsg(msg1a) require.NoError(t, err, "failed to insert submission for epoch 1") - tk.setEpoch(2) - msg2 := dg.GenerateMessageWithRandomSubmitterForEpoch(1) + msg2 := dg.GenerateMessageWithRandomSubmitterForEpoch(2) 
tk.BTCLightClient.SetDepth(b1Hash(msg2), int64(3)) tk.BTCLightClient.SetDepth(b2Hash(msg2), int64(2)) _, err = tk.insertProofMsg(msg2) require.NoError(t, err, "failed to insert submission for epoch 2") - msg2a := dg.GenerateMessageWithRandomSubmitterForEpoch(1) + msg2a := dg.GenerateMessageWithRandomSubmitterForEpoch(2) tk.BTCLightClient.SetDepth(b1Hash(msg2a), int64(3)) tk.BTCLightClient.SetDepth(b2Hash(msg2a), int64(2)) _, err = tk.insertProofMsg(msg2a) require.NoError(t, err, "failed to insert submission for epoch 2") - tk.setEpoch(3) - msg3 := dg.GenerateMessageWithRandomSubmitterForEpoch(1) + msg3 := dg.GenerateMessageWithRandomSubmitterForEpoch(3) tk.BTCLightClient.SetDepth(b1Hash(msg3), int64(1)) tk.BTCLightClient.SetDepth(b2Hash(msg3), int64(0)) _, err = tk.insertProofMsg(msg3) require.NoError(t, err, "failed to insert submission for epoch 3") - msg3a := dg.GenerateMessageWithRandomSubmitterForEpoch(1) + msg3a := dg.GenerateMessageWithRandomSubmitterForEpoch(3) tk.BTCLightClient.SetDepth(b1Hash(msg3a), int64(1)) tk.BTCLightClient.SetDepth(b2Hash(msg3a), int64(0)) _, err = tk.insertProofMsg(msg3a) @@ -395,7 +384,7 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { for i := 1; i <= 3; i++ { // all 3 epoch must have two submissions - ed := tk.getEpochData(uint64(i)) + ed := tk.GetEpochData(uint64(i)) require.NotNil(t, ed) require.Len(t, ed.Key, 2) require.EqualValues(t, ed.Status, btcctypes.Submitted) @@ -409,7 +398,7 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { tk.onTipChange() for i := 1; i <= 3; i++ { - ed := tk.getEpochData(uint64(i)) + ed := tk.GetEpochData(uint64(i)) require.NotNil(t, ed) if i == 1 { @@ -430,7 +419,7 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { for i := 1; i <= 3; i++ { // all 3 epoch must have two submissions - ed := tk.getEpochData(uint64(i)) + ed := tk.GetEpochData(uint64(i)) require.NotNil(t, ed) require.Len(t, ed.Key, 0) require.EqualValues(t, ed.Status, 
btcctypes.Submitted) @@ -439,7 +428,7 @@ func TestClearChildEpochsWhenNoParenNotOnMainChain(t *testing.T) { func TestLeaveOnlyBestSubmissionWhenEpochFinalized(t *testing.T) { rand.Seed(time.Now().Unix()) - tk := InitTestKeepers(t, uint64(1)) + tk := InitTestKeepers(t) defaultParams := btcctypes.DefaultParams() wDeep := defaultParams.CheckpointFinalizationTimeout @@ -461,7 +450,7 @@ func TestLeaveOnlyBestSubmissionWhenEpochFinalized(t *testing.T) { _, err = tk.insertProofMsg(msg3) require.NoError(t, err, "failed to insert submission") - ed := tk.getEpochData(uint64(1)) + ed := tk.GetEpochData(uint64(1)) require.NotNil(t, ed) require.Len(t, ed.Key, 3) @@ -475,7 +464,7 @@ func TestLeaveOnlyBestSubmissionWhenEpochFinalized(t *testing.T) { tk.onTipChange() - ed = tk.getEpochData(uint64(1)) + ed = tk.GetEpochData(uint64(1)) require.NotNil(t, ed) require.Len(t, ed.Key, 1) require.Equal(t, ed.Status, btcctypes.Finalized) @@ -488,7 +477,7 @@ func TestLeaveOnlyBestSubmissionWhenEpochFinalized(t *testing.T) { func TestTxIdxShouldBreakTies(t *testing.T) { rand.Seed(time.Now().Unix()) - tk := InitTestKeepers(t, uint64(1)) + tk := InitTestKeepers(t) defaultParams := btcctypes.DefaultParams() wDeep := defaultParams.CheckpointFinalizationTimeout @@ -504,7 +493,7 @@ func TestTxIdxShouldBreakTies(t *testing.T) { _, err = tk.insertProofMsg(msg2) require.NoError(t, err, "failed to insert submission") - ed := tk.getEpochData(uint64(1)) + ed := tk.GetEpochData(uint64(1)) require.NotNil(t, ed) require.Len(t, ed.Key, 2) @@ -518,7 +507,7 @@ func TestTxIdxShouldBreakTies(t *testing.T) { tk.onTipChange() - ed = tk.getEpochData(uint64(1)) + ed = tk.GetEpochData(uint64(1)) require.NotNil(t, ed) require.Len(t, ed.Key, 1) require.Equal(t, ed.Status, btcctypes.Finalized) @@ -541,12 +530,12 @@ func TestStateTransitionOfValidSubmission(t *testing.T) { defaultParams := btcctypes.DefaultParams() kDeep := defaultParams.BtcConfirmationDepth wDeep := defaultParams.CheckpointFinalizationTimeout - raw := 
dg.RandomRawCheckpointDataForEpoch(epoch) + raw, _ := dg.RandomRawCheckpointDataForEpoch(epoch) blck1 := dg.CreateBlock(1, 7, 7, raw.FirstPart) blck2 := dg.CreateBlock(2, 14, 3, raw.SecondPart) - tk := InitTestKeepers(t, epoch) + tk := InitTestKeepers(t) msg := dg.GenerateMessageWithRandomSubmitter([]*dg.BlockCreationResult{blck1, blck2}) @@ -561,7 +550,7 @@ func TestStateTransitionOfValidSubmission(t *testing.T) { } // TODO customs Equality for submission keys - ed := tk.getEpochData(epoch) + ed := tk.GetEpochData(epoch) if len(ed.Key) != 1 { t.Errorf("Unexpected missing submissions") @@ -579,7 +568,7 @@ func TestStateTransitionOfValidSubmission(t *testing.T) { tk.onTipChange() // TODO customs Equality for submission keys to check this are really keys // we are looking for - ed = tk.getEpochData(epoch) + ed = tk.GetEpochData(epoch) if len(ed.Key) != 1 { t.Errorf("Unexpected missing submission") @@ -594,7 +583,7 @@ func TestStateTransitionOfValidSubmission(t *testing.T) { tk.onTipChange() - ed = tk.getEpochData(epoch) + ed = tk.GetEpochData(epoch) if ed == nil || ed.Status != btcctypes.Finalized { t.Errorf("Epoch Data missing of in unexpected state") diff --git a/x/btccheckpoint/types/btccheckpoint.pb.go b/x/btccheckpoint/types/btccheckpoint.pb.go index 8e9bb809e..f85ff42ef 100644 --- a/x/btccheckpoint/types/btccheckpoint.pb.go +++ b/x/btccheckpoint/types/btccheckpoint.pb.go @@ -322,10 +322,8 @@ func (m *TransactionInfo) GetProof() []byte { // depth/block number info, without context (i.e info about chain) is pretty useless // and blockshash in enough to retrieve is from lightclient type SubmissionData struct { - // TODO: this could probably be better typed - // Address of submitter of given checkpoint. 
Required to payup the reward to - // submitter of given checkpoint - Submitter []byte `protobuf:"bytes,1,opt,name=submitter,proto3" json:"submitter,omitempty"` + // address of the submitter and reporter + VigilanteAddresses *CheckpointAddresses `protobuf:"bytes,1,opt,name=vigilante_addresses,json=vigilanteAddresses,proto3" json:"vigilante_addresses,omitempty"` // txs_info is the two `TransactionInfo`s corresponding to the submission // It is used for // - recovering address of sender of btc transction to payup the reward. @@ -367,9 +365,9 @@ func (m *SubmissionData) XXX_DiscardUnknown() { var xxx_messageInfo_SubmissionData proto.InternalMessageInfo -func (m *SubmissionData) GetSubmitter() []byte { +func (m *SubmissionData) GetVigilanteAddresses() *CheckpointAddresses { if m != nil { - return m.Submitter + return m.VigilanteAddresses } return nil } @@ -396,8 +394,6 @@ type EpochData struct { Key []*SubmissionKey `protobuf:"bytes,1,rep,name=key,proto3" json:"key,omitempty"` // Current btc status of the epoch Status BtcStatus `protobuf:"varint,2,opt,name=status,proto3,enum=babylon.btccheckpoint.v1.BtcStatus" json:"status,omitempty"` - // Required to comunicate with checkpoint module about checkpoint status - RawCheckpoint []byte `protobuf:"bytes,3,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` } func (m *EpochData) Reset() { *m = EpochData{} } @@ -447,9 +443,130 @@ func (m *EpochData) GetStatus() BtcStatus { return Submitted } -func (m *EpochData) GetRawCheckpoint() []byte { +type CheckpointAddresses struct { + // TODO: this could probably be better typed + // Address of the checkpoint submitter, extracted from the checkpoint itself. 
+ Submitter []byte `protobuf:"bytes,1,opt,name=submitter,proto3" json:"submitter,omitempty"` + // Address of the reporter which reported the submissions, calculated from + // submission message MsgInsertBTCSpvProof itself + Reporter []byte `protobuf:"bytes,2,opt,name=reporter,proto3" json:"reporter,omitempty"` +} + +func (m *CheckpointAddresses) Reset() { *m = CheckpointAddresses{} } +func (m *CheckpointAddresses) String() string { return proto.CompactTextString(m) } +func (*CheckpointAddresses) ProtoMessage() {} +func (*CheckpointAddresses) Descriptor() ([]byte, []int) { + return fileDescriptor_da8b9af3dbd18a36, []int{6} +} +func (m *CheckpointAddresses) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckpointAddresses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckpointAddresses.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckpointAddresses) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckpointAddresses.Merge(m, src) +} +func (m *CheckpointAddresses) XXX_Size() int { + return m.Size() +} +func (m *CheckpointAddresses) XXX_DiscardUnknown() { + xxx_messageInfo_CheckpointAddresses.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckpointAddresses proto.InternalMessageInfo + +func (m *CheckpointAddresses) GetSubmitter() []byte { + if m != nil { + return m.Submitter + } + return nil +} + +func (m *CheckpointAddresses) GetReporter() []byte { + if m != nil { + return m.Reporter + } + return nil +} + +type BTCCheckpointInfo struct { + // epoch number of this checkpoint + EpochNumber uint64 `protobuf:"varint,1,opt,name=epoch_number,json=epochNumber,proto3" json:"epoch_number,omitempty"` + // height of earliest BTC block that includes this checkpoint + EarliestBtcBlockNumber uint64 
`protobuf:"varint,2,opt,name=earliest_btc_block_number,json=earliestBtcBlockNumber,proto3" json:"earliest_btc_block_number,omitempty"` + // hash of earliest BTC block that includes this checkpoint + EarliestBtcBlockHash []byte `protobuf:"bytes,3,opt,name=earliest_btc_block_hash,json=earliestBtcBlockHash,proto3" json:"earliest_btc_block_hash,omitempty"` + // list of vigilantes' addresses + VigilanteAddressList []*CheckpointAddresses `protobuf:"bytes,4,rep,name=vigilante_address_list,json=vigilanteAddressList,proto3" json:"vigilante_address_list,omitempty"` +} + +func (m *BTCCheckpointInfo) Reset() { *m = BTCCheckpointInfo{} } +func (m *BTCCheckpointInfo) String() string { return proto.CompactTextString(m) } +func (*BTCCheckpointInfo) ProtoMessage() {} +func (*BTCCheckpointInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_da8b9af3dbd18a36, []int{7} +} +func (m *BTCCheckpointInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BTCCheckpointInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BTCCheckpointInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BTCCheckpointInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_BTCCheckpointInfo.Merge(m, src) +} +func (m *BTCCheckpointInfo) XXX_Size() int { + return m.Size() +} +func (m *BTCCheckpointInfo) XXX_DiscardUnknown() { + xxx_messageInfo_BTCCheckpointInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_BTCCheckpointInfo proto.InternalMessageInfo + +func (m *BTCCheckpointInfo) GetEpochNumber() uint64 { + if m != nil { + return m.EpochNumber + } + return 0 +} + +func (m *BTCCheckpointInfo) GetEarliestBtcBlockNumber() uint64 { if m != nil { - return m.RawCheckpoint + return m.EarliestBtcBlockNumber + } + return 0 +} + +func (m *BTCCheckpointInfo) GetEarliestBtcBlockHash() []byte { + if m != nil { + return 
m.EarliestBtcBlockHash + } + return nil +} + +func (m *BTCCheckpointInfo) GetVigilanteAddressList() []*CheckpointAddresses { + if m != nil { + return m.VigilanteAddressList } return nil } @@ -462,6 +579,8 @@ func init() { proto.RegisterType((*TransactionInfo)(nil), "babylon.btccheckpoint.v1.TransactionInfo") proto.RegisterType((*SubmissionData)(nil), "babylon.btccheckpoint.v1.SubmissionData") proto.RegisterType((*EpochData)(nil), "babylon.btccheckpoint.v1.EpochData") + proto.RegisterType((*CheckpointAddresses)(nil), "babylon.btccheckpoint.v1.CheckpointAddresses") + proto.RegisterType((*BTCCheckpointInfo)(nil), "babylon.btccheckpoint.v1.BTCCheckpointInfo") } func init() { @@ -469,47 +588,55 @@ func init() { } var fileDescriptor_da8b9af3dbd18a36 = []byte{ - // 633 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, - 0x10, 0xcd, 0x86, 0xc0, 0xef, 0x97, 0x0d, 0x09, 0x68, 0x81, 0xca, 0x42, 0x95, 0x49, 0x53, 0x55, - 0x84, 0x1e, 0x12, 0x95, 0xb6, 0x12, 0xfd, 0x73, 0xc1, 0x49, 0x10, 0x11, 0xe5, 0x8f, 0x6c, 0x73, - 0xe1, 0x62, 0xad, 0x37, 0x9b, 0xd8, 0x22, 0xf1, 0x46, 0xde, 0x05, 0x92, 0xde, 0x2b, 0x55, 0x9c, - 0xaa, 0xde, 0x7b, 0xea, 0xad, 0x9f, 0xa4, 0x47, 0x8e, 0x15, 0x07, 0x54, 0xc1, 0xc7, 0xe8, 0xa5, - 0xf2, 0xae, 0x4b, 0x62, 0x5a, 0xd4, 0x72, 0xf3, 0xcc, 0xbe, 0x99, 0x7d, 0xef, 0xcd, 0x78, 0xe1, - 0x8a, 0x8b, 0xdd, 0x61, 0x97, 0x05, 0x55, 0x57, 0x10, 0xe2, 0x51, 0x72, 0xd8, 0x67, 0x7e, 0x20, - 0x92, 0x51, 0xa5, 0x1f, 0x32, 0xc1, 0x90, 0x16, 0x43, 0x2b, 0xc9, 0xc3, 0xe3, 0x27, 0x8b, 0xf3, - 0x1d, 0xd6, 0x61, 0x12, 0x54, 0x8d, 0xbe, 0x14, 0xbe, 0xf4, 0x03, 0xc0, 0x9c, 0x61, 0xd7, 0xac, - 0xfe, 0xf1, 0x5e, 0xc8, 0x58, 0x1b, 0x2d, 0xc3, 0x19, 0x57, 0x10, 0x47, 0x84, 0x38, 0xe0, 0x98, - 0x08, 0x9f, 0x05, 0x1a, 0x28, 0x82, 0xf2, 0xb4, 0x59, 0x70, 0x05, 0xb1, 0x47, 0x59, 0xb4, 0x0a, - 0x17, 0x6e, 0x00, 0x1d, 0x3f, 0x68, 0xd1, 0x81, 0x96, 0x2e, 0x82, 0x72, 0xde, 0x9c, 0x4b, 0xc2, - 0x9b, 
0xd1, 0x11, 0x7a, 0x00, 0xa7, 0x7b, 0x34, 0x3c, 0xec, 0x52, 0x27, 0x60, 0x2d, 0xca, 0xb5, - 0x09, 0xd9, 0x39, 0xa7, 0x72, 0x3b, 0x51, 0x0a, 0x75, 0xe1, 0x02, 0x61, 0x41, 0xdb, 0x0f, 0x7b, - 0x7e, 0xd0, 0x71, 0xa2, 0x1b, 0x3c, 0x8a, 0x5b, 0x34, 0xd4, 0x32, 0x11, 0xd6, 0x58, 0x3b, 0xbf, - 0x58, 0x7a, 0xd6, 0xf1, 0x85, 0x77, 0xe4, 0x56, 0x08, 0xeb, 0x55, 0x63, 0xb5, 0xc4, 0xc3, 0x7e, - 0xf0, 0x2b, 0xa8, 0x8a, 0x61, 0x9f, 0xf2, 0x8a, 0x61, 0xd7, 0x36, 0x65, 0xa9, 0x31, 0x14, 0x94, - 0x9b, 0x73, 0xa3, 0xb6, 0x86, 0x20, 0xea, 0xa4, 0x34, 0x80, 0x85, 0x31, 0x92, 0x5b, 0x74, 0x88, - 0xe6, 0xe1, 0xa4, 0x92, 0x01, 0xa4, 0x0c, 0x15, 0xa0, 0x3d, 0x98, 0xf1, 0x30, 0xf7, 0xa4, 0xb6, - 0x69, 0xe3, 0xf5, 0xf9, 0xc5, 0xd2, 0xda, 0x1d, 0x49, 0x6c, 0x62, 0xee, 0x29, 0x22, 0xb2, 0x53, - 0x69, 0x0b, 0xe6, 0xad, 0x23, 0xb7, 0xe7, 0x73, 0x1e, 0x5f, 0xfc, 0x12, 0x4e, 0x1c, 0xd2, 0xa1, - 0x06, 0x8a, 0x13, 0xe5, 0xdc, 0x6a, 0xb9, 0x72, 0xdb, 0x18, 0x2b, 0x49, 0xbe, 0x66, 0x54, 0x54, - 0x7a, 0x07, 0xe0, 0x4c, 0xc2, 0xec, 0x36, 0x1b, 0xf5, 0x03, 0x77, 0xee, 0x87, 0x8a, 0x30, 0x37, - 0xbe, 0x00, 0x69, 0x35, 0xa6, 0xb1, 0x54, 0x64, 0x53, 0x3f, 0xda, 0x97, 0x78, 0x84, 0x2a, 0x28, - 0x9d, 0x02, 0x58, 0x18, 0xa9, 0xaa, 0x63, 0x81, 0xd1, 0x7d, 0x98, 0xe5, 0x51, 0x46, 0x08, 0x1a, - 0xc6, 0x9b, 0x34, 0x4a, 0xa0, 0x3a, 0xfc, 0x5f, 0x0c, 0xb8, 0xe3, 0x07, 0x6d, 0xa6, 0xa5, 0xa5, - 0xf2, 0x95, 0x7f, 0x62, 0x1a, 0x29, 0x34, 0xff, 0x13, 0x03, 0x2e, 0xa5, 0xce, 0xc3, 0x49, 0xda, - 0x67, 0xc4, 0x93, 0x64, 0x32, 0xa6, 0x0a, 0x4a, 0x5f, 0x00, 0xcc, 0x36, 0xa2, 0x2f, 0xc9, 0xe3, - 0xc5, 0xb8, 0xbd, 0xcb, 0xb7, 0x5f, 0x92, 0x18, 0x8a, 0x72, 0xe3, 0x15, 0x9c, 0xe2, 0x02, 0x8b, - 0x23, 0x2e, 0x8d, 0x28, 0xac, 0x3e, 0xbc, 0xbd, 0xda, 0x10, 0xc4, 0x92, 0x50, 0x33, 0x2e, 0x41, - 0x8f, 0x60, 0x21, 0xc4, 0x27, 0xce, 0x08, 0x16, 0x3b, 0x96, 0x0f, 0xf1, 0x49, 0xed, 0x3a, 0xf9, - 0xf8, 0x23, 0x80, 0xd9, 0xeb, 0x62, 0xb4, 0x02, 0xef, 0x35, 0xf6, 0x76, 0x6b, 0x9b, 0x8e, 0x65, - 0xaf, 0xdb, 0xfb, 0x96, 0x63, 0xed, 0x1b, 
0xdb, 0x4d, 0xdb, 0x6e, 0xd4, 0x67, 0x53, 0x8b, 0xf9, - 0xd3, 0x4f, 0xc5, 0xac, 0x15, 0x3b, 0xd8, 0xfa, 0x0d, 0x5a, 0xdb, 0xdd, 0xd9, 0x68, 0x9a, 0xdb, - 0x8d, 0xfa, 0x2c, 0x50, 0xd0, 0x9a, 0x5a, 0xfb, 0x3f, 0x40, 0x37, 0x9a, 0x3b, 0xeb, 0x6f, 0x9a, - 0x07, 0x8d, 0xfa, 0x6c, 0x5a, 0x41, 0x37, 0xfc, 0x00, 0x77, 0xfd, 0xb7, 0xb4, 0xb5, 0x98, 0x79, - 0xff, 0x59, 0x4f, 0x19, 0xbb, 0x5f, 0x2f, 0x75, 0x70, 0x76, 0xa9, 0x83, 0xef, 0x97, 0x3a, 0xf8, - 0x70, 0xa5, 0xa7, 0xce, 0xae, 0xf4, 0xd4, 0xb7, 0x2b, 0x3d, 0x75, 0xf0, 0xfc, 0x6f, 0xdb, 0x3f, - 0xb8, 0xf1, 0x54, 0xc9, 0xbf, 0xc1, 0x9d, 0x92, 0x6f, 0xce, 0xd3, 0x9f, 0x01, 0x00, 0x00, 0xff, - 0xff, 0xc7, 0x34, 0x1c, 0x27, 0xd0, 0x04, 0x00, 0x00, + // 768 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcf, 0x4f, 0xdb, 0x48, + 0x14, 0x8e, 0x43, 0x60, 0xc9, 0x24, 0x04, 0x76, 0x12, 0x58, 0x6f, 0xb4, 0x0a, 0xc1, 0x7b, 0x20, + 0xac, 0xb4, 0x89, 0x96, 0x5d, 0x24, 0xd8, 0xdd, 0x0b, 0x4e, 0x82, 0x88, 0x80, 0x04, 0x39, 0xe6, + 0xc2, 0xa1, 0x96, 0xed, 0x4c, 0xe2, 0x51, 0x1c, 0x4f, 0xe4, 0x99, 0xa0, 0xa4, 0xd7, 0xaa, 0x52, + 0x55, 0xa9, 0x52, 0xd5, 0x7b, 0x4f, 0xfd, 0x67, 0x7a, 0xe8, 0x81, 0x63, 0xc5, 0x01, 0x55, 0xf0, + 0x67, 0xf4, 0x52, 0x79, 0xec, 0xfc, 0x84, 0xa8, 0xe5, 0xe6, 0xf7, 0xde, 0xf7, 0xde, 0xbc, 0xef, + 0x7b, 0x6f, 0xc6, 0x60, 0xc7, 0xd0, 0x8d, 0x81, 0x4d, 0x9c, 0x82, 0xc1, 0x4c, 0xd3, 0x42, 0x66, + 0xbb, 0x4b, 0xb0, 0xc3, 0xa6, 0xad, 0x7c, 0xd7, 0x25, 0x8c, 0x40, 0x31, 0x80, 0xe6, 0xa7, 0x83, + 0x57, 0x7f, 0xa5, 0x53, 0x2d, 0xd2, 0x22, 0x1c, 0x54, 0xf0, 0xbe, 0x7c, 0xbc, 0xf4, 0x55, 0x00, + 0x31, 0x59, 0x2d, 0xd6, 0xbb, 0x57, 0xe7, 0x2e, 0x21, 0x4d, 0xb8, 0x0d, 0x56, 0x0d, 0x66, 0x6a, + 0xcc, 0xd5, 0x1d, 0xaa, 0x9b, 0x0c, 0x13, 0x47, 0x14, 0xb2, 0x42, 0x2e, 0xae, 0x24, 0x0c, 0x66, + 0xaa, 0x63, 0x2f, 0xdc, 0x05, 0xeb, 0x33, 0x40, 0x0d, 0x3b, 0x0d, 0xd4, 0x17, 0xc3, 0x59, 0x21, + 0xb7, 0xa2, 0x24, 0xa7, 0xe1, 0x15, 0x2f, 0x04, 0xb7, 0x40, 0xbc, 0x83, 
0xdc, 0xb6, 0x8d, 0x34, + 0x87, 0x34, 0x10, 0x15, 0x17, 0x78, 0xe5, 0x98, 0xef, 0xab, 0x7a, 0x2e, 0x68, 0x83, 0x75, 0x93, + 0x38, 0x4d, 0xec, 0x76, 0xb0, 0xd3, 0xd2, 0xbc, 0x13, 0x2c, 0xa4, 0x37, 0x90, 0x2b, 0x46, 0x3c, + 0xac, 0xbc, 0x7f, 0x73, 0xbb, 0xf9, 0x4f, 0x0b, 0x33, 0xab, 0x67, 0xe4, 0x4d, 0xd2, 0x29, 0x04, + 0x6c, 0x4d, 0x4b, 0xc7, 0xce, 0xd0, 0x28, 0xb0, 0x41, 0x17, 0xd1, 0xbc, 0xac, 0x16, 0x8f, 0x79, + 0xaa, 0x3c, 0x60, 0x88, 0x2a, 0xc9, 0x71, 0x59, 0x99, 0x99, 0x7e, 0x44, 0xea, 0x83, 0xc4, 0x44, + 0x93, 0x27, 0x68, 0x00, 0x53, 0x60, 0xd1, 0xa7, 0x21, 0x70, 0x1a, 0xbe, 0x01, 0xcf, 0x41, 0xc4, + 0xd2, 0xa9, 0xc5, 0xb9, 0xc5, 0xe5, 0xff, 0x6f, 0x6e, 0x37, 0xf7, 0x9f, 0xd8, 0xc4, 0xb1, 0x4e, + 0x2d, 0xbf, 0x11, 0x5e, 0x49, 0x3a, 0x01, 0x2b, 0xf5, 0x9e, 0xd1, 0xc1, 0x94, 0x06, 0x07, 0xff, + 0x0b, 0x16, 0xda, 0x68, 0x20, 0x0a, 0xd9, 0x85, 0x5c, 0x6c, 0x37, 0x97, 0x9f, 0x37, 0xc6, 0xfc, + 0x74, 0xbf, 0x8a, 0x97, 0x24, 0xbd, 0x14, 0xc0, 0xea, 0x94, 0xd8, 0x4d, 0x32, 0xae, 0x27, 0x3c, + 0xb9, 0x1e, 0xcc, 0x82, 0xd8, 0xe4, 0x02, 0x84, 0xfd, 0x31, 0x4d, 0xb8, 0x3c, 0x99, 0xba, 0xde, + 0xbe, 0x04, 0x23, 0xf4, 0x0d, 0xe9, 0x93, 0x00, 0x12, 0x63, 0x56, 0x25, 0x9d, 0xe9, 0xf0, 0x19, + 0x48, 0x5e, 0xe1, 0x16, 0xb6, 0x75, 0x87, 0x21, 0x4d, 0x6f, 0x34, 0x5c, 0x44, 0x29, 0xa2, 0x41, + 0x5b, 0x7f, 0xce, 0x6f, 0xab, 0x38, 0xb2, 0x0e, 0x87, 0x49, 0x0a, 0x1c, 0x55, 0x1a, 0xf9, 0x60, + 0x09, 0x2c, 0xb3, 0x3e, 0xd5, 0xb0, 0xd3, 0x24, 0x62, 0x98, 0x6b, 0xb7, 0xf3, 0x43, 0x5c, 0x3d, + 0x8d, 0x94, 0x9f, 0x58, 0x9f, 0x72, 0xb1, 0x52, 0x60, 0x11, 0x75, 0x89, 0x69, 0x71, 0x3a, 0x11, + 0xc5, 0x37, 0xa4, 0x17, 0x02, 0x88, 0x96, 0xbd, 0x2f, 0xce, 0xe4, 0x60, 0x72, 0x40, 0xdb, 0xf3, + 0x0f, 0x99, 0x1a, 0xab, 0xaf, 0xe7, 0x7f, 0x60, 0x89, 0x32, 0x9d, 0xf5, 0x28, 0x97, 0x32, 0xb1, + 0xfb, 0xfb, 0xfc, 0x6c, 0x99, 0x99, 0x75, 0x0e, 0x55, 0x82, 0x14, 0xa9, 0x06, 0x92, 0x8f, 0x88, + 0x01, 0x7f, 0x03, 0x51, 0xea, 0x9d, 0xc4, 0x18, 0x72, 0x83, 0x2b, 0x3a, 0x76, 0xc0, 0x34, 0x58, + 0x76, 0x51, 
0x97, 0xb8, 0x5e, 0xd0, 0x1f, 0xdf, 0xc8, 0x96, 0xde, 0x84, 0xc1, 0xcf, 0xb2, 0x5a, + 0x1c, 0x17, 0xe5, 0x12, 0x6c, 0x81, 0x38, 0x67, 0xad, 0x39, 0xbd, 0x8e, 0x11, 0x94, 0x8c, 0x28, + 0x31, 0xee, 0xab, 0x72, 0x17, 0x3c, 0x00, 0xbf, 0x22, 0xdd, 0xb5, 0x31, 0xa2, 0x8c, 0xdf, 0x4c, + 0xc3, 0x26, 0x66, 0x7b, 0x88, 0x0f, 0x73, 0xfc, 0xc6, 0x10, 0x20, 0x33, 0x53, 0xf6, 0xc2, 0x41, + 0xea, 0x1e, 0xf8, 0xe5, 0x91, 0x54, 0x7e, 0xa7, 0xfc, 0x0d, 0x4a, 0xcd, 0x26, 0x7a, 0x17, 0x06, + 0x9a, 0x60, 0xe3, 0xc1, 0xf6, 0x68, 0x36, 0xa6, 0x4c, 0x8c, 0xf0, 0x31, 0x3c, 0x71, 0x81, 0x52, + 0xb3, 0x0b, 0x74, 0x8a, 0x29, 0xfb, 0xe3, 0x9d, 0x00, 0xa2, 0x23, 0xd9, 0xe1, 0x0e, 0xd8, 0x28, + 0x9f, 0xd7, 0x8a, 0xc7, 0x5a, 0x5d, 0x3d, 0x54, 0x2f, 0xea, 0x5a, 0xfd, 0x42, 0x3e, 0xab, 0xa8, + 0x6a, 0xb9, 0xb4, 0x16, 0x4a, 0xaf, 0xbc, 0x7e, 0x9f, 0x8d, 0xd6, 0x03, 0x91, 0x1b, 0x0f, 0xa0, + 0xc5, 0x5a, 0xf5, 0xa8, 0xa2, 0x9c, 0x95, 0x4b, 0x6b, 0x82, 0x0f, 0x2d, 0xfa, 0x4f, 0xce, 0x23, + 0xd0, 0xa3, 0x4a, 0xf5, 0xf0, 0xb4, 0x72, 0x59, 0x2e, 0xad, 0x85, 0x7d, 0xe8, 0x11, 0x76, 0x74, + 0x1b, 0x3f, 0x47, 0x8d, 0x74, 0xe4, 0xd5, 0x87, 0x4c, 0x48, 0xae, 0x7d, 0xbc, 0xcb, 0x08, 0xd7, + 0x77, 0x19, 0xe1, 0xcb, 0x5d, 0x46, 0x78, 0x7b, 0x9f, 0x09, 0x5d, 0xdf, 0x67, 0x42, 0x9f, 0xef, + 0x33, 0xa1, 0xcb, 0xbd, 0xef, 0xbd, 0x3c, 0xfd, 0x99, 0xdf, 0x04, 0x7f, 0x89, 0x8c, 0x25, 0xfe, + 0xde, 0xff, 0xfd, 0x2d, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x76, 0xd4, 0x3c, 0x4c, 0x06, 0x00, 0x00, } func (m *BTCSpvProof) Marshal() (dAtA []byte, err error) { @@ -731,10 +858,15 @@ func (m *SubmissionData) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x12 } } - if len(m.Submitter) > 0 { - i -= len(m.Submitter) - copy(dAtA[i:], m.Submitter) - i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.Submitter))) + if m.VigilanteAddresses != nil { + { + size, err := m.VigilanteAddresses.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBtccheckpoint(dAtA, i, uint64(size)) + } i-- 
dAtA[i] = 0xa } @@ -761,13 +893,6 @@ func (m *EpochData) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.RawCheckpoint) > 0 { - i -= len(m.RawCheckpoint) - copy(dAtA[i:], m.RawCheckpoint) - i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.RawCheckpoint))) - i-- - dAtA[i] = 0x1a - } if m.Status != 0 { i = encodeVarintBtccheckpoint(dAtA, i, uint64(m.Status)) i-- @@ -790,6 +915,97 @@ func (m *EpochData) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *CheckpointAddresses) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointAddresses) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckpointAddresses) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Reporter) > 0 { + i -= len(m.Reporter) + copy(dAtA[i:], m.Reporter) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.Reporter))) + i-- + dAtA[i] = 0x12 + } + if len(m.Submitter) > 0 { + i -= len(m.Submitter) + copy(dAtA[i:], m.Submitter) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.Submitter))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BTCCheckpointInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BTCCheckpointInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BTCCheckpointInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VigilanteAddressList) > 0 { + for iNdEx := len(m.VigilanteAddressList) - 1; iNdEx >= 0; iNdEx-- { + { + 
size, err := m.VigilanteAddressList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBtccheckpoint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.EarliestBtcBlockHash) > 0 { + i -= len(m.EarliestBtcBlockHash) + copy(dAtA[i:], m.EarliestBtcBlockHash) + i = encodeVarintBtccheckpoint(dAtA, i, uint64(len(m.EarliestBtcBlockHash))) + i-- + dAtA[i] = 0x1a + } + if m.EarliestBtcBlockNumber != 0 { + i = encodeVarintBtccheckpoint(dAtA, i, uint64(m.EarliestBtcBlockNumber)) + i-- + dAtA[i] = 0x10 + } + if m.EpochNumber != 0 { + i = encodeVarintBtccheckpoint(dAtA, i, uint64(m.EpochNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintBtccheckpoint(dAtA []byte, offset int, v uint64) int { offset -= sovBtccheckpoint(v) base := offset @@ -883,8 +1099,8 @@ func (m *SubmissionData) Size() (n int) { } var l int _ = l - l = len(m.Submitter) - if l > 0 { + if m.VigilanteAddresses != nil { + l = m.VigilanteAddresses.Size() n += 1 + l + sovBtccheckpoint(uint64(l)) } if len(m.TxsInfo) > 0 { @@ -914,10 +1130,48 @@ func (m *EpochData) Size() (n int) { if m.Status != 0 { n += 1 + sovBtccheckpoint(uint64(m.Status)) } - l = len(m.RawCheckpoint) + return n +} + +func (m *CheckpointAddresses) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Submitter) + if l > 0 { + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + l = len(m.Reporter) + if l > 0 { + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + return n +} + +func (m *BTCCheckpointInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNumber != 0 { + n += 1 + sovBtccheckpoint(uint64(m.EpochNumber)) + } + if m.EarliestBtcBlockNumber != 0 { + n += 1 + sovBtccheckpoint(uint64(m.EarliestBtcBlockNumber)) + } + l = len(m.EarliestBtcBlockHash) if l > 0 { n += 1 + l + sovBtccheckpoint(uint64(l)) } + if len(m.VigilanteAddressList) > 0 { + for _, e := range m.VigilanteAddressList { + l = 
e.Size() + n += 1 + l + sovBtccheckpoint(uint64(l)) + } + } return n } @@ -1472,9 +1726,9 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Submitter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VigilanteAddresses", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowBtccheckpoint @@ -1484,24 +1738,26 @@ func (m *SubmissionData) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthBtccheckpoint } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthBtccheckpoint } if postIndex > l { return io.ErrUnexpectedEOF } - m.Submitter = append(m.Submitter[:0], dAtA[iNdEx:postIndex]...) - if m.Submitter == nil { - m.Submitter = []byte{} + if m.VigilanteAddresses == nil { + m.VigilanteAddresses = &CheckpointAddresses{} + } + if err := m.VigilanteAddresses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 2: @@ -1660,9 +1916,215 @@ func (m *EpochData) Unmarshal(dAtA []byte) error { break } } + default: + iNdEx = preIndex + skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBtccheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckpointAddresses) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckpointAddresses: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckpointAddresses: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Submitter", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Submitter = append(m.Submitter[:0], dAtA[iNdEx:postIndex]...) + if m.Submitter == nil { + m.Submitter = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reporter", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reporter = append(m.Reporter[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Reporter == nil { + m.Reporter = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBtccheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBtccheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BTCCheckpointInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BTCCheckpointInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BTCCheckpointInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNumber", wireType) + } + m.EpochNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNumber |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockNumber", wireType) + } + m.EarliestBtcBlockNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EarliestBtcBlockNumber |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } case 3: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field RawCheckpoint", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -1689,9 +2151,43 @@ func (m *EpochData) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RawCheckpoint = append(m.RawCheckpoint[:0], dAtA[iNdEx:postIndex]...) - if m.RawCheckpoint == nil { - m.RawCheckpoint = []byte{} + m.EarliestBtcBlockHash = append(m.EarliestBtcBlockHash[:0], dAtA[iNdEx:postIndex]...) + if m.EarliestBtcBlockHash == nil { + m.EarliestBtcBlockHash = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VigilanteAddressList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBtccheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBtccheckpoint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBtccheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VigilanteAddressList = append(m.VigilanteAddressList, &CheckpointAddresses{}) + if err := m.VigilanteAddressList[len(m.VigilanteAddressList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: diff --git a/x/btccheckpoint/types/expected_keepers.go b/x/btccheckpoint/types/expected_keepers.go index dedf9afaa..d351b6a5b 100644 --- a/x/btccheckpoint/types/expected_keepers.go +++ b/x/btccheckpoint/types/expected_keepers.go @@ -1,7 +1,9 @@ package types import ( + txformat "github.com/babylonchain/babylon/btctxformatter" bbn "github.com/babylonchain/babylon/types" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/auth/types" ) @@ -30,10 +32,7 @@ type BTCLightClientKeeper interface { } type 
CheckpointingKeeper interface { - // CheckpointEpoch should return epoch index if provided rawCheckpoint - // passes all checkpointing validations and error otherwise - CheckpointEpoch(ctx sdk.Context, rawCheckpoint []byte) (uint64, error) - + VerifyCheckpoint(ctx sdk.Context, checkpoint txformat.RawBtcCheckpoint) error // It quite mouthfull to have 4 different methods to operate on checkpoint state // but this approach decouples both modules a bit more than having some kind // of shared enum passed into the methods. Both modules are free to evolve their diff --git a/x/btccheckpoint/types/mock_keepers.go b/x/btccheckpoint/types/mock_keepers.go index bbd0243ff..c541246b7 100644 --- a/x/btccheckpoint/types/mock_keepers.go +++ b/x/btccheckpoint/types/mock_keepers.go @@ -3,6 +3,7 @@ package types import ( "errors" + txformat "github.com/babylonchain/babylon/btctxformatter" bbn "github.com/babylonchain/babylon/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -12,7 +13,6 @@ type MockBTCLightClientKeeper struct { } type MockCheckpointingKeeper struct { - epoch uint64 returnError bool } @@ -23,18 +23,13 @@ func NewMockBTCLightClientKeeper() *MockBTCLightClientKeeper { return &lc } -func NewMockCheckpointingKeeper(epoch uint64) *MockCheckpointingKeeper { +func NewMockCheckpointingKeeper() *MockCheckpointingKeeper { mc := MockCheckpointingKeeper{ - epoch: epoch, returnError: false, } return &mc } -func (mc *MockCheckpointingKeeper) SetEpoch(e uint64) { - mc.epoch = e -} - func (mc *MockCheckpointingKeeper) ReturnError() { mc.returnError = true } @@ -61,12 +56,12 @@ func (ck MockBTCLightClientKeeper) MainChainDepth(ctx sdk.Context, headerBytes * } } -func (ck MockCheckpointingKeeper) CheckpointEpoch(ctx sdk.Context, rawCheckpoint []byte) (uint64, error) { +func (ck MockCheckpointingKeeper) VerifyCheckpoint(ctx sdk.Context, checkpoint txformat.RawBtcCheckpoint) error { if ck.returnError { - return 0, errors.New("bad checkpoints") + return errors.New("bad checkpoints") } - 
return ck.epoch, nil + return nil } // SetCheckpointSubmitted Informs checkpointing module that checkpoint was diff --git a/x/btccheckpoint/types/msgs.go b/x/btccheckpoint/types/msgs.go index d1f18d04d..9c4fccf06 100644 --- a/x/btccheckpoint/types/msgs.go +++ b/x/btccheckpoint/types/msgs.go @@ -72,7 +72,13 @@ func ParseTwoProofs( return nil, err } - sub := NewRawCheckpointSubmission(submitter, *parsedProofs[0], *parsedProofs[1], rawCkptData) + rawCheckpoint, err := txformat.DecodeRawCheckpoint(txformat.CurrentVersion, rawCkptData) + + if err != nil { + return nil, err + } + + sub := NewRawCheckpointSubmission(submitter, *parsedProofs[0], *parsedProofs[1], *rawCheckpoint) return &sub, nil } diff --git a/x/btccheckpoint/types/query.pb.go b/x/btccheckpoint/types/query.pb.go index 22af72a4b..c1227dbfd 100644 --- a/x/btccheckpoint/types/query.pb.go +++ b/x/btccheckpoint/types/query.pb.go @@ -113,23 +113,23 @@ func (m *QueryParamsResponse) GetParams() Params { return Params{} } -type QueryBtcCheckpointHeightRequest struct { +type QueryBtcCheckpointInfoRequest struct { // Number of epoch for which the earliest checkpointing btc height is requested EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` } -func (m *QueryBtcCheckpointHeightRequest) Reset() { *m = QueryBtcCheckpointHeightRequest{} } -func (m *QueryBtcCheckpointHeightRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBtcCheckpointHeightRequest) ProtoMessage() {} -func (*QueryBtcCheckpointHeightRequest) Descriptor() ([]byte, []int) { +func (m *QueryBtcCheckpointInfoRequest) Reset() { *m = QueryBtcCheckpointInfoRequest{} } +func (m *QueryBtcCheckpointInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointInfoRequest) ProtoMessage() {} +func (*QueryBtcCheckpointInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor_009c1165ec392ace, []int{2} } -func (m *QueryBtcCheckpointHeightRequest) 
XXX_Unmarshal(b []byte) error { +func (m *QueryBtcCheckpointInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryBtcCheckpointHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryBtcCheckpointInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryBtcCheckpointHeightRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryBtcCheckpointInfoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -139,43 +139,42 @@ func (m *QueryBtcCheckpointHeightRequest) XXX_Marshal(b []byte, deterministic bo return b[:n], nil } } -func (m *QueryBtcCheckpointHeightRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBtcCheckpointHeightRequest.Merge(m, src) +func (m *QueryBtcCheckpointInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointInfoRequest.Merge(m, src) } -func (m *QueryBtcCheckpointHeightRequest) XXX_Size() int { +func (m *QueryBtcCheckpointInfoRequest) XXX_Size() int { return m.Size() } -func (m *QueryBtcCheckpointHeightRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBtcCheckpointHeightRequest.DiscardUnknown(m) +func (m *QueryBtcCheckpointInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointInfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_QueryBtcCheckpointHeightRequest proto.InternalMessageInfo +var xxx_messageInfo_QueryBtcCheckpointInfoRequest proto.InternalMessageInfo -func (m *QueryBtcCheckpointHeightRequest) GetEpochNum() uint64 { +func (m *QueryBtcCheckpointInfoRequest) GetEpochNum() uint64 { if m != nil { return m.EpochNum } return 0 } -// QueryCurrentEpochResponse is response type for the Query/CurrentEpoch RPC method -type QueryBtcCheckpointHeightResponse struct { - // Earliest btc block number containing given raw checkpoint - EarliestBtcBlockNumber uint64 
`protobuf:"varint,1,opt,name=earliest_btc_block_number,json=earliestBtcBlockNumber,proto3" json:"earliest_btc_block_number,omitempty"` +// QueryBtcCheckpointInfoResponse is response type for the Query/BtcCheckpointInfo RPC method +type QueryBtcCheckpointInfoResponse struct { + Info *BTCCheckpointInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -func (m *QueryBtcCheckpointHeightResponse) Reset() { *m = QueryBtcCheckpointHeightResponse{} } -func (m *QueryBtcCheckpointHeightResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBtcCheckpointHeightResponse) ProtoMessage() {} -func (*QueryBtcCheckpointHeightResponse) Descriptor() ([]byte, []int) { +func (m *QueryBtcCheckpointInfoResponse) Reset() { *m = QueryBtcCheckpointInfoResponse{} } +func (m *QueryBtcCheckpointInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointInfoResponse) ProtoMessage() {} +func (*QueryBtcCheckpointInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor_009c1165ec392ace, []int{3} } -func (m *QueryBtcCheckpointHeightResponse) XXX_Unmarshal(b []byte) error { +func (m *QueryBtcCheckpointInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryBtcCheckpointHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryBtcCheckpointInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryBtcCheckpointHeightResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryBtcCheckpointInfoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -185,25 +184,141 @@ func (m *QueryBtcCheckpointHeightResponse) XXX_Marshal(b []byte, deterministic b return b[:n], nil } } -func (m *QueryBtcCheckpointHeightResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBtcCheckpointHeightResponse.Merge(m, src) +func (m 
*QueryBtcCheckpointInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointInfoResponse.Merge(m, src) } -func (m *QueryBtcCheckpointHeightResponse) XXX_Size() int { +func (m *QueryBtcCheckpointInfoResponse) XXX_Size() int { return m.Size() } -func (m *QueryBtcCheckpointHeightResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBtcCheckpointHeightResponse.DiscardUnknown(m) +func (m *QueryBtcCheckpointInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_QueryBtcCheckpointHeightResponse proto.InternalMessageInfo +var xxx_messageInfo_QueryBtcCheckpointInfoResponse proto.InternalMessageInfo -func (m *QueryBtcCheckpointHeightResponse) GetEarliestBtcBlockNumber() uint64 { +func (m *QueryBtcCheckpointInfoResponse) GetInfo() *BTCCheckpointInfo { if m != nil { - return m.EarliestBtcBlockNumber + return m.Info + } + return nil +} + +// QueryBtcCheckpointsInfoRequest is request type for the Query/BtcCheckpointsInfo RPC method +type QueryBtcCheckpointsInfoRequest struct { + StartEpoch uint64 `protobuf:"varint,1,opt,name=start_epoch,json=startEpoch,proto3" json:"start_epoch,omitempty"` + EndEpoch uint64 `protobuf:"varint,2,opt,name=end_epoch,json=endEpoch,proto3" json:"end_epoch,omitempty"` + // pagination defines whether to have the pagination in the request + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBtcCheckpointsInfoRequest) Reset() { *m = QueryBtcCheckpointsInfoRequest{} } +func (m *QueryBtcCheckpointsInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointsInfoRequest) ProtoMessage() {} +func (*QueryBtcCheckpointsInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_009c1165ec392ace, []int{4} +} +func (m *QueryBtcCheckpointsInfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBtcCheckpointsInfoRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBtcCheckpointsInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBtcCheckpointsInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointsInfoRequest.Merge(m, src) +} +func (m *QueryBtcCheckpointsInfoRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryBtcCheckpointsInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointsInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBtcCheckpointsInfoRequest proto.InternalMessageInfo + +func (m *QueryBtcCheckpointsInfoRequest) GetStartEpoch() uint64 { + if m != nil { + return m.StartEpoch + } + return 0 +} + +func (m *QueryBtcCheckpointsInfoRequest) GetEndEpoch() uint64 { + if m != nil { + return m.EndEpoch } return 0 } +func (m *QueryBtcCheckpointsInfoRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryBtcCheckpointsInfoResponse is response type for the Query/BtcCheckpointsInfo RPC method +type QueryBtcCheckpointsInfoResponse struct { + InfoList []*BTCCheckpointInfo `protobuf:"bytes,1,rep,name=info_list,json=infoList,proto3" json:"info_list,omitempty"` + // pagination defines the pagination in the response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBtcCheckpointsInfoResponse) Reset() { *m = QueryBtcCheckpointsInfoResponse{} } +func (m *QueryBtcCheckpointsInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBtcCheckpointsInfoResponse) ProtoMessage() {} +func (*QueryBtcCheckpointsInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_009c1165ec392ace, []int{5} +} +func (m *QueryBtcCheckpointsInfoResponse) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *QueryBtcCheckpointsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBtcCheckpointsInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBtcCheckpointsInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBtcCheckpointsInfoResponse.Merge(m, src) +} +func (m *QueryBtcCheckpointsInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryBtcCheckpointsInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBtcCheckpointsInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBtcCheckpointsInfoResponse proto.InternalMessageInfo + +func (m *QueryBtcCheckpointsInfoResponse) GetInfoList() []*BTCCheckpointInfo { + if m != nil { + return m.InfoList + } + return nil +} + +func (m *QueryBtcCheckpointsInfoResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + type QueryEpochSubmissionsRequest struct { // Number of epoch for which submissions are requested EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` @@ -214,7 +329,7 @@ func (m *QueryEpochSubmissionsRequest) Reset() { *m = QueryEpochSubmissi func (m *QueryEpochSubmissionsRequest) String() string { return proto.CompactTextString(m) } func (*QueryEpochSubmissionsRequest) ProtoMessage() {} func (*QueryEpochSubmissionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_009c1165ec392ace, []int{4} + return fileDescriptor_009c1165ec392ace, []int{6} } func (m *QueryEpochSubmissionsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -267,7 +382,7 @@ func (m *QueryEpochSubmissionsResponse) Reset() { *m = QueryEpochSubmiss func (m *QueryEpochSubmissionsResponse) String() string { return proto.CompactTextString(m) } func 
(*QueryEpochSubmissionsResponse) ProtoMessage() {} func (*QueryEpochSubmissionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_009c1165ec392ace, []int{5} + return fileDescriptor_009c1165ec392ace, []int{7} } func (m *QueryEpochSubmissionsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -313,8 +428,10 @@ func (m *QueryEpochSubmissionsResponse) GetPagination() *query.PageResponse { func init() { proto.RegisterType((*QueryParamsRequest)(nil), "babylon.btccheckpoint.v1.QueryParamsRequest") proto.RegisterType((*QueryParamsResponse)(nil), "babylon.btccheckpoint.v1.QueryParamsResponse") - proto.RegisterType((*QueryBtcCheckpointHeightRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightRequest") - proto.RegisterType((*QueryBtcCheckpointHeightResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointHeightResponse") + proto.RegisterType((*QueryBtcCheckpointInfoRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointInfoRequest") + proto.RegisterType((*QueryBtcCheckpointInfoResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointInfoResponse") + proto.RegisterType((*QueryBtcCheckpointsInfoRequest)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointsInfoRequest") + proto.RegisterType((*QueryBtcCheckpointsInfoResponse)(nil), "babylon.btccheckpoint.v1.QueryBtcCheckpointsInfoResponse") proto.RegisterType((*QueryEpochSubmissionsRequest)(nil), "babylon.btccheckpoint.v1.QueryEpochSubmissionsRequest") proto.RegisterType((*QueryEpochSubmissionsResponse)(nil), "babylon.btccheckpoint.v1.QueryEpochSubmissionsResponse") } @@ -322,43 +439,48 @@ func init() { func init() { proto.RegisterFile("babylon/btccheckpoint/query.proto", fileDescriptor_009c1165ec392ace) } var fileDescriptor_009c1165ec392ace = []byte{ - // 571 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x8b, 0x13, 0x3f, - 0x1c, 0xc6, 0x9b, 0xfd, 0xf5, 0x57, 0x34, 0x7b, 0x91, 0xec, 0x22, 0xb5, 0xae, 0xb3, 
0x75, 0x40, - 0x5b, 0xc5, 0x4d, 0x68, 0x17, 0x95, 0x2a, 0xec, 0xa1, 0xe2, 0x1f, 0x10, 0xd6, 0xb5, 0xe2, 0x45, - 0x90, 0x92, 0x84, 0x30, 0x1d, 0xda, 0x99, 0xcc, 0x36, 0x99, 0x62, 0x11, 0x2f, 0xfa, 0x02, 0x14, - 0xbc, 0xf8, 0x06, 0x7c, 0x15, 0x82, 0xe7, 0x3d, 0x2e, 0x78, 0xf1, 0x24, 0xd2, 0xfa, 0x42, 0x64, - 0x32, 0x69, 0xbb, 0x5d, 0x3b, 0xb4, 0x7a, 0x2b, 0xc9, 0xf3, 0x7c, 0x9f, 0x4f, 0xf3, 0x7c, 0x5b, - 0x78, 0x99, 0x51, 0x36, 0xec, 0xc9, 0x90, 0x30, 0xcd, 0x79, 0x47, 0xf0, 0x6e, 0x24, 0xfd, 0x50, - 0x93, 0xc3, 0x58, 0xf4, 0x87, 0x38, 0xea, 0x4b, 0x2d, 0x51, 0xd1, 0x4a, 0xf0, 0x9c, 0x04, 0x0f, - 0x6a, 0xa5, 0x4d, 0x4f, 0x7a, 0xd2, 0x88, 0x48, 0xf2, 0x29, 0xd5, 0x97, 0xb6, 0x3c, 0x29, 0xbd, - 0x9e, 0x20, 0x34, 0xf2, 0x09, 0x0d, 0x43, 0xa9, 0xa9, 0xf6, 0x65, 0xa8, 0xec, 0xed, 0x75, 0x2e, - 0x55, 0x20, 0x15, 0x61, 0x54, 0x89, 0x34, 0x86, 0x0c, 0x6a, 0x4c, 0x68, 0x5a, 0x23, 0x11, 0xf5, - 0xfc, 0xd0, 0x88, 0xad, 0xd6, 0x5d, 0x0c, 0x17, 0xd1, 0x3e, 0x0d, 0x26, 0xf3, 0xae, 0x2d, 0xd6, - 0xcc, 0xb3, 0x1a, 0xa9, 0xbb, 0x09, 0xd1, 0xd3, 0x24, 0xf0, 0xc0, 0xf8, 0x5b, 0xe2, 0x30, 0x16, - 0x4a, 0xbb, 0xcf, 0xe1, 0xc6, 0xdc, 0xa9, 0x8a, 0x64, 0xa8, 0x04, 0xda, 0x83, 0x85, 0x34, 0xa7, - 0x08, 0xca, 0xa0, 0xba, 0x5e, 0x2f, 0xe3, 0xac, 0x67, 0xc0, 0xa9, 0xb3, 0x99, 0x3f, 0xfa, 0xb1, - 0x9d, 0x6b, 0x59, 0x97, 0xbb, 0x07, 0xb7, 0xcd, 0xd8, 0xa6, 0xe6, 0xf7, 0xa6, 0xea, 0x47, 0xc2, - 0xf7, 0x3a, 0xda, 0x26, 0xa3, 0x8b, 0xf0, 0xac, 0x88, 0x24, 0xef, 0xb4, 0xc3, 0x38, 0x30, 0x29, - 0xf9, 0xd6, 0x19, 0x73, 0xb0, 0x1f, 0x07, 0xee, 0x4b, 0x58, 0xce, 0xf6, 0x5b, 0xc6, 0x06, 0xbc, - 0x20, 0x68, 0xbf, 0xe7, 0x0b, 0xa5, 0xdb, 0x4c, 0xf3, 0x36, 0xeb, 0x49, 0xde, 0x4d, 0xa6, 0x31, - 0xd1, 0xb7, 0x03, 0xcf, 0x4f, 0x04, 0x4d, 0xcd, 0x9b, 0xc9, 0xf5, 0xbe, 0xb9, 0x75, 0xdf, 0x01, - 0xb8, 0x65, 0xe6, 0xdf, 0x4f, 0x02, 0x9f, 0xc5, 0x2c, 0xf0, 0x95, 0x4a, 0x6a, 0x5a, 0x05, 0x0e, - 0x3d, 0x80, 0x70, 0x56, 0x56, 0x71, 0xcd, 0x3c, 0xd0, 0x55, 0x9c, 0x36, 0x8b, 0x93, 0x66, 0x71, - 0xba, 0x40, 0xb6, 0x59, 
0x7c, 0x40, 0x3d, 0x61, 0x07, 0xb7, 0x4e, 0x38, 0xdd, 0xcf, 0x00, 0x5e, - 0xca, 0xa0, 0xb0, 0x5f, 0xf1, 0x2e, 0xcc, 0x77, 0xc5, 0x30, 0x29, 0xe1, 0xbf, 0xea, 0x7a, 0xbd, - 0x92, 0x5d, 0xc2, 0xcc, 0xfc, 0x58, 0x0c, 0x5b, 0xc6, 0x84, 0x1e, 0x2e, 0xc0, 0xac, 0x2c, 0xc5, - 0x4c, 0x93, 0x4f, 0x72, 0xd6, 0x3f, 0xe5, 0xe1, 0xff, 0x86, 0x13, 0xbd, 0x07, 0xb0, 0x90, 0xf6, - 0x8d, 0x6e, 0x64, 0xc3, 0xfc, 0xb9, 0x66, 0xa5, 0x9d, 0x15, 0xd5, 0x69, 0xba, 0x5b, 0x7d, 0xfb, - 0xed, 0xd7, 0xc7, 0x35, 0x17, 0x95, 0xc9, 0xe2, 0xfd, 0x1e, 0xd4, 0xec, 0xcf, 0x00, 0x7d, 0x01, - 0x70, 0x63, 0xc1, 0x92, 0xa0, 0xc6, 0x92, 0xc0, 0xec, 0xc5, 0x2c, 0xdd, 0xf9, 0x17, 0xab, 0x05, - 0xdf, 0x31, 0xe0, 0x15, 0x74, 0x25, 0x1b, 0xfc, 0xf5, 0x74, 0xb1, 0xde, 0xa0, 0xaf, 0x00, 0x9e, - 0x3b, 0x5d, 0x3e, 0xba, 0xb5, 0x24, 0x3f, 0x63, 0x67, 0x4b, 0xb7, 0xff, 0xda, 0x67, 0xa1, 0x1b, - 0x06, 0x7a, 0x17, 0xd5, 0x56, 0x82, 0x26, 0x6a, 0x36, 0xa2, 0xf9, 0xe4, 0x68, 0xe4, 0x80, 0xe3, - 0x91, 0x03, 0x7e, 0x8e, 0x1c, 0xf0, 0x61, 0xec, 0xe4, 0x8e, 0xc7, 0x4e, 0xee, 0xfb, 0xd8, 0xc9, - 0xbd, 0xb8, 0xe9, 0xf9, 0xba, 0x13, 0x33, 0xcc, 0x65, 0x30, 0x19, 0xcb, 0x3b, 0xd4, 0x0f, 0xa7, - 0x19, 0xaf, 0x4e, 0xa5, 0xe8, 0x61, 0x24, 0x14, 0x2b, 0x98, 0x3f, 0xab, 0xdd, 0xdf, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x35, 0xfb, 0xc6, 0x34, 0x9a, 0x05, 0x00, 0x00, + // 653 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x4d, 0x6f, 0xd3, 0x30, + 0x18, 0xc7, 0xeb, 0xae, 0x9b, 0x36, 0xef, 0x02, 0x66, 0x87, 0x92, 0x8d, 0xac, 0x8b, 0x04, 0x1d, + 0x2f, 0x4b, 0xd4, 0x4d, 0x30, 0x26, 0x10, 0x48, 0x9d, 0x78, 0x13, 0x08, 0x46, 0x81, 0x0b, 0x97, + 0xca, 0xc9, 0xbc, 0x34, 0x5a, 0x63, 0x67, 0xb5, 0x53, 0x51, 0x21, 0x2e, 0xf0, 0x01, 0x40, 0xe2, + 0x33, 0xc0, 0x89, 0x23, 0x5c, 0x91, 0xb8, 0xed, 0x38, 0x89, 0x0b, 0x27, 0x84, 0x5a, 0x3e, 0x08, + 0x8a, 0xe3, 0xf5, 0x3d, 0x6a, 0x3b, 0x71, 0xab, 0x9c, 0xe7, 0xef, 0xff, 0xcf, 0x7f, 0x3f, 0x8f, + 0x0b, 0x57, 0x6c, 0x6c, 0x37, 0xaa, 0x8c, 
0x5a, 0xb6, 0x70, 0x9c, 0x0a, 0x71, 0xf6, 0x03, 0xe6, + 0x51, 0x61, 0x1d, 0x84, 0xa4, 0xd6, 0x30, 0x83, 0x1a, 0x13, 0x0c, 0x65, 0x55, 0x89, 0xd9, 0x53, + 0x62, 0xd6, 0x0b, 0xda, 0x82, 0xcb, 0x5c, 0x26, 0x8b, 0xac, 0xe8, 0x57, 0x5c, 0xaf, 0x2d, 0xb9, + 0x8c, 0xb9, 0x55, 0x62, 0xe1, 0xc0, 0xb3, 0x30, 0xa5, 0x4c, 0x60, 0xe1, 0x31, 0xca, 0xd5, 0xd7, + 0x4b, 0x0e, 0xe3, 0x3e, 0xe3, 0x96, 0x8d, 0x39, 0x89, 0x6d, 0xac, 0x7a, 0xc1, 0x26, 0x02, 0x17, + 0xac, 0x00, 0xbb, 0x1e, 0x95, 0xc5, 0xaa, 0xd6, 0x18, 0x0e, 0x17, 0xe0, 0x1a, 0xf6, 0x8f, 0xf7, + 0xbb, 0x38, 0xbc, 0xa6, 0x97, 0x55, 0x96, 0x1a, 0x0b, 0x10, 0x3d, 0x8d, 0x0c, 0x77, 0xa4, 0xbe, + 0x44, 0x0e, 0x42, 0xc2, 0x85, 0xf1, 0x02, 0x9e, 0xe9, 0x59, 0xe5, 0x01, 0xa3, 0x9c, 0xa0, 0x5b, + 0x70, 0x26, 0xf6, 0xc9, 0x82, 0x1c, 0x58, 0x9d, 0x5f, 0xcf, 0x99, 0x49, 0x31, 0x98, 0xb1, 0xb2, + 0x98, 0x39, 0xfc, 0xbd, 0x9c, 0x2a, 0x29, 0x95, 0x71, 0x13, 0x9e, 0x93, 0xdb, 0x16, 0x85, 0xb3, + 0xdd, 0xae, 0x7e, 0x40, 0xf7, 0x98, 0xf2, 0x45, 0x8b, 0x70, 0x8e, 0x04, 0xcc, 0xa9, 0x94, 0x69, + 0xe8, 0x4b, 0x8f, 0x4c, 0x69, 0x56, 0x2e, 0x3c, 0x0e, 0x7d, 0x03, 0x43, 0x3d, 0x49, 0xad, 0xf8, + 0x6e, 0xc3, 0x8c, 0x47, 0xf7, 0x98, 0xa2, 0xbb, 0x9c, 0x4c, 0x57, 0x7c, 0xbe, 0xdd, 0xb7, 0x85, + 0x14, 0x1a, 0x9f, 0xc1, 0x30, 0x0f, 0xde, 0x8d, 0xb8, 0x0c, 0xe7, 0xb9, 0xc0, 0x35, 0x51, 0x96, + 0x5c, 0x0a, 0x12, 0xca, 0xa5, 0x3b, 0xd1, 0x8a, 0x3c, 0x03, 0xdd, 0x55, 0x9f, 0xd3, 0xea, 0x0c, + 0x74, 0x37, 0xfe, 0x78, 0x17, 0xc2, 0xce, 0x8d, 0x66, 0xa7, 0x24, 0xe7, 0x05, 0x33, 0xbe, 0x7e, + 0x33, 0xba, 0x7e, 0x33, 0xee, 0x32, 0x75, 0xfd, 0xe6, 0x0e, 0x76, 0x89, 0x72, 0x2e, 0x75, 0x29, + 0x8d, 0xaf, 0x00, 0x2e, 0x27, 0x82, 0xaa, 0x34, 0xee, 0xc3, 0xb9, 0xe8, 0x50, 0xe5, 0xaa, 0xc7, + 0x45, 0x16, 0xe4, 0xa6, 0x26, 0x8d, 0x64, 0x36, 0x52, 0x3f, 0xf2, 0xb8, 0x40, 0xf7, 0x7a, 0xa8, + 0xd3, 0x92, 0x3a, 0x3f, 0x92, 0x3a, 0xc6, 0xe8, 0xc1, 0x7e, 0x07, 0xe0, 0x92, 0xc4, 0x96, 0x69, + 0x3c, 0x0b, 0x6d, 0xdf, 0xe3, 0x3c, 0x1a, 0x84, 0x71, 0x1a, 0xa0, 0x2f, 0xbc, 
0xf4, 0x89, 0xc3, + 0xfb, 0x04, 0x54, 0x1f, 0x0e, 0x52, 0xa8, 0xe8, 0x6e, 0xc0, 0xcc, 0x3e, 0x69, 0x70, 0x95, 0x5a, + 0x3e, 0x39, 0xb5, 0x8e, 0xf8, 0x21, 0x69, 0x94, 0xa4, 0xe8, 0xbf, 0xa5, 0xb5, 0xfe, 0x63, 0x1a, + 0x4e, 0x4b, 0x4e, 0xf4, 0x1e, 0xc0, 0x99, 0x78, 0xa2, 0xd0, 0x95, 0x64, 0x98, 0xc1, 0x41, 0xd6, + 0xd6, 0xc6, 0xac, 0x8e, 0xdd, 0x8d, 0xd5, 0xb7, 0x3f, 0xff, 0x7e, 0x4c, 0x1b, 0x28, 0x67, 0x0d, + 0x7f, 0x41, 0xea, 0x05, 0xf5, 0xd0, 0xa0, 0x6f, 0x00, 0x9e, 0x1e, 0x18, 0x44, 0xb4, 0x39, 0xc2, + 0x2e, 0x69, 0xf0, 0xb5, 0xeb, 0x93, 0x0b, 0x15, 0xf2, 0x9a, 0x44, 0xce, 0xa3, 0xf3, 0xc9, 0xc8, + 0xaf, 0xdb, 0x2d, 0xf5, 0x06, 0x7d, 0x01, 0x10, 0x0d, 0xce, 0x0c, 0x9a, 0xc8, 0xbf, 0xfb, 0x3d, + 0xd0, 0xb6, 0x4e, 0xa0, 0x54, 0xe8, 0x2b, 0x12, 0x7d, 0x11, 0x9d, 0x4d, 0x44, 0x47, 0xdf, 0x01, + 0x3c, 0xd5, 0xdf, 0xa5, 0xe8, 0xda, 0x08, 0xcb, 0x84, 0xe1, 0xd2, 0x36, 0x27, 0xd6, 0x29, 0xd0, + 0x2d, 0x09, 0xba, 0x81, 0x0a, 0x63, 0x65, 0x6c, 0xf1, 0xce, 0x16, 0xc5, 0x27, 0x87, 0x4d, 0x1d, + 0x1c, 0x35, 0x75, 0xf0, 0xa7, 0xa9, 0x83, 0x0f, 0x2d, 0x3d, 0x75, 0xd4, 0xd2, 0x53, 0xbf, 0x5a, + 0x7a, 0xea, 0xe5, 0x55, 0xd7, 0x13, 0x95, 0xd0, 0x36, 0x1d, 0xe6, 0x1f, 0x6f, 0xeb, 0x54, 0xb0, + 0x47, 0xdb, 0x1e, 0xaf, 0xfa, 0x5c, 0x44, 0x23, 0x20, 0xdc, 0x9e, 0x91, 0xff, 0x5b, 0x1b, 0xff, + 0x02, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x86, 0x0a, 0xe9, 0xa5, 0x07, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -375,8 +497,10 @@ const _ = grpc.SupportPackageIsVersion4 type QueryClient interface { // Parameters queries the parameters of the module. 
Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) - // BtcCheckpointHeight returns earliest block height for given rawcheckpoint - BtcCheckpointHeight(ctx context.Context, in *QueryBtcCheckpointHeightRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointHeightResponse, error) + // BtcCheckpointInfo returns checkpoint info for a given epoch + BtcCheckpointInfo(ctx context.Context, in *QueryBtcCheckpointInfoRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointInfoResponse, error) + // BtcCheckpointsInfo returns checkpoint info for a range of epochs + BtcCheckpointsInfo(ctx context.Context, in *QueryBtcCheckpointsInfoRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointsInfoResponse, error) EpochSubmissions(ctx context.Context, in *QueryEpochSubmissionsRequest, opts ...grpc.CallOption) (*QueryEpochSubmissionsResponse, error) } @@ -397,9 +521,18 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts . return out, nil } -func (c *queryClient) BtcCheckpointHeight(ctx context.Context, in *QueryBtcCheckpointHeightRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointHeightResponse, error) { - out := new(QueryBtcCheckpointHeightResponse) - err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointHeight", in, out, opts...) +func (c *queryClient) BtcCheckpointInfo(ctx context.Context, in *QueryBtcCheckpointInfoRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointInfoResponse, error) { + out := new(QueryBtcCheckpointInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointInfo", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) BtcCheckpointsInfo(ctx context.Context, in *QueryBtcCheckpointsInfoRequest, opts ...grpc.CallOption) (*QueryBtcCheckpointsInfoResponse, error) { + out := new(QueryBtcCheckpointsInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.btccheckpoint.v1.Query/BtcCheckpointsInfo", in, out, opts...) if err != nil { return nil, err } @@ -419,8 +552,10 @@ func (c *queryClient) EpochSubmissions(ctx context.Context, in *QueryEpochSubmis type QueryServer interface { // Parameters queries the parameters of the module. Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) - // BtcCheckpointHeight returns earliest block height for given rawcheckpoint - BtcCheckpointHeight(context.Context, *QueryBtcCheckpointHeightRequest) (*QueryBtcCheckpointHeightResponse, error) + // BtcCheckpointInfo returns checkpoint info for a given epoch + BtcCheckpointInfo(context.Context, *QueryBtcCheckpointInfoRequest) (*QueryBtcCheckpointInfoResponse, error) + // BtcCheckpointsInfo returns checkpoint info for a range of epochs + BtcCheckpointsInfo(context.Context, *QueryBtcCheckpointsInfoRequest) (*QueryBtcCheckpointsInfoResponse, error) EpochSubmissions(context.Context, *QueryEpochSubmissionsRequest) (*QueryEpochSubmissionsResponse, error) } @@ -431,8 +566,11 @@ type UnimplementedQueryServer struct { func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") } -func (*UnimplementedQueryServer) BtcCheckpointHeight(ctx context.Context, req *QueryBtcCheckpointHeightRequest) (*QueryBtcCheckpointHeightResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointHeight not implemented") +func (*UnimplementedQueryServer) BtcCheckpointInfo(ctx context.Context, req *QueryBtcCheckpointInfoRequest) (*QueryBtcCheckpointInfoResponse, error) { + 
return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointInfo not implemented") +} +func (*UnimplementedQueryServer) BtcCheckpointsInfo(ctx context.Context, req *QueryBtcCheckpointsInfoRequest) (*QueryBtcCheckpointsInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BtcCheckpointsInfo not implemented") } func (*UnimplementedQueryServer) EpochSubmissions(ctx context.Context, req *QueryEpochSubmissionsRequest) (*QueryEpochSubmissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method EpochSubmissions not implemented") @@ -460,20 +598,38 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } -func _Query_BtcCheckpointHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryBtcCheckpointHeightRequest) +func _Query_BtcCheckpointInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBtcCheckpointInfoRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).BtcCheckpointHeight(ctx, in) + return srv.(QueryServer).BtcCheckpointInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointHeight", + FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).BtcCheckpointHeight(ctx, req.(*QueryBtcCheckpointHeightRequest)) + return srv.(QueryServer).BtcCheckpointInfo(ctx, req.(*QueryBtcCheckpointInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_BtcCheckpointsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(QueryBtcCheckpointsInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).BtcCheckpointsInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.btccheckpoint.v1.Query/BtcCheckpointsInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).BtcCheckpointsInfo(ctx, req.(*QueryBtcCheckpointsInfoRequest)) } return interceptor(ctx, in, info, handler) } @@ -505,8 +661,12 @@ var _Query_serviceDesc = grpc.ServiceDesc{ Handler: _Query_Params_Handler, }, { - MethodName: "BtcCheckpointHeight", - Handler: _Query_BtcCheckpointHeight_Handler, + MethodName: "BtcCheckpointInfo", + Handler: _Query_BtcCheckpointInfo_Handler, + }, + { + MethodName: "BtcCheckpointsInfo", + Handler: _Query_BtcCheckpointsInfo_Handler, }, { MethodName: "EpochSubmissions", @@ -573,7 +733,7 @@ func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *QueryBtcCheckpointHeightRequest) Marshal() (dAtA []byte, err error) { +func (m *QueryBtcCheckpointInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -583,12 +743,12 @@ func (m *QueryBtcCheckpointHeightRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QueryBtcCheckpointHeightRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointInfoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryBtcCheckpointHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -601,7 +761,7 @@ func (m *QueryBtcCheckpointHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int return len(dAtA) - i, nil } 
-func (m *QueryBtcCheckpointHeightResponse) Marshal() (dAtA []byte, err error) { +func (m *QueryBtcCheckpointInfoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -611,24 +771,125 @@ func (m *QueryBtcCheckpointHeightResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QueryBtcCheckpointHeightResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointInfoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryBtcCheckpointHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *QueryBtcCheckpointInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.EarliestBtcBlockNumber != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.EarliestBtcBlockNumber)) + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryBtcCheckpointsInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBtcCheckpointsInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBtcCheckpointsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EndEpoch != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EndEpoch)) + i-- + dAtA[i] 
= 0x10 + } + if m.StartEpoch != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.StartEpoch)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } +func (m *QueryBtcCheckpointsInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBtcCheckpointsInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBtcCheckpointsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.InfoList) > 0 { + for iNdEx := len(m.InfoList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InfoList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *QueryEpochSubmissionsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -749,7 +1010,7 @@ func (m *QueryParamsResponse) Size() (n int) { return n } -func (m *QueryBtcCheckpointHeightRequest) Size() (n int) { +func (m *QueryBtcCheckpointInfoRequest) Size() (n int) { if m == nil { return 0 } @@ -761,14 +1022,53 @@ func (m *QueryBtcCheckpointHeightRequest) Size() (n int) { return n } -func (m *QueryBtcCheckpointHeightResponse) Size() (n int) { +func (m *QueryBtcCheckpointInfoResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.EarliestBtcBlockNumber != 0 { - n += 1 + sovQuery(uint64(m.EarliestBtcBlockNumber)) + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m 
*QueryBtcCheckpointsInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartEpoch != 0 { + n += 1 + sovQuery(uint64(m.StartEpoch)) + } + if m.EndEpoch != 0 { + n += 1 + sovQuery(uint64(m.EndEpoch)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBtcCheckpointsInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.InfoList) > 0 { + for _, e := range m.InfoList { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) } return n } @@ -947,7 +1247,7 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryBtcCheckpointHeightRequest) Unmarshal(dAtA []byte) error { +func (m *QueryBtcCheckpointInfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -970,10 +1270,10 @@ func (m *QueryBtcCheckpointHeightRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryBtcCheckpointInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryBtcCheckpointInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1016,7 +1316,7 @@ func (m *QueryBtcCheckpointHeightRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryBtcCheckpointHeightResponse) Unmarshal(dAtA []byte) error { +func (m *QueryBtcCheckpointInfoResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1039,17 +1339,227 @@ func (m *QueryBtcCheckpointHeightResponse) Unmarshal(dAtA []byte) error { fieldNum := 
int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryBtcCheckpointInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBtcCheckpointHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryBtcCheckpointInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &BTCCheckpointInfo{} + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBtcCheckpointsInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBtcCheckpointsInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBtcCheckpointsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EarliestBtcBlockNumber", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartEpoch", wireType) + } + m.StartEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartEpoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndEpoch", wireType) + } + m.EndEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndEpoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBtcCheckpointsInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBtcCheckpointsInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBtcCheckpointsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InfoList", wireType) } - m.EarliestBtcBlockNumber = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -1059,11 +1569,62 @@ func (m *QueryBtcCheckpointHeightResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EarliestBtcBlockNumber |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InfoList = append(m.InfoList, &BTCCheckpointInfo{}) + if err := m.InfoList[len(m.InfoList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) diff --git a/x/btccheckpoint/types/query.pb.gw.go b/x/btccheckpoint/types/query.pb.gw.go index 7e4a5dd86..06ee42575 100644 --- a/x/btccheckpoint/types/query.pb.gw.go +++ b/x/btccheckpoint/types/query.pb.gw.go @@ -51,8 +51,8 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal } -func request_Query_BtcCheckpointHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBtcCheckpointHeightRequest +func request_Query_BtcCheckpointInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointInfoRequest var metadata runtime.ServerMetadata var ( @@ -73,13 +73,13 @@ func request_Query_BtcCheckpointHeight_0(ctx context.Context, marshaler runtime. 
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) } - msg, err := client.BtcCheckpointHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.BtcCheckpointInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -func local_request_Query_BtcCheckpointHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBtcCheckpointHeightRequest +func local_request_Query_BtcCheckpointInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointInfoRequest var metadata runtime.ServerMetadata var ( @@ -100,7 +100,43 @@ func local_request_Query_BtcCheckpointHeight_0(ctx context.Context, marshaler ru return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) } - msg, err := server.BtcCheckpointHeight(ctx, &protoReq) + msg, err := server.BtcCheckpointInfo(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_BtcCheckpointsInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_BtcCheckpointsInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointsInfoRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BtcCheckpointsInfo_0); err != nil { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BtcCheckpointsInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_BtcCheckpointsInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBtcCheckpointsInfoRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_BtcCheckpointsInfo_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BtcCheckpointsInfo(ctx, &protoReq) return msg, metadata, err } @@ -206,7 +242,30 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) - mux.Handle("GET", pattern_Query_BtcCheckpointHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BtcCheckpointInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_BtcCheckpointInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if 
err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_BtcCheckpointInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_BtcCheckpointsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -217,7 +276,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_BtcCheckpointHeight_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_BtcCheckpointsInfo_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -225,7 +284,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_BtcCheckpointHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_BtcCheckpointsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -313,7 +372,7 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) - mux.Handle("GET", pattern_Query_BtcCheckpointHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BtcCheckpointInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -322,14 +381,34 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_BtcCheckpointHeight_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_BtcCheckpointInfo_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_BtcCheckpointHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_BtcCheckpointInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_BtcCheckpointsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_BtcCheckpointsInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_BtcCheckpointsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -359,7 +438,9 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie var ( pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "btccheckpoint", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_BtcCheckpointHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"babylon", "btccheckpoint", "v1", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_BtcCheckpointInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"babylon", "btccheckpoint", "v1", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_BtcCheckpointsInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"babylon", "btccheckpoint", "v1"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_EpochSubmissions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"babylon", "btccheckpoint", "v1", "epoch_num", "submissions"}, "", runtime.AssumeColonVerbOpt(false))) ) @@ -367,7 +448,9 @@ 
var ( var ( forward_Query_Params_0 = runtime.ForwardResponseMessage - forward_Query_BtcCheckpointHeight_0 = runtime.ForwardResponseMessage + forward_Query_BtcCheckpointInfo_0 = runtime.ForwardResponseMessage + + forward_Query_BtcCheckpointsInfo_0 = runtime.ForwardResponseMessage forward_Query_EpochSubmissions_0 = runtime.ForwardResponseMessage ) diff --git a/x/btccheckpoint/types/types.go b/x/btccheckpoint/types/types.go index 75ce93c56..97dfc1211 100644 --- a/x/btccheckpoint/types/types.go +++ b/x/btccheckpoint/types/types.go @@ -1,9 +1,12 @@ package types import ( + "encoding/hex" "fmt" + "github.com/babylonchain/babylon/btctxformatter" "github.com/babylonchain/babylon/types" + "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -13,10 +16,10 @@ import ( // Modelling proofs as separate Proof1 and Proof2, as this is more explicit than // []*ParsedProof. type RawCheckpointSubmission struct { - Submitter sdk.AccAddress + Reporter sdk.AccAddress Proof1 ParsedProof Proof2 ParsedProof - checkpointData []byte + CheckpointData btctxformatter.RawBtcCheckpoint } // SubmissionBtcInfo encapsualte important information about submission posistion @@ -37,13 +40,13 @@ func NewRawCheckpointSubmission( a sdk.AccAddress, p1 ParsedProof, p2 ParsedProof, - checkpointData []byte, + checkpointData btctxformatter.RawBtcCheckpoint, ) RawCheckpointSubmission { r := RawCheckpointSubmission{ - Submitter: a, + Reporter: a, Proof1: p1, Proof2: p2, - checkpointData: checkpointData, + CheckpointData: checkpointData, } return r @@ -53,13 +56,6 @@ func (s *RawCheckpointSubmission) GetProofs() []*ParsedProof { return []*ParsedProof{&s.Proof1, &s.Proof2} } -func (s *RawCheckpointSubmission) GetRawCheckPointBytes() []byte { - checkpointDataCopy := make([]byte, len(s.checkpointData)) - // return copy, to avoid someone modifing original - copy(checkpointDataCopy, s.checkpointData) - return checkpointDataCopy -} - func (s *RawCheckpointSubmission) GetFirstBlockHash() 
types.BTCHeaderHashBytes { return s.Proof1.BlockHash } @@ -95,9 +91,12 @@ func (rsc *RawCheckpointSubmission) GetSubmissionKey() SubmissionKey { func (rsc *RawCheckpointSubmission) GetSubmissionData(epochNum uint64, txsInfo []*TransactionInfo) SubmissionData { return SubmissionData{ - Submitter: rsc.Submitter.Bytes(), - TxsInfo: txsInfo, - Epoch: epochNum, + VigilanteAddresses: &CheckpointAddresses{ + Reporter: rsc.Reporter.Bytes(), + Submitter: rsc.CheckpointData.SubmitterAddress, + }, + TxsInfo: txsInfo, + Epoch: epochNum, } } @@ -112,11 +111,10 @@ func (sk *SubmissionKey) GetKeyBlockHashes() []*types.BTCHeaderHashBytes { return hashes } -func NewEmptyEpochData(rawCheckpointBytes []byte) EpochData { +func NewEmptyEpochData() EpochData { return EpochData{ - Key: []*SubmissionKey{}, - Status: Submitted, - RawCheckpoint: rawCheckpointBytes, + Key: []*SubmissionKey{}, + Status: Submitted, } } @@ -172,3 +170,20 @@ func (ti *TransactionInfo) ValidateBasic() error { } return nil } + +func NewSpvProofFromHexBytes(c codec.Codec, proof string) (*BTCSpvProof, error) { + bytes, err := hex.DecodeString(proof) + + if err != nil { + return nil, err + } + + var p BTCSpvProof + err = c.Unmarshal(bytes, &p) + + if err != nil { + return nil, err + } + + return &p, nil +} diff --git a/x/btclightclient/client/cli/tx.go b/x/btclightclient/client/cli/tx.go index 2cc8d0561..043ef24ba 100644 --- a/x/btclightclient/client/cli/tx.go +++ b/x/btclightclient/client/cli/tx.go @@ -2,6 +2,7 @@ package cli import ( "fmt" + "github.com/babylonchain/babylon/x/btclightclient/types" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" diff --git a/x/btclightclient/keeper/keeper.go b/x/btclightclient/keeper/keeper.go index deba138d6..59792d21d 100644 --- a/x/btclightclient/keeper/keeper.go +++ b/x/btclightclient/keeper/keeper.go @@ -236,3 +236,7 @@ func (k Keeper) IsAncestor(ctx sdk.Context, parentHashBytes *bbn.BTCHeaderHashBy // Return whether the last element of the 
ancestry is equal to the parent return ancestry[len(ancestry)-1].Eq(parentHeader), nil } + +func (k Keeper) GetTipInfo(ctx sdk.Context) *types.BTCHeaderInfo { + return k.headersState(ctx).GetTip() +} diff --git a/x/checkpointing/abci.go b/x/checkpointing/abci.go index feba0e8d0..c7348f161 100644 --- a/x/checkpointing/abci.go +++ b/x/checkpointing/abci.go @@ -26,7 +26,7 @@ func BeginBlocker(ctx sdk.Context, k keeper.Keeper, req abci.RequestBeginBlock) if epoch.IsFirstBlock(ctx) { err := k.InitValidatorBLSSet(ctx) if err != nil { - panic(fmt.Errorf("failed to store validator BLS set")) + panic(fmt.Errorf("failed to store validator BLS set: %w", err)) } } if epoch.IsSecondBlock(ctx) { diff --git a/x/checkpointing/client/cli/tx.go b/x/checkpointing/client/cli/tx.go index d22211c75..d4986d8cb 100644 --- a/x/checkpointing/client/cli/tx.go +++ b/x/checkpointing/client/cli/tx.go @@ -2,6 +2,8 @@ package cli import ( "fmt" + "os" + "path/filepath" "strconv" "strings" @@ -105,9 +107,17 @@ before running the command (e.g., via babylond create-bls-key).`)) return tx.GenerateOrBroadcastTxWithFactory(clientCtx, txf, msg) } + // HACK: test cases need to setup the path where the priv validator BLS key is going to be set + // so we redefine the FlagHome here. Since we can't import `app` due to a cyclic dependency, + // we have to duplicate the definition here. + // If this changes, the `DefaultHomeDir` flag at `app/app.go` needs to change as well. 
+ userHomeDir, err := os.UserHomeDir() + if err != nil { + panic(err) + } - cmd.Flags().String(flags.FlagHome, "", "The node home directory") - _ = cmd.MarkFlagRequired(flags.FlagHome) + defaultNodeHome := filepath.Join(userHomeDir, ".babylond") + cmd.Flags().String(flags.FlagHome, defaultNodeHome, "The node home directory") return cmd } diff --git a/x/checkpointing/client/cli/tx_test.go b/x/checkpointing/client/cli/tx_test.go index 236634ff6..c7150d3cc 100644 --- a/x/checkpointing/client/cli/tx_test.go +++ b/x/checkpointing/client/cli/tx_test.go @@ -22,6 +22,11 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/staking/client/cli" + "github.com/babylonchain/babylon/app" + "github.com/babylonchain/babylon/app/params" + "github.com/babylonchain/babylon/privval" + testutilcli "github.com/babylonchain/babylon/testutil/cli" + checkpointcli "github.com/babylonchain/babylon/x/checkpointing/client/cli" abci "github.com/tendermint/tendermint/abci/types" tmconfig "github.com/tendermint/tendermint/config" tmbytes "github.com/tendermint/tendermint/libs/bytes" @@ -30,12 +35,6 @@ import ( rpcclientmock "github.com/tendermint/tendermint/rpc/client/mock" coretypes "github.com/tendermint/tendermint/rpc/core/types" tmtypes "github.com/tendermint/tendermint/types" - - "github.com/babylonchain/babylon/app" - "github.com/babylonchain/babylon/app/params" - "github.com/babylonchain/babylon/privval" - testutilcli "github.com/babylonchain/babylon/testutil/cli" - checkpointcli "github.com/babylonchain/babylon/x/checkpointing/client/cli" ) type mockTendermintRPC struct { diff --git a/x/checkpointing/keeper/bls_signer.go b/x/checkpointing/keeper/bls_signer.go index 5a73234db..56009ccad 100644 --- a/x/checkpointing/keeper/bls_signer.go +++ b/x/checkpointing/keeper/bls_signer.go @@ -2,9 +2,10 @@ package keeper import ( "fmt" - epochingtypes "github.com/babylonchain/babylon/x/epoching/types" "time" + epochingtypes 
"github.com/babylonchain/babylon/x/epoching/types" + "github.com/babylonchain/babylon/client/tx" "github.com/babylonchain/babylon/crypto/bls12381" "github.com/babylonchain/babylon/types/retry" diff --git a/x/checkpointing/keeper/grpc_query_checkpoint.go b/x/checkpointing/keeper/grpc_query_checkpoint.go index 108fb8bbc..120a03f6d 100644 --- a/x/checkpointing/keeper/grpc_query_checkpoint.go +++ b/x/checkpointing/keeper/grpc_query_checkpoint.go @@ -2,6 +2,7 @@ package keeper import ( "context" + "fmt" "github.com/babylonchain/babylon/x/checkpointing/types" sdk "github.com/cosmos/cosmos-sdk/types" @@ -23,16 +24,17 @@ func (k Keeper) RawCheckpointList(ctx context.Context, req *types.QueryRawCheckp store := k.CheckpointsState(sdkCtx).checkpoints pageRes, err := query.FilteredPaginate(store, req.Pagination, func(_ []byte, value []byte, accumulate bool) (bool, error) { - if accumulate { - ckptWithMeta, err := types.BytesToCkptWithMeta(k.cdc, value) - if err != nil { - return false, err - } - if ckptWithMeta.Status == req.Status { + ckptWithMeta, err := types.BytesToCkptWithMeta(k.cdc, value) + if err != nil { + return false, err + } + if ckptWithMeta.Status == req.Status { + if accumulate { checkpointList = append(checkpointList, ckptWithMeta) } + return true, nil } - return true, nil + return false, nil }) if err != nil { @@ -80,10 +82,9 @@ func (k Keeper) RecentEpochStatusCount(ctx context.Context, req *types.QueryRece } sdkCtx := sdk.UnwrapSDKContext(ctx) - // minus 1 is because the current epoch is not finished - tipEpoch := k.GetEpoch(sdkCtx).EpochNumber - 1 - if tipEpoch < 0 { //nolint:staticcheck // uint64 doesn't go below zero but we want to let people know that's an invalid request. 
- return nil, status.Error(codes.InvalidArgument, "invalid request") + tipEpoch, err := k.GetLastCheckpointedEpoch(sdkCtx) + if err != nil { + return nil, fmt.Errorf("failed to get the last checkpointed epoch") } targetEpoch := tipEpoch - req.EpochCount + 1 if targetEpoch < 0 { //nolint:staticcheck // uint64 doesn't go below zero @@ -109,10 +110,42 @@ func (k Keeper) RecentEpochStatusCount(ctx context.Context, req *types.QueryRece }, nil } -func (k Keeper) RecentRawCheckpointList(c context.Context, req *types.QueryRecentRawCheckpointListRequest) (*types.QueryRecentRawCheckpointListResponse, error) { - panic("TODO: implement this") +// LastCheckpointWithStatus returns the last checkpoint with the given status +// if the checkpoint with the given status does not exist, return the last +// checkpoint that is more mature than the given status +func (k Keeper) LastCheckpointWithStatus(ctx context.Context, req *types.QueryLastCheckpointWithStatusRequest) (*types.QueryLastCheckpointWithStatusResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + sdkCtx := sdk.UnwrapSDKContext(ctx) + tipCheckpointedEpoch, err := k.GetLastCheckpointedEpoch(sdkCtx) + if err != nil { + return nil, fmt.Errorf("failed to get the last checkpointed epoch number: %w", err) + } + for e := int(tipCheckpointedEpoch); e >= 0; e-- { + ckpt, err := k.GetRawCheckpoint(sdkCtx, uint64(e)) + if err != nil { + return nil, fmt.Errorf("failed to get the raw checkpoint at epoch %v: %w", e, err) + } + if ckpt.Status == req.Status || ckpt.IsMoreMatureThanStatus(req.Status) { + return &types.QueryLastCheckpointWithStatusResponse{RawCheckpoint: ckpt.Ckpt}, nil + } + } + return nil, fmt.Errorf("cannot find checkpoint with status %v", req.Status) } -func (k Keeper) LatestCheckpoint(c context.Context, req *types.QueryLatestCheckpointRequest) (*types.QueryLatestCheckpointResponse, error) { - panic("TODO: implement this") +// GetLastCheckpointedEpoch returns the 
last epoch number that associates with a checkpoint +func (k Keeper) GetLastCheckpointedEpoch(ctx sdk.Context) (uint64, error) { + curEpoch := k.GetEpoch(ctx).EpochNumber + if curEpoch <= 0 { + return 0, fmt.Errorf("current epoch should be more than 0") + } + // minus 1 is because the current epoch is not ended + tipEpoch := curEpoch - 1 + _, err := k.GetRawCheckpoint(ctx, tipEpoch) + if err != nil { + return 0, fmt.Errorf("cannot get raw checkpoint at epoch %v", tipEpoch) + } + return tipEpoch, nil } diff --git a/x/checkpointing/keeper/grpc_query_checkpoint_test.go b/x/checkpointing/keeper/grpc_query_checkpoint_test.go index 00c55378a..6f6f08e54 100644 --- a/x/checkpointing/keeper/grpc_query_checkpoint_test.go +++ b/x/checkpointing/keeper/grpc_query_checkpoint_test.go @@ -1,6 +1,9 @@ package keeper_test import ( + "context" + "github.com/babylonchain/babylon/x/checkpointing/keeper" + "github.com/cosmos/cosmos-sdk/types/query" "math/rand" "testing" @@ -82,3 +85,106 @@ func FuzzQueryStatusCount(f *testing.F) { require.Equal(t, expectedResp, resp) }) } + +func FuzzQueryLastCheckpointWithStatus(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + // test querying recent epoch counts with each status in recent epochs + tipEpoch := datagen.RandomInt(100) + 10 + ctrl := gomock.NewController(t) + defer ctrl.Finish() + ek := mocks.NewMockEpochingKeeper(ctrl) + ek.EXPECT().GetEpoch(gomock.Any()).Return(&epochingtypes.Epoch{EpochNumber: tipEpoch}).AnyTimes() + ckptKeeper, ctx, _ := testkeeper.CheckpointingKeeper(t, ek, nil, client.Context{}) + checkpoints := datagen.GenSequenceRawCheckpointsWithMeta(tipEpoch) + finalizedEpoch := datagen.RandomInt(int(tipEpoch)) + for e := uint64(0); e < tipEpoch; e++ { + if e <= finalizedEpoch { + checkpoints[int(e)].Status = types.Finalized + } else { + checkpoints[int(e)].Status = types.Sealed + } + err := ckptKeeper.AddRawCheckpoint(ctx, checkpoints[int(e)]) + 
require.NoError(t, err) + } + // request the last finalized checkpoint + req := types.NewQueryLastCheckpointWithStatus(types.Finalized) + expectedResp := &types.QueryLastCheckpointWithStatusResponse{ + RawCheckpoint: checkpoints[int(finalizedEpoch)].Ckpt, + } + resp, err := ckptKeeper.LastCheckpointWithStatus(ctx, req) + require.NoError(t, err) + require.Equal(t, expectedResp, resp) + + // request the last confirmed checkpoint + req = types.NewQueryLastCheckpointWithStatus(types.Confirmed) + expectedResp = &types.QueryLastCheckpointWithStatusResponse{ + RawCheckpoint: checkpoints[int(finalizedEpoch)].Ckpt, + } + resp, err = ckptKeeper.LastCheckpointWithStatus(ctx, req) + require.NoError(t, err) + require.Equal(t, expectedResp, resp) + }) +} + +//func TestQueryRawCheckpointList(t *testing.T) { +func FuzzQueryRawCheckpointList(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + tipEpoch := datagen.RandomInt(10) + 10 + ctrl := gomock.NewController(t) + defer ctrl.Finish() + ek := mocks.NewMockEpochingKeeper(ctrl) + ek.EXPECT().GetEpoch(gomock.Any()).Return(&epochingtypes.Epoch{EpochNumber: tipEpoch}).AnyTimes() + ckptKeeper, ctx, _ := testkeeper.CheckpointingKeeper(t, ek, nil, client.Context{}) + checkpoints := datagen.GenSequenceRawCheckpointsWithMeta(tipEpoch) + finalizedEpoch := datagen.RandomInt(int(tipEpoch)) + + // add Sealed and Finalized checkpoints + for e := uint64(0); e <= tipEpoch; e++ { + if e <= finalizedEpoch { + checkpoints[int(e)].Status = types.Finalized + } else { + checkpoints[int(e)].Status = types.Sealed + } + err := ckptKeeper.AddRawCheckpoint(ctx, checkpoints[int(e)]) + require.NoError(t, err) + } + + finalizedCheckpoints := checkpoints[:finalizedEpoch+1] + testRawCheckpointListWithType(t, ckptKeeper, ctx, finalizedCheckpoints, 0, types.Finalized) + sealedCheckpoints := checkpoints[finalizedEpoch+1:] + testRawCheckpointListWithType(t, ckptKeeper, ctx, sealedCheckpoints, 
finalizedEpoch+1, types.Sealed) + }) +} + +func testRawCheckpointListWithType( + t *testing.T, + ckptKeeper *keeper.Keeper, + ctx context.Context, + checkpointList []*types.RawCheckpointWithMeta, + baseEpoch uint64, + status types.CheckpointStatus, +) { + limit := datagen.RandomInt(len(checkpointList)+1) + 1 + pagination := &query.PageRequest{Limit: limit, CountTotal: true} + req := types.NewQueryRawCheckpointListRequest(pagination, status) + + resp, err := ckptKeeper.RawCheckpointList(ctx, req) + require.NoError(t, err) + require.Equal(t, uint64(len(checkpointList)), resp.Pagination.Total) + for ckptsRetrieved := uint64(0); ckptsRetrieved < uint64(len(checkpointList)); ckptsRetrieved += limit { + resp, err := ckptKeeper.RawCheckpointList(ctx, req) + require.NoError(t, err) + for i, ckpt := range resp.RawCheckpoints { + require.Equal(t, baseEpoch+ckptsRetrieved+uint64(i), ckpt.Ckpt.EpochNum) + require.Equal(t, status, ckpt.Status) + } + pagination = &query.PageRequest{Key: resp.Pagination.NextKey, Limit: limit} + req = types.NewQueryRawCheckpointListRequest(pagination, status) + } +} diff --git a/x/checkpointing/keeper/hooks.go b/x/checkpointing/keeper/hooks.go index 730c0d176..fa29134d2 100644 --- a/x/checkpointing/keeper/hooks.go +++ b/x/checkpointing/keeper/hooks.go @@ -16,7 +16,7 @@ func (k Keeper) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) e return nil } -// AfterRawCheckpointConfirmed - call hook if registered +// AfterRawCheckpointConfirmed - call hook if the checkpoint is confirmed func (k Keeper) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { if k.hooks != nil { return k.hooks.AfterRawCheckpointConfirmed(ctx, epoch) @@ -24,10 +24,25 @@ func (k Keeper) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error return nil } -// AfterRawCheckpointFinalized - call hook if registered +func (k Keeper) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *types.RawCheckpoint) error { + if k.hooks != nil { + return 
k.hooks.AfterRawCheckpointForgotten(ctx, ckpt) + } + return nil +} + +// AfterRawCheckpointFinalized - call hook if the checkpoint is finalized func (k Keeper) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error { if k.hooks != nil { return k.hooks.AfterRawCheckpointFinalized(ctx, epoch) } return nil } + +// AfterRawCheckpointBlsSigVerified - call hook if the checkpoint's BLS sig is verified +func (k Keeper) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *types.RawCheckpoint) error { + if k.hooks != nil { + return k.hooks.AfterRawCheckpointBlsSigVerified(ctx, ckpt) + } + return nil +} diff --git a/x/checkpointing/keeper/keeper.go b/x/checkpointing/keeper/keeper.go index cc12155db..69c0e0647 100644 --- a/x/checkpointing/keeper/keeper.go +++ b/x/checkpointing/keeper/keeper.go @@ -3,6 +3,7 @@ package keeper import ( "errors" "fmt" + txformat "github.com/babylonchain/babylon/btctxformatter" "github.com/babylonchain/babylon/crypto/bls12381" "github.com/babylonchain/babylon/x/checkpointing/types" @@ -85,6 +86,11 @@ func (k Keeper) addBlsSig(ctx sdk.Context, sig *types.BlsSig) error { return nil } + if !sig.LastCommitHash.Equal(*ckptWithMeta.Ckpt.LastCommitHash) { + // processed BlsSig message is for invalid last commit hash + return types.ErrInvalidLastCommitHash + } + // get signer's address signerAddr, err := sdk.ValAddressFromBech32(sig.SignerAddress) if err != nil { @@ -109,27 +115,29 @@ func (k Keeper) addBlsSig(ctx sdk.Context, sig *types.BlsSig) error { } // accumulate BLS signatures - updated, err := ckptWithMeta.Accumulate( - vals, signerAddr, signerBlsKey, *sig.BlsSig, k.GetTotalVotingPower(ctx, sig.GetEpochNum())) - if err != nil { - return err - } - - if updated { - err = k.UpdateCheckpoint(ctx, ckptWithMeta) - } + err = ckptWithMeta.Accumulate(vals, signerAddr, signerBlsKey, *sig.BlsSig, k.GetTotalVotingPower(ctx, sig.GetEpochNum())) if err != nil { return err } - if updated && ckptWithMeta.Status == types.Sealed { + if 
ckptWithMeta.Status == types.Sealed { + // emit event err = ctx.EventManager().EmitTypedEvent( &types.EventCheckpointSealed{Checkpoint: ckptWithMeta}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint sealed event for epoch %v", ckptWithMeta.Ckpt.EpochNum) + k.Logger(ctx).Error("failed to emit checkpoint sealed event for epoch %v", ckptWithMeta.Ckpt.EpochNum) } - ctx.Logger().Info(fmt.Sprintf("Checkpointing: checkpoint for epoch %v is Sealed", ckptWithMeta.Ckpt.EpochNum)) + // record state update of Sealed + ckptWithMeta.RecordStateUpdate(ctx, types.Sealed) + // log in console + k.Logger(ctx).Info(fmt.Sprintf("Checkpointing: checkpoint for epoch %v is Sealed", ckptWithMeta.Ckpt.EpochNum)) + } + + // if reaching this line, it means ckptWithMeta is updated, + // and we need to write the updated ckptWithMeta back to KVStore + if err := k.UpdateCheckpoint(ctx, ckptWithMeta); err != nil { + return err } return nil @@ -154,29 +162,29 @@ func (k Keeper) AddRawCheckpoint(ctx sdk.Context, ckptWithMeta *types.RawCheckpo func (k Keeper) BuildRawCheckpoint(ctx sdk.Context, epochNum uint64, lch types.LastCommitHash) (*types.RawCheckpointWithMeta, error) { ckptWithMeta := types.NewCheckpointWithMeta(types.NewCheckpoint(epochNum, lch), types.Accumulating) + ckptWithMeta.RecordStateUpdate(ctx, types.Accumulating) // record the state update of Accumulating err := k.AddRawCheckpoint(ctx, ckptWithMeta) if err != nil { return nil, err } - ctx.Logger().Info(fmt.Sprintf("Checkpointing: a new raw checkpoint is built for epoch %v", epochNum)) + k.Logger(ctx).Info(fmt.Sprintf("Checkpointing: a new raw checkpoint is built for epoch %v", epochNum)) return ckptWithMeta, nil } -// CheckpointEpoch verifies checkpoint from BTC and returns epoch number if -// it equals to the existing raw checkpoint. Otherwise, it further verifies +// VerifyCheckpoint verifies checkpoint from BTC. 
It verifies // the raw checkpoint and decides whether it is an invalid checkpoint or a // conflicting checkpoint. A conflicting checkpoint indicates the existence // of a fork -func (k Keeper) CheckpointEpoch(ctx sdk.Context, btcCkptBytes []byte) (uint64, error) { - ckptWithMeta, err := k.verifyCkptBytes(ctx, btcCkptBytes) +func (k Keeper) VerifyCheckpoint(ctx sdk.Context, checkpoint txformat.RawBtcCheckpoint) error { + _, err := k.verifyCkptBytes(ctx, &checkpoint) if err != nil { if errors.Is(err, types.ErrConflictingCheckpoint) { panic(err) } - return 0, err + return err } - return ckptWithMeta.Ckpt.EpochNum, nil + return nil } // verifyCkptBytes verifies checkpoint from BTC. A checkpoint is valid if @@ -184,10 +192,10 @@ func (k Keeper) CheckpointEpoch(ctx sdk.Context, btcCkptBytes []byte) (uint64, e // the raw checkpoint and decides whether it is an invalid checkpoint or a // conflicting checkpoint. A conflicting checkpoint indicates the existence // of a fork -func (k Keeper) verifyCkptBytes(ctx sdk.Context, btcCkptBytes []byte) (*types.RawCheckpointWithMeta, error) { - ckpt, err := types.FromBTCCkptBytesToRawCkpt(btcCkptBytes) +func (k Keeper) verifyCkptBytes(ctx sdk.Context, rawCheckpoint *txformat.RawBtcCheckpoint) (*types.RawCheckpointWithMeta, error) { + ckpt, err := types.FromBTCCkptToRawCkpt(rawCheckpoint) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to decode raw checkpoint from BTC raw checkpoint: %w", err) } // sanity check err = ckpt.ValidateBasic() @@ -196,11 +204,16 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, btcCkptBytes []byte) (*types.Ra } ckptWithMeta, err := k.GetRawCheckpoint(ctx, ckpt.EpochNum) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to fetch the raw checkpoint at epoch %d from database: %w", ckpt.EpochNum, err) } // can skip the checks if it is identical with the local checkpoint that is not accumulating if ckptWithMeta.Ckpt.Equal(ckpt) && ckptWithMeta.Status != types.Accumulating { 
+ // record verified checkpoint + err = k.AfterRawCheckpointBlsSigVerified(ctx, ckpt) + if err != nil { + return nil, fmt.Errorf("failed to record verified checkpoint of epoch %d for monitoring: %w", ckpt.EpochNum, err) + } return ckptWithMeta, nil } @@ -209,7 +222,7 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, btcCkptBytes []byte) (*types.Ra totalPower := k.GetTotalVotingPower(ctx, ckpt.EpochNum) signerSet, err := k.GetValidatorSet(ctx, ckpt.EpochNum).FindSubset(ckpt.Bitmap) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get the signer set via bitmap of epoch %d: %w", ckpt.EpochNum, err) } var sum int64 signersPubKeys := make([]bls12381.PublicKey, len(signerSet)) @@ -232,6 +245,12 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, btcCkptBytes []byte) (*types.Ra return nil, types.ErrInvalidRawCheckpoint.Wrap("invalid BLS multi-sig") } + // record verified checkpoint + err = k.AfterRawCheckpointBlsSigVerified(ctx, ckpt) + if err != nil { + return nil, fmt.Errorf("failed to record verified checkpoint of epoch %d for monitoring: %w", ckpt.EpochNum, err) + } + // now the checkpoint's multi-sig is valid, if the lastcommithash is the // same with that of the local checkpoint, it means it is valid except that // it is signed by a different signer set @@ -240,7 +259,7 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, btcCkptBytes []byte) (*types.Ra } // multi-sig is valid but the quorum is on a different branch, meaning conflicting is observed - ctx.Logger().Error(types.ErrConflictingCheckpoint.Wrapf("epoch %v", ckpt.EpochNum).Error()) + k.Logger(ctx).Error(types.ErrConflictingCheckpoint.Wrapf("epoch %v", ckpt.EpochNum).Error()) // report conflicting checkpoint event err = ctx.EventManager().EmitTypedEvent( &types.EventConflictingCheckpoint{ @@ -255,57 +274,68 @@ func (k Keeper) verifyCkptBytes(ctx sdk.Context, btcCkptBytes []byte) (*types.Ra return nil, types.ErrConflictingCheckpoint } -// SetCheckpointSubmitted sets the status of a 
checkpoint to SUBMITTED +func (k *Keeper) SetEpochingKeeper(ek types.EpochingKeeper) { + k.epochingKeeper = ek +} + +// SetCheckpointSubmitted sets the status of a checkpoint to SUBMITTED, +// and records the associated state update in lifecycle func (k Keeper) SetCheckpointSubmitted(ctx sdk.Context, epoch uint64) { ckpt := k.setCheckpointStatus(ctx, epoch, types.Sealed, types.Submitted) err := ctx.EventManager().EmitTypedEvent( &types.EventCheckpointSubmitted{Checkpoint: ckpt}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint submitted event for epoch %v", ckpt.Ckpt.EpochNum) + k.Logger(ctx).Error("failed to emit checkpoint submitted event for epoch %v", ckpt.Ckpt.EpochNum) } } -// SetCheckpointConfirmed sets the status of a checkpoint to CONFIRMED +// SetCheckpointConfirmed sets the status of a checkpoint to CONFIRMED, +// and records the associated state update in lifecycle func (k Keeper) SetCheckpointConfirmed(ctx sdk.Context, epoch uint64) { ckpt := k.setCheckpointStatus(ctx, epoch, types.Submitted, types.Confirmed) err := ctx.EventManager().EmitTypedEvent( &types.EventCheckpointConfirmed{Checkpoint: ckpt}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint confirmed event for epoch %v: %v", ckpt.Ckpt.EpochNum, err) + k.Logger(ctx).Error("failed to emit checkpoint confirmed event for epoch %v: %v", ckpt.Ckpt.EpochNum, err) } // invoke hook if err := k.AfterRawCheckpointConfirmed(ctx, epoch); err != nil { - ctx.Logger().Error("failed to trigger checkpoint confirmed hook for epoch %v: %v", ckpt.Ckpt.EpochNum, err) + k.Logger(ctx).Error("failed to trigger checkpoint confirmed hook for epoch %v: %v", ckpt.Ckpt.EpochNum, err) } } -// SetCheckpointFinalized sets the status of a checkpoint to FINALIZED +// SetCheckpointFinalized sets the status of a checkpoint to FINALIZED, +// and records the associated state update in lifecycle func (k Keeper) SetCheckpointFinalized(ctx sdk.Context, epoch uint64) { ckpt := k.setCheckpointStatus(ctx, 
epoch, types.Confirmed, types.Finalized) err := ctx.EventManager().EmitTypedEvent( &types.EventCheckpointFinalized{Checkpoint: ckpt}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint finalized event for epoch %v: %v", ckpt.Ckpt.EpochNum, err) + k.Logger(ctx).Error("failed to emit checkpoint finalized event for epoch %v: %v", ckpt.Ckpt.EpochNum, err) } // invoke hook, which is currently subscribed by ZoneConcierge if err := k.AfterRawCheckpointFinalized(ctx, epoch); err != nil { - ctx.Logger().Error("failed to trigger checkpoint finalized hook for epoch %v: %v", ckpt.Ckpt.EpochNum, err) + k.Logger(ctx).Error("failed to trigger checkpoint finalized hook for epoch %v: %v", ckpt.Ckpt.EpochNum, err) } } +// SetCheckpointForgotten rolls back the status of a checkpoint to Sealed, +// and records the associated state update in lifecycle func (k Keeper) SetCheckpointForgotten(ctx sdk.Context, epoch uint64) { ckpt := k.setCheckpointStatus(ctx, epoch, types.Submitted, types.Sealed) err := ctx.EventManager().EmitTypedEvent( &types.EventCheckpointForgotten{Checkpoint: ckpt}, ) if err != nil { - ctx.Logger().Error("failed to emit checkpoint forgotten event for epoch %v", ckpt.Ckpt.EpochNum) + k.Logger(ctx).Error("failed to emit checkpoint forgotten event for epoch %v", ckpt.Ckpt.EpochNum) } } +// setCheckpointStatus sets a ckptWithMeta to the given state, +// and records the state update in its lifecycle func (k Keeper) setCheckpointStatus(ctx sdk.Context, epoch uint64, from types.CheckpointStatus, to types.CheckpointStatus) *types.RawCheckpointWithMeta { ckptWithMeta, err := k.GetRawCheckpoint(ctx, epoch) if err != nil { @@ -319,13 +349,14 @@ func (k Keeper) setCheckpointStatus(ctx sdk.Context, epoch uint64, from types.Ch return nil } } - ckptWithMeta.Status = to - err = k.UpdateCheckpoint(ctx, ckptWithMeta) + ckptWithMeta.Status = to // set status + ckptWithMeta.RecordStateUpdate(ctx, to) // record state update to the lifecycle + err = k.UpdateCheckpoint(ctx, 
ckptWithMeta) // write back to KVStore if err != nil { panic("failed to update checkpoint status") } statusChangeMsg := fmt.Sprintf("Checkpointing: checkpoint status for epoch %v successfully changed from %v to %v", epoch, from.String(), to.String()) - ctx.Logger().Info(statusChangeMsg) + k.Logger(ctx).Info(statusChangeMsg) return ckptWithMeta } diff --git a/x/checkpointing/keeper/keeper_test.go b/x/checkpointing/keeper/keeper_test.go index 70fd5430a..37eebec6d 100644 --- a/x/checkpointing/keeper/keeper_test.go +++ b/x/checkpointing/keeper/keeper_test.go @@ -1,12 +1,13 @@ package keeper_test import ( + "math/rand" + "testing" + "github.com/babylonchain/babylon/btctxformatter" "github.com/babylonchain/babylon/crypto/bls12381" "github.com/boljen/go-bitmap" sdk "github.com/cosmos/cosmos-sdk/types" - "math/rand" - "testing" "github.com/babylonchain/babylon/testutil/datagen" testkeeper "github.com/babylonchain/babylon/testutil/keeper" @@ -68,37 +69,86 @@ func FuzzKeeperSetCheckpointStatus(f *testing.F) { ek := mocks.NewMockEpochingKeeper(ctrl) ckptKeeper, ctx, _ := testkeeper.CheckpointingKeeper(t, ek, nil, client.Context{}) + /* new accumulating checkpoint*/ mockCkptWithMeta := datagen.GenRandomRawCheckpointWithMeta() mockCkptWithMeta.Status = types.Accumulating + mockCkptWithMeta.RecordStateUpdate(ctx, types.Accumulating) epoch := mockCkptWithMeta.Ckpt.EpochNum + require.Len(t, mockCkptWithMeta.Lifecycle, 1) + require.Equal(t, curStateUpdate(ctx, types.Accumulating), mockCkptWithMeta.Lifecycle[0]) - _ = ckptKeeper.AddRawCheckpoint( + err := ckptKeeper.AddRawCheckpoint( ctx, mockCkptWithMeta, ) + require.NoError(t, err) + + /* incorrect state transition of a checkpoint */ + // ensure status and lifecycle from an incorrect state transition + // will not be recorded ckptKeeper.SetCheckpointSubmitted(ctx, epoch) status, err := ckptKeeper.GetStatus(ctx, epoch) require.NoError(t, err) require.Equal(t, types.Accumulating, status) + mockCkptWithMeta, err = 
ckptKeeper.GetRawCheckpoint(ctx, epoch) + require.NoError(t, err) + require.Len(t, mockCkptWithMeta.Lifecycle, 1) + require.Equal(t, curStateUpdate(ctx, types.Accumulating), mockCkptWithMeta.Lifecycle[0]) + + /* Accumulating -> Sealed */ + ctx = updateRandomCtx(ctx) mockCkptWithMeta.Status = types.Sealed + mockCkptWithMeta.RecordStateUpdate(ctx, types.Sealed) err = ckptKeeper.UpdateCheckpoint(ctx, mockCkptWithMeta) require.NoError(t, err) + // ensure status is updated + status, err = ckptKeeper.GetStatus(ctx, epoch) + require.NoError(t, err) + require.Equal(t, types.Sealed, status) + // ensure state update of Sealed is recorded + mockCkptWithMeta, err = ckptKeeper.GetRawCheckpoint(ctx, epoch) + require.NoError(t, err) + require.Len(t, mockCkptWithMeta.Lifecycle, 2) + require.Equal(t, curStateUpdate(ctx, types.Sealed), mockCkptWithMeta.Lifecycle[1]) + + /* Sealed -> Submitted */ + ctx = updateRandomCtx(ctx) ckptKeeper.SetCheckpointSubmitted(ctx, epoch) + // ensure status is updated status, err = ckptKeeper.GetStatus(ctx, epoch) require.NoError(t, err) require.Equal(t, types.Submitted, status) - ckptKeeper.SetCheckpointConfirmed(ctx, epoch) - status, err = ckptKeeper.GetStatus(ctx, epoch) + // ensure state update of Submitted is recorded + mockCkptWithMeta, err = ckptKeeper.GetRawCheckpoint(ctx, epoch) require.NoError(t, err) - require.Equal(t, types.Confirmed, status) + require.Len(t, mockCkptWithMeta.Lifecycle, 3) + require.Equal(t, curStateUpdate(ctx, types.Submitted), mockCkptWithMeta.Lifecycle[2]) + + /* Submitted -> Confirmed */ + ctx = updateRandomCtx(ctx) ckptKeeper.SetCheckpointConfirmed(ctx, epoch) + // ensure status is updated status, err = ckptKeeper.GetStatus(ctx, epoch) require.NoError(t, err) require.Equal(t, types.Confirmed, status) + // ensure state update of Confirmed is recorded + mockCkptWithMeta, err = ckptKeeper.GetRawCheckpoint(ctx, epoch) + require.NoError(t, err) + require.Len(t, mockCkptWithMeta.Lifecycle, 4) + require.Equal(t, 
curStateUpdate(ctx, types.Confirmed), mockCkptWithMeta.Lifecycle[3]) + + /* Confirmed -> Finalized */ + ctx = updateRandomCtx(ctx) ckptKeeper.SetCheckpointFinalized(ctx, epoch) + // ensure status is updated status, err = ckptKeeper.GetStatus(ctx, epoch) require.NoError(t, err) require.Equal(t, types.Finalized, status) + // ensure state update of Finalized is recorded + mockCkptWithMeta, err = ckptKeeper.GetRawCheckpoint(ctx, epoch) + require.NoError(t, err) + require.Len(t, mockCkptWithMeta.Lifecycle, 5) + require.Equal(t, curStateUpdate(ctx, types.Finalized), mockCkptWithMeta.Lifecycle[4]) }) } @@ -138,32 +188,32 @@ func FuzzKeeperCheckpointEpoch(f *testing.F) { ) // 1. check valid checkpoint - btcCkptBytes := makeBtcCkptBytes( + rawBtcCheckpoint := makeBtcCkptBytes( localCkptWithMeta.Ckpt.EpochNum, localCkptWithMeta.Ckpt.LastCommitHash.MustMarshal(), localCkptWithMeta.Ckpt.Bitmap, localCkptWithMeta.Ckpt.BlsMultiSig.Bytes(), t, ) - epoch, err := ckptKeeper.CheckpointEpoch(ctx, btcCkptBytes) + + err := ckptKeeper.VerifyCheckpoint(ctx, *rawBtcCheckpoint) require.NoError(t, err) - require.Equal(t, localCkptWithMeta.Ckpt.EpochNum, epoch) // 2. check a checkpoint with invalid sig - btcCkptBytes = makeBtcCkptBytes( + rawBtcCheckpoint = makeBtcCkptBytes( localCkptWithMeta.Ckpt.EpochNum, localCkptWithMeta.Ckpt.LastCommitHash.MustMarshal(), localCkptWithMeta.Ckpt.Bitmap, datagen.GenRandomByteArray(btctxformatter.BlsSigLength), t, ) - _, err = ckptKeeper.CheckpointEpoch(ctx, btcCkptBytes) + err = ckptKeeper.VerifyCheckpoint(ctx, *rawBtcCheckpoint) require.ErrorIs(t, err, types.ErrInvalidRawCheckpoint) // 3. check a conflicting checkpoint; signed on a random lastcommithash conflictLastCommitHash := datagen.GenRandomByteArray(btctxformatter.LastCommitHashLength) msgBytes = append(sdk.Uint64ToBigEndian(localCkptWithMeta.Ckpt.EpochNum), conflictLastCommitHash...) 
- btcCkptBytes = makeBtcCkptBytes( + rawBtcCheckpoint = makeBtcCkptBytes( localCkptWithMeta.Ckpt.EpochNum, conflictLastCommitHash, localCkptWithMeta.Ckpt.Bitmap, @@ -171,12 +221,12 @@ func FuzzKeeperCheckpointEpoch(f *testing.F) { t, ) require.Panics(t, func() { - _, _ = ckptKeeper.CheckpointEpoch(ctx, btcCkptBytes) + _ = ckptKeeper.VerifyCheckpoint(ctx, *rawBtcCheckpoint) }) }) } -func makeBtcCkptBytes(epoch uint64, lch []byte, bitmap []byte, blsSig []byte, t *testing.T) []byte { +func makeBtcCkptBytes(epoch uint64, lch []byte, bitmap []byte, blsSig []byte, t *testing.T) *btctxformatter.RawBtcCheckpoint { tag := datagen.GenRandomByteArray(btctxformatter.TagLength) babylonTag := btctxformatter.BabylonTag(tag[:btctxformatter.TagLength]) address := datagen.GenRandomByteArray(btctxformatter.AddressLength) @@ -201,5 +251,22 @@ func makeBtcCkptBytes(epoch uint64, lch []byte, bitmap []byte, blsSig []byte, t ckptData, err := btctxformatter.ConnectParts(btctxformatter.CurrentVersion, decodedFirst.Data, decodedSecond.Data) require.NoError(t, err) - return ckptData + rawCheckpoint, err := btctxformatter.DecodeRawCheckpoint(btctxformatter.CurrentVersion, ckptData) + require.NoError(t, err) + + return rawCheckpoint +} + +func curStateUpdate(ctx sdk.Context, status types.CheckpointStatus) *types.CheckpointStateUpdate { + height, time := ctx.BlockHeight(), ctx.BlockTime() + return &types.CheckpointStateUpdate{ + State: status, + BlockHeight: uint64(height), + BlockTime: &time, + } +} + +func updateRandomCtx(ctx sdk.Context) sdk.Context { + header := datagen.GenRandomTMHeader("test", datagen.RandomInt(1000)) + return ctx.WithBlockHeader(*header) } diff --git a/x/checkpointing/keeper/msg_server_test.go b/x/checkpointing/keeper/msg_server_test.go index 09628e884..786b93a1a 100644 --- a/x/checkpointing/keeper/msg_server_test.go +++ b/x/checkpointing/keeper/msg_server_test.go @@ -4,6 +4,7 @@ import ( "math/rand" "testing" + "cosmossdk.io/math" "github.com/babylonchain/babylon/app" 
appparams "github.com/babylonchain/babylon/app/params" "github.com/babylonchain/babylon/crypto/bls12381" @@ -19,6 +20,82 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" ) +// FuzzWrappedCreateValidator_InsufficientTokens tests adding new validators with zero voting power +// It ensures that validators with zero voting power (i.e., with tokens fewer than sdk.DefaultPowerReduction) +// are unbonded, thus are not included in the validator set +func FuzzWrappedCreateValidator_InsufficientTokens(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 4) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + // a genesis validator is generate for setup + helper := testepoching.NewHelper(t) + ek := helper.EpochingKeeper + ck := helper.App.CheckpointingKeeper + msgServer := checkpointingkeeper.NewMsgServerImpl(ck) + + // BeginBlock of block 1, and thus entering epoch 1 + ctx := helper.BeginBlock() + epoch := ek.GetEpoch(ctx) + require.Equal(t, uint64(1), epoch.EpochNumber) + + n := rand.Intn(3) + 1 + addrs := app.AddTestAddrs(helper.App, helper.Ctx, n, sdk.NewInt(100000000)) + + // add n new validators with zero voting power via MsgWrappedCreateValidator + wcvMsgs := make([]*types.MsgWrappedCreateValidator, n) + for i := 0; i < n; i++ { + msg, err := buildMsgWrappedCreateValidatorWithAmount(addrs[i], sdk.DefaultPowerReduction.SubRaw(1)) + require.NoError(t, err) + wcvMsgs[i] = msg + _, err = msgServer.WrappedCreateValidator(ctx, msg) + require.NoError(t, err) + blsPK, err := ck.GetBlsPubKey(ctx, sdk.ValAddress(addrs[i])) + require.NoError(t, err) + require.True(t, msg.Key.Pubkey.Equal(blsPK)) + } + require.Len(t, ek.GetCurrentEpochMsgs(ctx), n) + + // EndBlock of block 1 + ctx = helper.EndBlock() + + // go to BeginBlock of block 11, and thus entering epoch 2 + for i := uint64(0); i < ek.GetParams(ctx).EpochInterval; i++ { + ctx = helper.GenAndApplyEmptyBlock() + } + epoch = ek.GetEpoch(ctx) + require.Equal(t, uint64(2), epoch.EpochNumber) + // ensure 
epoch 2 has initialised an empty msg queue + require.Empty(t, ek.GetCurrentEpochMsgs(ctx)) + + // ensure the length of current validator set equals to 1 + // since one genesis validator was added when setup + // the rest n validators have zero voting power and thus are ruled out + valSet = ck.GetValidatorSet(ctx, 2) + require.Equal(t, 1, len(valSet)) + + // ensure all validators (not just validators in the val set) have correct bond status + // - the 1st validator is bonded + // - all the rest are unbonded since they have zero voting power + iterator := helper.StakingKeeper.ValidatorsPowerStoreIterator(ctx) + defer iterator.Close() + count := 0 + for ; iterator.Valid(); iterator.Next() { + valAddr := sdk.ValAddress(iterator.Value()) + val, found := helper.StakingKeeper.GetValidator(ctx, valAddr) + require.True(t, found) + count++ + if count == 1 { + require.Equal(t, stakingtypes.Bonded, val.Status) + } else { + require.Equal(t, stakingtypes.Unbonded, val.Status) + } + } + require.Equal(t, len(wcvMsgs)+1, count) + }) +} + // FuzzWrappedCreateValidator tests adding new validators via // MsgWrappedCreateValidator, which first registers BLS pubkey // and then unwrapped into MsgCreateValidator and enqueued into @@ -86,6 +163,31 @@ func FuzzWrappedCreateValidator(f *testing.F) { }) } +func TestInvalidLastCommitHash(t *testing.T) { + helper := testepoching.NewHelperWithValSet(t) + ck := helper.App.CheckpointingKeeper + msgServer := checkpointingkeeper.NewMsgServerImpl(ck) + // needed to init total voting power + helper.BeginBlock() + + epoch := uint64(1) + validLch := datagen.GenRandomByteArray(32) + // correct checkpoint for epoch 1 + _, err := ck.BuildRawCheckpoint(helper.Ctx, epoch, validLch) + require.NoError(t, err) + + // Malicious validator created message with valid bls signature but for invalid + // commit hash + invalidLch := datagen.GenRandomByteArray(32) + val0Info := helper.ValBlsPrivKeys[0] + signBytes := append(sdk.Uint64ToBigEndian(epoch), invalidLch...) 
+ sig := bls12381.Sign(val0Info.BlsKey, signBytes) + msg := types.NewMsgAddBlsSig(epoch, invalidLch, sig, val0Info.Address) + + _, err = msgServer.AddBlsSig(helper.Ctx, msg) + require.ErrorIs(t, err, types.ErrInvalidLastCommitHash) +} + func buildMsgWrappedCreateValidator(addr sdk.AccAddress) (*types.MsgWrappedCreateValidator, error) { tmValPrivkey := ed25519.GenPrivKey() bondTokens := sdk.TokensFromConsensusPower(10, sdk.DefaultPowerReduction) @@ -113,3 +215,30 @@ func buildMsgWrappedCreateValidator(addr sdk.AccAddress) (*types.MsgWrappedCreat return types.NewMsgWrappedCreateValidator(createValidatorMsg, &blsPubKey, pop) } + +func buildMsgWrappedCreateValidatorWithAmount(addr sdk.AccAddress, bondTokens math.Int) (*types.MsgWrappedCreateValidator, error) { + tmValPrivkey := ed25519.GenPrivKey() + bondCoin := sdk.NewCoin(appparams.DefaultBondDenom, bondTokens) + description := stakingtypes.NewDescription("foo_moniker", "", "", "", "") + commission := stakingtypes.NewCommissionRates(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()) + + pk, err := codec.FromTmPubKeyInterface(tmValPrivkey.PubKey()) + if err != nil { + return nil, err + } + + createValidatorMsg, err := stakingtypes.NewMsgCreateValidator( + sdk.ValAddress(addr), pk, bondCoin, description, commission, sdk.OneInt(), + ) + if err != nil { + return nil, err + } + blsPrivKey := bls12381.GenPrivKey() + pop, err := privval.BuildPoP(tmValPrivkey, blsPrivKey) + if err != nil { + return nil, err + } + blsPubKey := blsPrivKey.PubKey() + + return types.NewMsgWrappedCreateValidator(createValidatorMsg, &blsPubKey, pop) +} diff --git a/x/checkpointing/spec/01_state.md b/x/checkpointing/spec/01_state.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/02_keepers.md b/x/checkpointing/spec/02_keepers.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/03_messages.md b/x/checkpointing/spec/03_messages.md deleted file mode 100644 index e69de29bb..000000000 
diff --git a/x/checkpointing/spec/04_events.md b/x/checkpointing/spec/04_events.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/05_params.md b/x/checkpointing/spec/05_params.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/README.md b/x/checkpointing/spec/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/x/checkpointing/spec/registration.md b/x/checkpointing/spec/registration.md deleted file mode 100644 index ad49cee97..000000000 --- a/x/checkpointing/spec/registration.md +++ /dev/null @@ -1,43 +0,0 @@ -# Registration - -To participate in the checkpointing, a validator needs to also register its BLS public key. - -## Register a Validator - -The original registration is done via a transaction that carries a `MsgCreateValidator` message. -To register a BLS public key, we need a wrapper message called `MsgWrappedCreateValidator` processed by the `Checkpointing` module. -This message wraps the original `MsgCreateValidator` message as well as a BLS public key and a `Proof-of-Possession` (PoP) for registering BLS public key. -The execution of `MsgWrappedCreateValidator` is as follows. - -1. The `Checkpointing` module first processes `MsgWrappedCreateValidator` to register the validator's BLS key. If success, then -2. extract `MsgCreateValidator` and deliver `MsgCreateValidator` to the epoching module's message queue, which will be processed at the end of this epoch. If success, the registration is succeeded. -3. Otherwise, the registration fails and the validator should register again with the same keys. - -## Genesis - -Genesis validators are registered via the legacy `genutil` module from the Cosmos-SDK, which processes `MsgCreateValidator` messages contained in genesis transactions. -The BLS keys are registered as `GenesisState` in the checkpointing module. 
-The checkpointing module's `ValidateGenesis` should ensure that each genesis validator has both an Ed25519 key and BLS key which are bonded by PoP. - -## Proof of Possession - -The purpose of PoP is to prove that one validator owns: -1. the corresponding BLS private key; -2. the corresponding Ed25519 private key associated with the public key in the `MsgCreateValidator` message. - -To achieve that, PoP is calculated as follows. - -`PoP = sign(key = BLS_sk, data = sign(key = Ed25519_sk, data = BLS_pk)]` - -Since the delegator already relates its account with the validator's Ed25519 key through the signatures in `MsgCreateValidator`, the adversary cannot do registration with the same PoP. - -## Verification - -To verify PoP, first we need to ensure that the BLS public key has never been registered by a different validator, -and that the current validator hasn't already registered a different BLS public key. Then, verify - -``` -MsgWrappedCreateValidator.BLS_pk ?= decrypt(key = Ed25519_pk, data = decrypt(key = BLS_pk, data = PoP)) -``` - -If verification passes, the `Checkpointing` module stores the BLS public key and associates it with the validator. 
diff --git a/x/checkpointing/testckpt/helper.go b/x/checkpointing/testckpt/helper.go index 0efb6281a..b30e68b24 100644 --- a/x/checkpointing/testckpt/helper.go +++ b/x/checkpointing/testckpt/helper.go @@ -1,16 +1,25 @@ package testckpt import ( + "testing" + + "cosmossdk.io/math" "github.com/babylonchain/babylon/app" + appparams "github.com/babylonchain/babylon/app/params" + "github.com/babylonchain/babylon/crypto/bls12381" "github.com/babylonchain/babylon/testutil/datagen" "github.com/babylonchain/babylon/x/checkpointing/keeper" "github.com/babylonchain/babylon/x/checkpointing/types" + "github.com/babylonchain/babylon/x/epoching" epochingkeeper "github.com/babylonchain/babylon/x/epoching/keeper" "github.com/cosmos/cosmos-sdk/baseapp" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/stretchr/testify/require" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "testing" ) // Helper is a structure which wraps the entire app and exposes functionalities for testing the epoching module @@ -22,6 +31,7 @@ type Helper struct { CheckpointingKeeper *keeper.Keeper MsgSrvr types.MsgServer QueryClient types.QueryClient + StakingKeeper *stakingkeeper.Keeper EpochingKeeper *epochingkeeper.Keeper GenAccs []authtypes.GenesisAccount @@ -35,6 +45,7 @@ func NewHelper(t *testing.T, n int) *Helper { checkpointingKeeper := app.CheckpointingKeeper epochingKeeper := app.EpochingKeeper + stakingKeeper := app.StakingKeeper querier := keeper.Querier{Keeper: checkpointingKeeper} queryHelper := baseapp.NewQueryServerTestHelper(ctx, app.InterfaceRegistry()) types.RegisterQueryServer(queryHelper, querier) @@ -48,7 +59,56 @@ func NewHelper(t *testing.T, n int) *Helper { CheckpointingKeeper: &checkpointingKeeper, MsgSrvr: msgSrvr, QueryClient: 
queryClient, + StakingKeeper: &stakingKeeper, EpochingKeeper: &epochingKeeper, GenAccs: accs, } } + +// CreateValidator calls handler to create a new staking validator +func (h *Helper) CreateValidator(addr sdk.ValAddress, pk cryptotypes.PubKey, blsPK *bls12381.PublicKey, pop *types.ProofOfPossession, stakeAmount math.Int, ok bool) { + coin := sdk.NewCoin(appparams.DefaultBondDenom, stakeAmount) + h.createValidator(addr, pk, blsPK, pop, coin, ok) +} + +// CreateValidatorWithValPower calls handler to create a new staking validator with zero commission +func (h *Helper) CreateValidatorWithValPower(addr sdk.ValAddress, pk cryptotypes.PubKey, blsPK *bls12381.PublicKey, pop *types.ProofOfPossession, valPower int64, ok bool) math.Int { + amount := h.StakingKeeper.TokensFromConsensusPower(h.Ctx, valPower) + coin := sdk.NewCoin(appparams.DefaultBondDenom, amount) + h.createValidator(addr, pk, blsPK, pop, coin, ok) + return amount +} + +// CreateValidatorMsg returns a message used to create validator in this service. 
+func (h *Helper) CreateValidatorMsg(addr sdk.ValAddress, pk cryptotypes.PubKey, blsPK *bls12381.PublicKey, pop *types.ProofOfPossession, stakeAmount math.Int) *types.MsgWrappedCreateValidator { + coin := sdk.NewCoin(appparams.DefaultBondDenom, stakeAmount) + msg, err := stakingtypes.NewMsgCreateValidator(addr, pk, coin, stakingtypes.Description{}, ZeroCommission(), sdk.OneInt()) + require.NoError(h.t, err) + wmsg, err := types.NewMsgWrappedCreateValidator(msg, blsPK, pop) + require.NoError(h.t, err) + return wmsg +} + +func (h *Helper) createValidator(addr sdk.ValAddress, pk cryptotypes.PubKey, blsPK *bls12381.PublicKey, pop *types.ProofOfPossession, coin sdk.Coin, ok bool) { + msg := h.CreateValidatorMsg(addr, pk, blsPK, pop, coin.Amount) + h.Handle(msg, ok) +} + +// Handle calls epoching handler on a given message +func (h *Helper) Handle(msg sdk.Msg, ok bool) *sdk.Result { + handler := epoching.NewHandler(*h.EpochingKeeper) + res, err := handler(h.Ctx, msg) + if ok { + require.NoError(h.t, err) + require.NotNil(h.t, res) + } else { + require.Error(h.t, err) + require.Nil(h.t, res) + } + return res +} + +// ZeroCommission constructs a commission rates with all zeros. +func ZeroCommission() stakingtypes.CommissionRates { + return stakingtypes.NewCommissionRates(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()) +} diff --git a/x/checkpointing/types/checkpoint.pb.go b/x/checkpointing/types/checkpoint.pb.go index 491364660..7754a8e5b 100644 --- a/x/checkpointing/types/checkpoint.pb.go +++ b/x/checkpointing/types/checkpoint.pb.go @@ -10,15 +10,19 @@ import ( _ "github.com/cosmos/cosmos-proto" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "google.golang.org/protobuf/types/known/timestamppb" io "io" math "math" math_bits "math/bits" + time "time" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -134,6 +138,9 @@ type RawCheckpointWithMeta struct { BlsAggrPk *github_com_babylonchain_babylon_crypto_bls12381.PublicKey `protobuf:"bytes,3,opt,name=bls_aggr_pk,json=blsAggrPk,proto3,customtype=github.com/babylonchain/babylon/crypto/bls12381.PublicKey" json:"bls_aggr_pk,omitempty"` // power_sum defines the accumulated voting power for the checkpoint PowerSum uint64 `protobuf:"varint,4,opt,name=power_sum,json=powerSum,proto3" json:"power_sum,omitempty"` + // lifecycle defines the lifecycle of this checkpoint, i.e., each state transition and + // the time (in both timestamp and block height) of this transition. + Lifecycle []*CheckpointStateUpdate `protobuf:"bytes,5,rep,name=lifecycle,proto3" json:"lifecycle,omitempty"` } func (m *RawCheckpointWithMeta) Reset() { *m = RawCheckpointWithMeta{} } @@ -190,6 +197,76 @@ func (m *RawCheckpointWithMeta) GetPowerSum() uint64 { return 0 } +func (m *RawCheckpointWithMeta) GetLifecycle() []*CheckpointStateUpdate { + if m != nil { + return m.Lifecycle + } + return nil +} + +type CheckpointStateUpdate struct { + // state defines the event of a state transition towards this state + State CheckpointStatus `protobuf:"varint,1,opt,name=state,proto3,enum=babylon.checkpointing.v1.CheckpointStatus" json:"state,omitempty"` + // block_height is the height of the Babylon block that triggers the state update + BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` + // block_time is the timestamp in the Babylon block that triggers the state update + BlockTime *time.Time `protobuf:"bytes,3,opt,name=block_time,json=blockTime,proto3,stdtime" json:"block_time,omitempty"` +} + +func (m *CheckpointStateUpdate) Reset() { *m = CheckpointStateUpdate{} } +func (m 
*CheckpointStateUpdate) String() string { return proto.CompactTextString(m) } +func (*CheckpointStateUpdate) ProtoMessage() {} +func (*CheckpointStateUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_63ff05f0a47b36f7, []int{2} +} +func (m *CheckpointStateUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckpointStateUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckpointStateUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckpointStateUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckpointStateUpdate.Merge(m, src) +} +func (m *CheckpointStateUpdate) XXX_Size() int { + return m.Size() +} +func (m *CheckpointStateUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_CheckpointStateUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckpointStateUpdate proto.InternalMessageInfo + +func (m *CheckpointStateUpdate) GetState() CheckpointStatus { + if m != nil { + return m.State + } + return Accumulating +} + +func (m *CheckpointStateUpdate) GetBlockHeight() uint64 { + if m != nil { + return m.BlockHeight + } + return 0 +} + +func (m *CheckpointStateUpdate) GetBlockTime() *time.Time { + if m != nil { + return m.BlockTime + } + return nil +} + // BlsSig wraps the BLS sig with meta data. 
type BlsSig struct { // epoch_num defines the epoch number that the BLS sig is signed on @@ -207,7 +284,7 @@ func (m *BlsSig) Reset() { *m = BlsSig{} } func (m *BlsSig) String() string { return proto.CompactTextString(m) } func (*BlsSig) ProtoMessage() {} func (*BlsSig) Descriptor() ([]byte, []int) { - return fileDescriptor_63ff05f0a47b36f7, []int{2} + return fileDescriptor_63ff05f0a47b36f7, []int{3} } func (m *BlsSig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -254,6 +331,7 @@ func init() { proto.RegisterEnum("babylon.checkpointing.v1.CheckpointStatus", CheckpointStatus_name, CheckpointStatus_value) proto.RegisterType((*RawCheckpoint)(nil), "babylon.checkpointing.v1.RawCheckpoint") proto.RegisterType((*RawCheckpointWithMeta)(nil), "babylon.checkpointing.v1.RawCheckpointWithMeta") + proto.RegisterType((*CheckpointStateUpdate)(nil), "babylon.checkpointing.v1.CheckpointStateUpdate") proto.RegisterType((*BlsSig)(nil), "babylon.checkpointing.v1.BlsSig") } @@ -262,47 +340,56 @@ func init() { } var fileDescriptor_63ff05f0a47b36f7 = []byte{ - // 640 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0x5f, 0x6b, 0xd3, 0x50, - 0x1c, 0x6d, 0xb6, 0x52, 0xd7, 0xbb, 0xb5, 0x84, 0xe0, 0xa4, 0x56, 0xc8, 0xca, 0x40, 0x1d, 0x7b, - 0x48, 0xd9, 0x86, 0xe0, 0xdf, 0x87, 0x34, 0xed, 0xb4, 0xac, 0xed, 0x46, 0xd2, 0x2a, 0x0c, 0x24, - 0xdc, 0xa4, 0x31, 0xb9, 0xf4, 0xde, 0xdc, 0x90, 0x7b, 0xe3, 0xac, 0x9f, 0x40, 0xf6, 0xe4, 0xb3, - 0x30, 0x10, 0xfc, 0x32, 0x3e, 0xee, 0x51, 0xf6, 0x30, 0x64, 0x7b, 0x19, 0xfa, 0x25, 0x24, 0x37, - 0x65, 0xae, 0x1b, 0x43, 0x10, 0x7d, 0xcb, 0xef, 0xe4, 0x9c, 0x1f, 0xf7, 0x9c, 0x73, 0xb9, 0xe0, - 0x9e, 0x03, 0x9d, 0x31, 0xa6, 0x61, 0xdd, 0x0d, 0x3c, 0x77, 0x14, 0x51, 0x14, 0x72, 0x14, 0xfa, - 0x17, 0x26, 0x2d, 0x8a, 0x29, 0xa7, 0x4a, 0x65, 0xc2, 0xd3, 0xa6, 0x78, 0xda, 0xdb, 0xb5, 0xea, - 0x6d, 0x97, 0x32, 0x42, 0x99, 0x2d, 0x78, 0xf5, 0x6c, 0xc8, 0x44, 0xd5, 0x9b, 0x3e, 0xf5, 0x69, - 
0x86, 0xa7, 0x5f, 0x19, 0xba, 0xfc, 0x53, 0x02, 0x25, 0x13, 0xee, 0x19, 0xe7, 0x8b, 0x94, 0x3b, - 0xa0, 0xe8, 0x45, 0xd4, 0x0d, 0xec, 0x30, 0x21, 0x15, 0xa9, 0x26, 0xad, 0xe4, 0xcd, 0x39, 0x01, - 0xf4, 0x12, 0xa2, 0x3c, 0x05, 0x32, 0x86, 0x8c, 0xdb, 0x2e, 0x25, 0x04, 0x71, 0x3b, 0x80, 0x2c, - 0xa8, 0xcc, 0xd4, 0xa4, 0x95, 0x85, 0x86, 0x72, 0x74, 0xbc, 0x54, 0xee, 0x40, 0xc6, 0x0d, 0xf1, - 0xeb, 0x05, 0x64, 0x81, 0x59, 0xc6, 0x53, 0xb3, 0x72, 0x0b, 0x14, 0x1c, 0xc4, 0x09, 0x8c, 0x2a, - 0xb3, 0xa9, 0xc6, 0x9c, 0x4c, 0x0a, 0x04, 0x25, 0x07, 0x33, 0x9b, 0x24, 0x98, 0x23, 0x9b, 0x21, - 0xbf, 0x92, 0x17, 0x2b, 0x9f, 0x1d, 0x1d, 0x2f, 0x3d, 0xf2, 0x11, 0x0f, 0x12, 0x47, 0x73, 0x29, - 0xa9, 0x4f, 0x5c, 0xbb, 0x01, 0x44, 0x61, 0xfd, 0x3c, 0xaa, 0x78, 0x1c, 0x71, 0x5a, 0x77, 0x30, - 0x5b, 0x5b, 0xdf, 0x78, 0xb8, 0xa6, 0x59, 0xc8, 0x0f, 0x21, 0x4f, 0x62, 0xcf, 0x9c, 0x77, 0x30, - 0xeb, 0xa6, 0x2b, 0x2d, 0xe4, 0x3f, 0xce, 0x9f, 0x7d, 0x5e, 0x92, 0x96, 0x3f, 0xcd, 0x80, 0xc5, - 0x29, 0xb7, 0xaf, 0x10, 0x0f, 0xba, 0x1e, 0x87, 0xca, 0x13, 0x90, 0x77, 0x47, 0x11, 0x17, 0x86, - 0xe7, 0xd7, 0xef, 0x6b, 0xd7, 0x25, 0xac, 0x4d, 0xc9, 0x4d, 0x21, 0x52, 0x1a, 0xa0, 0xc0, 0x38, - 0xe4, 0x09, 0x13, 0x59, 0x94, 0xd7, 0x57, 0xaf, 0x97, 0xff, 0xd6, 0x5a, 0x42, 0x61, 0x4e, 0x94, - 0xca, 0x6b, 0x90, 0x9e, 0xd7, 0x86, 0xbe, 0x1f, 0xdb, 0xd1, 0x28, 0x0b, 0xe8, 0xef, 0x12, 0xd8, - 0x49, 0x1c, 0x8c, 0xdc, 0x2d, 0x6f, 0x6c, 0x16, 0x1d, 0xcc, 0x74, 0xdf, 0x8f, 0x77, 0x46, 0x69, - 0xab, 0x11, 0xdd, 0xf3, 0x62, 0x9b, 0x25, 0x44, 0xc4, 0x9b, 0x37, 0xe7, 0x04, 0x60, 0x25, 0x64, - 0x12, 0xce, 0x99, 0x04, 0x0a, 0x0d, 0xcc, 0x2c, 0xe4, 0xff, 0xcf, 0x3b, 0xf0, 0x12, 0xdc, 0x48, - 0x7d, 0xa6, 0x2d, 0xcf, 0xfe, 0x8b, 0x96, 0x0b, 0x4e, 0x76, 0xe4, 0xbb, 0xa0, 0xcc, 0x90, 0x1f, - 0x7a, 0xb1, 0x0d, 0x87, 0xc3, 0xd8, 0x63, 0x4c, 0xb8, 0x2c, 0x9a, 0xa5, 0x0c, 0xd5, 0x33, 0x50, - 0x58, 0xcd, 0xad, 0xfe, 0x90, 0x80, 0x7c, 0xb9, 0x09, 0x45, 0x03, 0x15, 0x63, 0x6b, 0xa7, 0x6f, - 0x5b, 0x7d, 0xbd, 0x3f, 0xb0, 0x6c, 
0xdd, 0x30, 0x06, 0xdd, 0x41, 0x47, 0xef, 0xb7, 0x7b, 0xcf, - 0xe5, 0x5c, 0x55, 0xde, 0x3f, 0xa8, 0x2d, 0xe8, 0xae, 0x9b, 0x90, 0x04, 0xc3, 0xb4, 0x4d, 0x65, - 0x19, 0x28, 0x17, 0xf9, 0x56, 0x4b, 0xef, 0xb4, 0x9a, 0xb2, 0x54, 0x05, 0xfb, 0x07, 0xb5, 0x82, - 0xe5, 0x41, 0xec, 0x0d, 0x95, 0x15, 0xb0, 0x38, 0xc5, 0x19, 0x34, 0xba, 0xed, 0x7e, 0xbf, 0xd5, - 0x94, 0x67, 0xaa, 0xa5, 0xfd, 0x83, 0x5a, 0xd1, 0x4a, 0x1c, 0x82, 0x38, 0xbf, 0xca, 0x34, 0xb6, - 0x7b, 0x9b, 0x6d, 0xb3, 0xdb, 0x6a, 0xca, 0xb3, 0x19, 0xd3, 0xa0, 0xe1, 0x1b, 0x14, 0x93, 0xab, - 0xcc, 0xcd, 0x76, 0x4f, 0xef, 0xb4, 0x77, 0x5b, 0x4d, 0x39, 0x9f, 0x31, 0x37, 0x51, 0x08, 0x31, - 0x7a, 0xef, 0x0d, 0xab, 0xf9, 0x0f, 0x5f, 0xd4, 0x5c, 0x63, 0xfb, 0xeb, 0x89, 0x2a, 0x1d, 0x9e, - 0xa8, 0xd2, 0xf7, 0x13, 0x55, 0xfa, 0x78, 0xaa, 0xe6, 0x0e, 0x4f, 0xd5, 0xdc, 0xb7, 0x53, 0x35, - 0xb7, 0xfb, 0xe0, 0x4f, 0xb1, 0xbf, 0xbb, 0xf4, 0x12, 0xf1, 0x71, 0xe4, 0x31, 0xa7, 0x20, 0x9e, - 0x8e, 0x8d, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x0d, 0xb7, 0x12, 0xaf, 0x04, 0x00, 0x00, + // 773 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0xdb, 0x8e, 0xdb, 0x44, + 0x18, 0xc7, 0xe3, 0x5d, 0x37, 0x34, 0xb3, 0x07, 0x59, 0x23, 0x16, 0x85, 0x20, 0x25, 0x61, 0x25, + 0x20, 0xea, 0x85, 0xad, 0x4d, 0x85, 0xc4, 0x51, 0xe0, 0x1c, 0x96, 0x46, 0x4d, 0xd2, 0x95, 0x9d, + 0x80, 0x54, 0x09, 0x59, 0xe3, 0xc9, 0xec, 0x78, 0x94, 0xb1, 0xc7, 0xf2, 0x8c, 0x29, 0xe1, 0x09, + 0xd0, 0x5e, 0xf5, 0x05, 0x56, 0x42, 0xe2, 0x51, 0xb8, 0xe1, 0xb2, 0x97, 0xa8, 0x17, 0x05, 0xed, + 0xde, 0x14, 0x78, 0x09, 0xe4, 0x71, 0x7a, 0x48, 0xcb, 0x8a, 0x83, 0xe0, 0x2e, 0xdf, 0xdf, 0xff, + 0xff, 0xc4, 0xdf, 0xef, 0x9b, 0xcf, 0xe0, 0xed, 0x10, 0x85, 0x2b, 0x2e, 0x12, 0x07, 0x47, 0x04, + 0x2f, 0x53, 0xc1, 0x12, 0xc5, 0x12, 0xfa, 0x5c, 0x65, 0xa7, 0x99, 0x50, 0x02, 0xd6, 0xd7, 0x3e, + 0x7b, 0xc3, 0x67, 0x7f, 0x75, 0xd4, 0x68, 0x51, 0x21, 0x28, 0x27, 0x8e, 0xf6, 0x85, 0xf9, 0xa9, + 0xa3, 0x58, 0x4c, 0xa4, 
0x42, 0x71, 0x5a, 0x46, 0x1b, 0xaf, 0x63, 0x21, 0x63, 0x21, 0x03, 0x5d, + 0x39, 0x65, 0xb1, 0x7e, 0xf4, 0x2a, 0x15, 0x54, 0x94, 0x7a, 0xf1, 0xab, 0x54, 0x0f, 0x7f, 0x37, + 0xc0, 0x9e, 0x87, 0xee, 0xf5, 0x9f, 0xfe, 0x13, 0x7c, 0x03, 0xd4, 0x48, 0x2a, 0x70, 0x14, 0x24, + 0x79, 0x5c, 0x37, 0xda, 0x46, 0xc7, 0xf4, 0xae, 0x6b, 0x61, 0x9a, 0xc7, 0xf0, 0x23, 0x60, 0x71, + 0x24, 0x55, 0x80, 0x45, 0x1c, 0x33, 0x15, 0x44, 0x48, 0x46, 0xf5, 0xad, 0xb6, 0xd1, 0xd9, 0xed, + 0xc1, 0x87, 0x8f, 0x5a, 0xfb, 0x63, 0x24, 0x55, 0x5f, 0x3f, 0xba, 0x85, 0x64, 0xe4, 0xed, 0xf3, + 0x8d, 0x1a, 0xbe, 0x06, 0xaa, 0x21, 0x53, 0x31, 0x4a, 0xeb, 0xdb, 0x45, 0xc6, 0x5b, 0x57, 0x10, + 0x81, 0xbd, 0x90, 0xcb, 0x20, 0xce, 0xb9, 0x62, 0x81, 0x64, 0xb4, 0x6e, 0xea, 0x23, 0x3f, 0x7e, + 0xf8, 0xa8, 0xf5, 0x3e, 0x65, 0x2a, 0xca, 0x43, 0x1b, 0x8b, 0xd8, 0x59, 0x63, 0xc1, 0x11, 0x62, + 0x89, 0xf3, 0x94, 0x65, 0xb6, 0x4a, 0x95, 0x70, 0x42, 0x2e, 0x8f, 0xba, 0x37, 0xdf, 0x3b, 0xb2, + 0x7d, 0x46, 0x13, 0xa4, 0xf2, 0x8c, 0x78, 0x3b, 0x21, 0x97, 0x93, 0xe2, 0x48, 0x9f, 0xd1, 0x0f, + 0xcc, 0xc7, 0xdf, 0xb5, 0x8c, 0xc3, 0x5f, 0xb7, 0xc0, 0xc1, 0x46, 0xb7, 0x5f, 0x30, 0x15, 0x4d, + 0x88, 0x42, 0xf0, 0x43, 0x60, 0xe2, 0x65, 0xaa, 0x74, 0xc3, 0x3b, 0xdd, 0x77, 0xec, 0xab, 0x46, + 0x60, 0x6f, 0xc4, 0x3d, 0x1d, 0x82, 0x3d, 0x50, 0x95, 0x0a, 0xa9, 0x5c, 0x6a, 0x16, 0xfb, 0xdd, + 0x1b, 0x57, 0xc7, 0x9f, 0x65, 0x7d, 0x9d, 0xf0, 0xd6, 0x49, 0xf8, 0x25, 0x28, 0xde, 0x37, 0x40, + 0x94, 0x66, 0x41, 0xba, 0x2c, 0x01, 0xfd, 0x3b, 0x02, 0x27, 0x79, 0xc8, 0x19, 0xbe, 0x4d, 0x56, + 0x5e, 0x2d, 0xe4, 0xd2, 0xa5, 0x34, 0x3b, 0x59, 0x16, 0x53, 0x4d, 0xc5, 0x3d, 0x92, 0x05, 0x32, + 0x8f, 0x35, 0x5e, 0xd3, 0xbb, 0xae, 0x05, 0x3f, 0x8f, 0xe1, 0x04, 0xd4, 0x38, 0x3b, 0x25, 0x78, + 0x85, 0x39, 0xa9, 0x5f, 0x6b, 0x6f, 0x77, 0x76, 0xba, 0xce, 0xdf, 0x6d, 0x81, 0xcc, 0xd3, 0x05, + 0x52, 0xc4, 0x7b, 0x76, 0xc2, 0x9a, 0xf5, 0x0f, 0x06, 0x38, 0xf8, 0x53, 0x2b, 0xfc, 0x14, 0x5c, + 0x2b, 0x9a, 0x26, 0x1a, 0xf6, 0x3f, 0xa3, 0x55, 0x06, 0xe1, 
0x9b, 0x60, 0x37, 0xe4, 0x02, 0x2f, + 0x83, 0x88, 0x30, 0x1a, 0x29, 0x8d, 0xdd, 0x2c, 0x06, 0x2e, 0xf0, 0xf2, 0x96, 0x96, 0xe0, 0x27, + 0x00, 0x94, 0x96, 0x62, 0x45, 0x34, 0xce, 0x9d, 0x6e, 0xc3, 0x2e, 0xf7, 0xc7, 0x7e, 0xb2, 0x3f, + 0xf6, 0xec, 0xc9, 0xfe, 0xf4, 0xcc, 0xfb, 0x3f, 0xb7, 0x8c, 0x82, 0x98, 0xc0, 0xcb, 0x42, 0x5d, + 0x77, 0xf1, 0xd8, 0x00, 0xd5, 0x1e, 0x97, 0x3e, 0xa3, 0xff, 0xe7, 0x62, 0x7c, 0x0e, 0x5e, 0x29, + 0x86, 0x5f, 0x5c, 0xfd, 0xed, 0xff, 0xe2, 0xea, 0x57, 0xc3, 0xf2, 0x95, 0xdf, 0x02, 0xfb, 0x92, + 0xd1, 0x84, 0x64, 0x01, 0x5a, 0x2c, 0x32, 0x22, 0xa5, 0x1e, 0x7d, 0xcd, 0xdb, 0x2b, 0x55, 0xb7, + 0x14, 0x75, 0xab, 0x95, 0x1b, 0xbf, 0x19, 0xc0, 0x7a, 0x11, 0x38, 0xb4, 0x41, 0xbd, 0x7f, 0xfb, + 0x64, 0x16, 0xf8, 0x33, 0x77, 0x36, 0xf7, 0x03, 0xb7, 0xdf, 0x9f, 0x4f, 0xe6, 0x63, 0x77, 0x36, + 0x9a, 0x7e, 0x66, 0x55, 0x1a, 0xd6, 0xd9, 0x79, 0x7b, 0xd7, 0xc5, 0x38, 0x8f, 0x73, 0x8e, 0x8a, + 0xa1, 0xc1, 0x43, 0x00, 0x9f, 0xf7, 0xfb, 0x43, 0x77, 0x3c, 0x1c, 0x58, 0x46, 0x03, 0x9c, 0x9d, + 0xb7, 0xab, 0x3e, 0x41, 0x9c, 0x2c, 0x60, 0x07, 0x1c, 0x6c, 0x78, 0xe6, 0xbd, 0xc9, 0x68, 0x36, + 0x1b, 0x0e, 0xac, 0xad, 0xc6, 0xde, 0xd9, 0x79, 0xbb, 0xe6, 0xe7, 0x61, 0xcc, 0x94, 0x7a, 0xd9, + 0xd9, 0xbf, 0x33, 0x3d, 0x1e, 0x79, 0x93, 0xe1, 0xc0, 0xda, 0x2e, 0x9d, 0x7d, 0x91, 0x9c, 0xb2, + 0x2c, 0x7e, 0xd9, 0x79, 0x3c, 0x9a, 0xba, 0xe3, 0xd1, 0xdd, 0xe1, 0xc0, 0x32, 0x4b, 0xe7, 0x31, + 0x4b, 0x10, 0x67, 0xdf, 0x90, 0x45, 0xc3, 0xfc, 0xf6, 0xfb, 0x66, 0xa5, 0x77, 0xe7, 0xc7, 0x8b, + 0xa6, 0xf1, 0xe0, 0xa2, 0x69, 0xfc, 0x72, 0xd1, 0x34, 0xee, 0x5f, 0x36, 0x2b, 0x0f, 0x2e, 0x9b, + 0x95, 0x9f, 0x2e, 0x9b, 0x95, 0xbb, 0xef, 0xfe, 0x15, 0xf6, 0xaf, 0x5f, 0xf8, 0x7e, 0xab, 0x55, + 0x4a, 0x64, 0x58, 0xd5, 0x77, 0xea, 0xe6, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xd0, 0xbc, + 0xe5, 0xe5, 0x05, 0x00, 0x00, } func (this *RawCheckpoint) Equal(that interface{}) bool { @@ -381,6 +468,48 @@ func (this *RawCheckpointWithMeta) Equal(that interface{}) bool { if this.PowerSum != 
that1.PowerSum { return false } + if len(this.Lifecycle) != len(that1.Lifecycle) { + return false + } + for i := range this.Lifecycle { + if !this.Lifecycle[i].Equal(that1.Lifecycle[i]) { + return false + } + } + return true +} +func (this *CheckpointStateUpdate) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CheckpointStateUpdate) + if !ok { + that2, ok := that.(CheckpointStateUpdate) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.State != that1.State { + return false + } + if this.BlockHeight != that1.BlockHeight { + return false + } + if that1.BlockTime == nil { + if this.BlockTime != nil { + return false + } + } else if !this.BlockTime.Equal(*that1.BlockTime) { + return false + } return true } func (m *RawCheckpoint) Marshal() (dAtA []byte, err error) { @@ -462,6 +591,20 @@ func (m *RawCheckpointWithMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Lifecycle) > 0 { + for iNdEx := len(m.Lifecycle) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Lifecycle[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckpoint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if m.PowerSum != 0 { i = encodeVarintCheckpoint(dAtA, i, uint64(m.PowerSum)) i-- @@ -499,6 +642,49 @@ func (m *RawCheckpointWithMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *CheckpointStateUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointStateUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckpointStateUpdate) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockTime != nil { + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.BlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.BlockTime):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintCheckpoint(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1a + } + if m.BlockHeight != 0 { + i = encodeVarintCheckpoint(dAtA, i, uint64(m.BlockHeight)) + i-- + dAtA[i] = 0x10 + } + if m.State != 0 { + i = encodeVarintCheckpoint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *BlsSig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -613,6 +799,31 @@ func (m *RawCheckpointWithMeta) Size() (n int) { if m.PowerSum != 0 { n += 1 + sovCheckpoint(uint64(m.PowerSum)) } + if len(m.Lifecycle) > 0 { + for _, e := range m.Lifecycle { + l = e.Size() + n += 1 + l + sovCheckpoint(uint64(l)) + } + } + return n +} + +func (m *CheckpointStateUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovCheckpoint(uint64(m.State)) + } + if m.BlockHeight != 0 { + n += 1 + sovCheckpoint(uint64(m.BlockHeight)) + } + if m.BlockTime != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.BlockTime) + n += 1 + l + sovCheckpoint(uint64(l)) + } return n } @@ -957,6 +1168,164 @@ func (m *RawCheckpointWithMeta) Unmarshal(dAtA []byte) error { break } } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckpoint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckpoint + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Lifecycle = append(m.Lifecycle, &CheckpointStateUpdate{}) + if err := m.Lifecycle[len(m.Lifecycle)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCheckpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckpointStateUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckpointStateUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckpointStateUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= CheckpointStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) + } + m.BlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.BlockHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckpoint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckpoint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockTime == nil { + m.BlockTime = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.BlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipCheckpoint(dAtA[iNdEx:]) diff --git a/x/checkpointing/types/errors.go b/x/checkpointing/types/errors.go index ab9c5ed08..95fd9cdcb 100644 --- a/x/checkpointing/types/errors.go +++ b/x/checkpointing/types/errors.go @@ -16,4 +16,5 @@ var ( ErrBlsKeyAlreadyExist = sdkerrors.Register(ModuleName, 1210, "BLS public key already exists") ErrBlsPrivKeyDoesNotExist = sdkerrors.Register(ModuleName, 1211, "BLS private key does not exist") ErrConflictingCheckpoint = sdkerrors.Register(ModuleName, 1212, "Conflicting checkpoint is found") + ErrInvalidLastCommitHash = sdkerrors.Register(ModuleName, 1213, "Provided last commit hash is Invalid") ) diff --git a/x/checkpointing/types/expected_keepers.go b/x/checkpointing/types/expected_keepers.go index bc2c0417d..2d3645970 100644 --- a/x/checkpointing/types/expected_keepers.go +++ b/x/checkpointing/types/expected_keepers.go @@ -34,7 +34,9 @@ type EpochingKeeper interface { // CheckpointingHooks event hooks for raw checkpoint object (noalias) type CheckpointingHooks interface { - AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error // 
Must be called when a BLS key is registered - AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error // Must be called when a raw checkpoint is CONFIRMED - AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error // Must be called when a raw checkpoint is FINALIZED + AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error // Must be called when a BLS key is registered + AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error // Must be called when a raw checkpoint is CONFIRMED + AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *RawCheckpoint) error // Must be called when a raw checkpoint is FORGOTTEN + AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error // Must be called when a raw checkpoint is FINALIZED + AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *RawCheckpoint) error // Must be called when a raw checkpoint's multi-sig is verified } diff --git a/x/checkpointing/types/hooks.go b/x/checkpointing/types/hooks.go index 8e0519e63..50a97c406 100644 --- a/x/checkpointing/types/hooks.go +++ b/x/checkpointing/types/hooks.go @@ -31,6 +31,13 @@ func (h MultiCheckpointingHooks) AfterRawCheckpointConfirmed(ctx sdk.Context, ep return nil } +func (h MultiCheckpointingHooks) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *RawCheckpoint) error { + for i := range h { + return h[i].AfterRawCheckpointForgotten(ctx, ckpt) + } + return nil +} + func (h MultiCheckpointingHooks) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error { for i := range h { if err := h[i].AfterRawCheckpointFinalized(ctx, epoch); err != nil { @@ -39,3 +46,12 @@ func (h MultiCheckpointingHooks) AfterRawCheckpointFinalized(ctx sdk.Context, ep } return nil } + +func (h MultiCheckpointingHooks) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *RawCheckpoint) error { + for i := range h { + if err := h[i].AfterRawCheckpointBlsSigVerified(ctx, ckpt); err != nil { + return err + } + } + return nil +} diff --git 
a/x/checkpointing/types/msgs.go b/x/checkpointing/types/msgs.go index e5f551de0..aee1604ab 100644 --- a/x/checkpointing/types/msgs.go +++ b/x/checkpointing/types/msgs.go @@ -77,7 +77,12 @@ func (m *MsgWrappedCreateValidator) ValidateBasic() error { if err != nil { return err } - ok := m.VerifyPoP(m.MsgCreateValidator.Pubkey.GetCachedValue().(*ed255192.PubKey)) + var pubKey ed255192.PubKey + err = pubKey.Unmarshal(m.MsgCreateValidator.Pubkey.GetValue()) + if err != nil { + return err + } + ok := m.VerifyPoP(&pubKey) if !ok { return errors.New("the proof-of-possession is not valid") } diff --git a/x/checkpointing/types/querier.go b/x/checkpointing/types/querier.go index 984fcb043..b27ede678 100644 --- a/x/checkpointing/types/querier.go +++ b/x/checkpointing/types/querier.go @@ -22,3 +22,7 @@ func NewQueryEpochStatusRequest(epochNum uint64) *QueryEpochStatusRequest { func NewQueryRecentEpochStatusCountRequest(epochNum uint64) *QueryRecentEpochStatusCountRequest { return &QueryRecentEpochStatusCountRequest{EpochCount: epochNum} } + +func NewQueryLastCheckpointWithStatus(status CheckpointStatus) *QueryLastCheckpointWithStatusRequest { + return &QueryLastCheckpointWithStatusRequest{Status: status} +} diff --git a/x/checkpointing/types/query.pb.go b/x/checkpointing/types/query.pb.go index 428e18dca..5450cf6a9 100644 --- a/x/checkpointing/types/query.pb.go +++ b/x/checkpointing/types/query.pb.go @@ -142,118 +142,6 @@ func (m *QueryRawCheckpointListResponse) GetPagination() *query.PageResponse { return nil } -// QueryRecentRawCheckpointListRequest is the request type for the Query/RecentRawCheckpoints -// RPC method. -type QueryRecentRawCheckpointListRequest struct { - // from_epoch defines the start epoch of the query, which is inclusive - FromEpochNum uint64 `protobuf:"varint,1,opt,name=from_epoch_num,json=fromEpochNum,proto3" json:"from_epoch_num,omitempty"` - // pagination defines an optional pagination for the request. 
- Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryRecentRawCheckpointListRequest) Reset() { *m = QueryRecentRawCheckpointListRequest{} } -func (m *QueryRecentRawCheckpointListRequest) String() string { return proto.CompactTextString(m) } -func (*QueryRecentRawCheckpointListRequest) ProtoMessage() {} -func (*QueryRecentRawCheckpointListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{2} -} -func (m *QueryRecentRawCheckpointListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryRecentRawCheckpointListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryRecentRawCheckpointListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryRecentRawCheckpointListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRecentRawCheckpointListRequest.Merge(m, src) -} -func (m *QueryRecentRawCheckpointListRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryRecentRawCheckpointListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryRecentRawCheckpointListRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryRecentRawCheckpointListRequest proto.InternalMessageInfo - -func (m *QueryRecentRawCheckpointListRequest) GetFromEpochNum() uint64 { - if m != nil { - return m.FromEpochNum - } - return 0 -} - -func (m *QueryRecentRawCheckpointListRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryRecentRawCheckpointListResponse is the response type for the Query/RecentRawCheckpoints -// RPC method. 
-type QueryRecentRawCheckpointListResponse struct { - // the order is going from the newest to oldest based on the epoch number - RawCheckpoints []*RawCheckpointWithMeta `protobuf:"bytes,1,rep,name=raw_checkpoints,json=rawCheckpoints,proto3" json:"raw_checkpoints,omitempty"` - // pagination defines the pagination in the response. - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryRecentRawCheckpointListResponse) Reset() { *m = QueryRecentRawCheckpointListResponse{} } -func (m *QueryRecentRawCheckpointListResponse) String() string { return proto.CompactTextString(m) } -func (*QueryRecentRawCheckpointListResponse) ProtoMessage() {} -func (*QueryRecentRawCheckpointListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{3} -} -func (m *QueryRecentRawCheckpointListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryRecentRawCheckpointListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryRecentRawCheckpointListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryRecentRawCheckpointListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRecentRawCheckpointListResponse.Merge(m, src) -} -func (m *QueryRecentRawCheckpointListResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryRecentRawCheckpointListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryRecentRawCheckpointListResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryRecentRawCheckpointListResponse proto.InternalMessageInfo - -func (m *QueryRecentRawCheckpointListResponse) GetRawCheckpoints() []*RawCheckpointWithMeta { - if m != nil { - return m.RawCheckpoints - } - return nil -} - -func (m *QueryRecentRawCheckpointListResponse) 
GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - // QueryRawCheckpointRequest is the request type for the Query/RawCheckpoint // RPC method. type QueryRawCheckpointRequest struct { @@ -265,7 +153,7 @@ func (m *QueryRawCheckpointRequest) Reset() { *m = QueryRawCheckpointReq func (m *QueryRawCheckpointRequest) String() string { return proto.CompactTextString(m) } func (*QueryRawCheckpointRequest) ProtoMessage() {} func (*QueryRawCheckpointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{4} + return fileDescriptor_a0fdb8f0f85bb51e, []int{2} } func (m *QueryRawCheckpointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -311,7 +199,7 @@ func (m *QueryRawCheckpointResponse) Reset() { *m = QueryRawCheckpointRe func (m *QueryRawCheckpointResponse) String() string { return proto.CompactTextString(m) } func (*QueryRawCheckpointResponse) ProtoMessage() {} func (*QueryRawCheckpointResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{5} + return fileDescriptor_a0fdb8f0f85bb51e, []int{3} } func (m *QueryRawCheckpointResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -347,90 +235,6 @@ func (m *QueryRawCheckpointResponse) GetRawCheckpoint() *RawCheckpointWithMeta { return nil } -// QueryLatestCheckpointRequest is the request type for the Query/LatestCheckpoint -// RPC method. 
-type QueryLatestCheckpointRequest struct { -} - -func (m *QueryLatestCheckpointRequest) Reset() { *m = QueryLatestCheckpointRequest{} } -func (m *QueryLatestCheckpointRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLatestCheckpointRequest) ProtoMessage() {} -func (*QueryLatestCheckpointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{6} -} -func (m *QueryLatestCheckpointRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLatestCheckpointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLatestCheckpointRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLatestCheckpointRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLatestCheckpointRequest.Merge(m, src) -} -func (m *QueryLatestCheckpointRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLatestCheckpointRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLatestCheckpointRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLatestCheckpointRequest proto.InternalMessageInfo - -// QueryLatestCheckpointResponse is the response type for the Query/LatestCheckpoint -// RPC method. 
-type QueryLatestCheckpointResponse struct { - LatestCheckpoint *RawCheckpointWithMeta `protobuf:"bytes,1,opt,name=latest_checkpoint,json=latestCheckpoint,proto3" json:"latest_checkpoint,omitempty"` -} - -func (m *QueryLatestCheckpointResponse) Reset() { *m = QueryLatestCheckpointResponse{} } -func (m *QueryLatestCheckpointResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLatestCheckpointResponse) ProtoMessage() {} -func (*QueryLatestCheckpointResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{7} -} -func (m *QueryLatestCheckpointResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLatestCheckpointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLatestCheckpointResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLatestCheckpointResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLatestCheckpointResponse.Merge(m, src) -} -func (m *QueryLatestCheckpointResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLatestCheckpointResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLatestCheckpointResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLatestCheckpointResponse proto.InternalMessageInfo - -func (m *QueryLatestCheckpointResponse) GetLatestCheckpoint() *RawCheckpointWithMeta { - if m != nil { - return m.LatestCheckpoint - } - return nil -} - // QueryBlsPublicKeyListRequest is the request type for the Query/BlsPublicKeys // RPC method. 
type QueryBlsPublicKeyListRequest struct { @@ -444,7 +248,7 @@ func (m *QueryBlsPublicKeyListRequest) Reset() { *m = QueryBlsPublicKeyL func (m *QueryBlsPublicKeyListRequest) String() string { return proto.CompactTextString(m) } func (*QueryBlsPublicKeyListRequest) ProtoMessage() {} func (*QueryBlsPublicKeyListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{8} + return fileDescriptor_a0fdb8f0f85bb51e, []int{4} } func (m *QueryBlsPublicKeyListRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +303,7 @@ func (m *QueryBlsPublicKeyListResponse) Reset() { *m = QueryBlsPublicKey func (m *QueryBlsPublicKeyListResponse) String() string { return proto.CompactTextString(m) } func (*QueryBlsPublicKeyListResponse) ProtoMessage() {} func (*QueryBlsPublicKeyListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{9} + return fileDescriptor_a0fdb8f0f85bb51e, []int{5} } func (m *QueryBlsPublicKeyListResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -552,7 +356,7 @@ func (m *QueryEpochStatusRequest) Reset() { *m = QueryEpochStatusRequest func (m *QueryEpochStatusRequest) String() string { return proto.CompactTextString(m) } func (*QueryEpochStatusRequest) ProtoMessage() {} func (*QueryEpochStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{10} + return fileDescriptor_a0fdb8f0f85bb51e, []int{6} } func (m *QueryEpochStatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -598,7 +402,7 @@ func (m *QueryEpochStatusResponse) Reset() { *m = QueryEpochStatusRespon func (m *QueryEpochStatusResponse) String() string { return proto.CompactTextString(m) } func (*QueryEpochStatusResponse) ProtoMessage() {} func (*QueryEpochStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{11} + return fileDescriptor_a0fdb8f0f85bb51e, []int{7} } func (m *QueryEpochStatusResponse) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -645,7 +449,7 @@ func (m *QueryRecentEpochStatusCountRequest) Reset() { *m = QueryRecentE func (m *QueryRecentEpochStatusCountRequest) String() string { return proto.CompactTextString(m) } func (*QueryRecentEpochStatusCountRequest) ProtoMessage() {} func (*QueryRecentEpochStatusCountRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{12} + return fileDescriptor_a0fdb8f0f85bb51e, []int{8} } func (m *QueryRecentEpochStatusCountRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -693,7 +497,7 @@ func (m *QueryRecentEpochStatusCountResponse) Reset() { *m = QueryRecent func (m *QueryRecentEpochStatusCountResponse) String() string { return proto.CompactTextString(m) } func (*QueryRecentEpochStatusCountResponse) ProtoMessage() {} func (*QueryRecentEpochStatusCountResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{13} + return fileDescriptor_a0fdb8f0f85bb51e, []int{9} } func (m *QueryRecentEpochStatusCountResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -743,6 +547,94 @@ func (m *QueryRecentEpochStatusCountResponse) GetStatusCount() map[string]uint64 return nil } +type QueryLastCheckpointWithStatusRequest struct { + Status CheckpointStatus `protobuf:"varint,1,opt,name=status,proto3,enum=babylon.checkpointing.v1.CheckpointStatus" json:"status,omitempty"` +} + +func (m *QueryLastCheckpointWithStatusRequest) Reset() { *m = QueryLastCheckpointWithStatusRequest{} } +func (m *QueryLastCheckpointWithStatusRequest) String() string { return proto.CompactTextString(m) } +func (*QueryLastCheckpointWithStatusRequest) ProtoMessage() {} +func (*QueryLastCheckpointWithStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0fdb8f0f85bb51e, []int{10} +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLastCheckpointWithStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLastCheckpointWithStatusRequest.Merge(m, src) +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryLastCheckpointWithStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLastCheckpointWithStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLastCheckpointWithStatusRequest proto.InternalMessageInfo + +func (m *QueryLastCheckpointWithStatusRequest) GetStatus() CheckpointStatus { + if m != nil { + return m.Status + } + return Accumulating +} + +type QueryLastCheckpointWithStatusResponse struct { + RawCheckpoint *RawCheckpoint `protobuf:"bytes,1,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` +} + +func (m *QueryLastCheckpointWithStatusResponse) Reset() { *m = QueryLastCheckpointWithStatusResponse{} } +func (m *QueryLastCheckpointWithStatusResponse) String() string { return proto.CompactTextString(m) } +func (*QueryLastCheckpointWithStatusResponse) ProtoMessage() {} +func (*QueryLastCheckpointWithStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0fdb8f0f85bb51e, []int{11} +} +func (m *QueryLastCheckpointWithStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLastCheckpointWithStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLastCheckpointWithStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*QueryLastCheckpointWithStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLastCheckpointWithStatusResponse.Merge(m, src) +} +func (m *QueryLastCheckpointWithStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryLastCheckpointWithStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLastCheckpointWithStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLastCheckpointWithStatusResponse proto.InternalMessageInfo + +func (m *QueryLastCheckpointWithStatusResponse) GetRawCheckpoint() *RawCheckpoint { + if m != nil { + return m.RawCheckpoint + } + return nil +} + // QueryParamsRequest is request type for the Query/Params RPC method. type QueryParamsRequest struct { } @@ -751,7 +643,7 @@ func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } func (*QueryParamsRequest) ProtoMessage() {} func (*QueryParamsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{14} + return fileDescriptor_a0fdb8f0f85bb51e, []int{12} } func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -790,7 +682,7 @@ func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } func (*QueryParamsResponse) ProtoMessage() {} func (*QueryParamsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a0fdb8f0f85bb51e, []int{15} + return fileDescriptor_a0fdb8f0f85bb51e, []int{13} } func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -829,12 +721,8 @@ func (m *QueryParamsResponse) GetParams() Params { func init() { proto.RegisterType((*QueryRawCheckpointListRequest)(nil), "babylon.checkpointing.v1.QueryRawCheckpointListRequest") proto.RegisterType((*QueryRawCheckpointListResponse)(nil), "babylon.checkpointing.v1.QueryRawCheckpointListResponse") - 
proto.RegisterType((*QueryRecentRawCheckpointListRequest)(nil), "babylon.checkpointing.v1.QueryRecentRawCheckpointListRequest") - proto.RegisterType((*QueryRecentRawCheckpointListResponse)(nil), "babylon.checkpointing.v1.QueryRecentRawCheckpointListResponse") proto.RegisterType((*QueryRawCheckpointRequest)(nil), "babylon.checkpointing.v1.QueryRawCheckpointRequest") proto.RegisterType((*QueryRawCheckpointResponse)(nil), "babylon.checkpointing.v1.QueryRawCheckpointResponse") - proto.RegisterType((*QueryLatestCheckpointRequest)(nil), "babylon.checkpointing.v1.QueryLatestCheckpointRequest") - proto.RegisterType((*QueryLatestCheckpointResponse)(nil), "babylon.checkpointing.v1.QueryLatestCheckpointResponse") proto.RegisterType((*QueryBlsPublicKeyListRequest)(nil), "babylon.checkpointing.v1.QueryBlsPublicKeyListRequest") proto.RegisterType((*QueryBlsPublicKeyListResponse)(nil), "babylon.checkpointing.v1.QueryBlsPublicKeyListResponse") proto.RegisterType((*QueryEpochStatusRequest)(nil), "babylon.checkpointing.v1.QueryEpochStatusRequest") @@ -842,6 +730,8 @@ func init() { proto.RegisterType((*QueryRecentEpochStatusCountRequest)(nil), "babylon.checkpointing.v1.QueryRecentEpochStatusCountRequest") proto.RegisterType((*QueryRecentEpochStatusCountResponse)(nil), "babylon.checkpointing.v1.QueryRecentEpochStatusCountResponse") proto.RegisterMapType((map[string]uint64)(nil), "babylon.checkpointing.v1.QueryRecentEpochStatusCountResponse.StatusCountEntry") + proto.RegisterType((*QueryLastCheckpointWithStatusRequest)(nil), "babylon.checkpointing.v1.QueryLastCheckpointWithStatusRequest") + proto.RegisterType((*QueryLastCheckpointWithStatusResponse)(nil), "babylon.checkpointing.v1.QueryLastCheckpointWithStatusResponse") proto.RegisterType((*QueryParamsRequest)(nil), "babylon.checkpointing.v1.QueryParamsRequest") proto.RegisterType((*QueryParamsResponse)(nil), "babylon.checkpointing.v1.QueryParamsResponse") } @@ -849,72 +739,68 @@ func init() { func init() { 
proto.RegisterFile("babylon/checkpointing/query.proto", fileDescriptor_a0fdb8f0f85bb51e) } var fileDescriptor_a0fdb8f0f85bb51e = []byte{ - // 1032 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x97, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0x33, 0x4e, 0x1b, 0x91, 0xe7, 0x34, 0xb8, 0x43, 0x44, 0x82, 0x5b, 0xdc, 0xb0, 0xad, - 0x4a, 0x54, 0xc8, 0xae, 0xec, 0xfc, 0x54, 0x68, 0x22, 0xe1, 0x28, 0x20, 0xd4, 0x52, 0xc2, 0x22, - 0x0a, 0x42, 0x08, 0x6b, 0xec, 0x0e, 0xf6, 0x2a, 0xeb, 0x9d, 0x8d, 0x77, 0xd6, 0xc1, 0xaa, 0x72, - 0x29, 0x7f, 0x00, 0x48, 0x95, 0xf8, 0x27, 0x38, 0x71, 0xe3, 0x0c, 0x97, 0x22, 0x21, 0x54, 0x89, - 0x0b, 0x27, 0x84, 0x12, 0xfe, 0x10, 0xb4, 0xb3, 0xb3, 0xb1, 0x77, 0xd7, 0x13, 0xdb, 0xc1, 0x97, - 0xde, 0x36, 0x6f, 0xdf, 0x9b, 0xf9, 0x7c, 0xbf, 0x3b, 0xf3, 0x5e, 0x0c, 0x6f, 0x54, 0x49, 0xb5, - 0x63, 0x33, 0xc7, 0xa8, 0x35, 0x68, 0xed, 0xc0, 0x65, 0x96, 0xc3, 0x2d, 0xa7, 0x6e, 0x1c, 0xfa, - 0xb4, 0xd5, 0xd1, 0xdd, 0x16, 0xe3, 0x0c, 0x2f, 0xc8, 0x14, 0x3d, 0x96, 0xa2, 0xb7, 0x8b, 0xf9, - 0x9b, 0xfd, 0x8b, 0xab, 0xb6, 0x57, 0x39, 0xa0, 0xb2, 0x3c, 0x7f, 0xa7, 0xc6, 0xbc, 0x26, 0xf3, - 0x8c, 0x2a, 0xf1, 0x68, 0xb8, 0xae, 0xd1, 0x2e, 0x56, 0x29, 0x27, 0x45, 0xc3, 0x25, 0x75, 0xcb, - 0x21, 0xdc, 0x62, 0x8e, 0xcc, 0x9d, 0xab, 0xb3, 0x3a, 0x13, 0x8f, 0x46, 0xf0, 0x24, 0xa3, 0xd7, - 0xeb, 0x8c, 0xd5, 0x6d, 0x6a, 0x10, 0xd7, 0x32, 0x88, 0xe3, 0x30, 0x2e, 0x4a, 0x3c, 0xf9, 0x56, - 0xeb, 0x0f, 0xe1, 0x92, 0x16, 0x69, 0x46, 0x39, 0xb7, 0xfb, 0xe7, 0x74, 0xff, 0x0a, 0xf3, 0xb4, - 0x1f, 0x11, 0xbc, 0xfe, 0x71, 0x80, 0x68, 0x92, 0xa3, 0xdd, 0xb3, 0x97, 0xf7, 0x2d, 0x8f, 0x9b, - 0xf4, 0xd0, 0xa7, 0x1e, 0xc7, 0x65, 0x98, 0xf2, 0x38, 0xe1, 0xbe, 0xb7, 0x80, 0x16, 0xd1, 0xd2, - 0x6c, 0xe9, 0x8e, 0xae, 0x72, 0x47, 0xef, 0x2e, 0xf0, 0x89, 0xa8, 0x30, 0x65, 0x25, 0x7e, 0x0f, - 0xa0, 0xab, 0x7c, 0x21, 0xb3, 0x88, 0x96, 0xb2, 0xa5, 0xdb, 0x7a, 0x68, 0x93, 0x1e, 0xd8, 0xa4, - 0x87, 0xf6, 0x4b, 0x9b, 0xf4, 0x7d, 0x52, 0xa7, 
0x72, 0x7f, 0xb3, 0xa7, 0x52, 0xfb, 0x15, 0x41, - 0x41, 0x45, 0xeb, 0xb9, 0xcc, 0xf1, 0x28, 0xfe, 0x1c, 0x5e, 0x6e, 0x91, 0xa3, 0x4a, 0x97, 0x2d, - 0xe0, 0x9e, 0x5c, 0xca, 0x96, 0x0c, 0x35, 0x77, 0x6c, 0xb5, 0xcf, 0x2c, 0xde, 0xf8, 0x90, 0x72, - 0x62, 0xce, 0xb6, 0x7a, 0xc3, 0x1e, 0x7e, 0xbf, 0x8f, 0x88, 0x37, 0x07, 0x8a, 0x08, 0xb1, 0x62, - 0x2a, 0x9e, 0x22, 0xb8, 0x19, 0xaa, 0xa0, 0x35, 0xea, 0x70, 0xa5, 0xf3, 0xb7, 0x60, 0xf6, 0xeb, - 0x16, 0x6b, 0x56, 0xa8, 0xcb, 0x6a, 0x8d, 0x8a, 0xe3, 0x37, 0xc5, 0x17, 0xb8, 0x64, 0xce, 0x04, - 0xd1, 0xbd, 0x20, 0xf8, 0xc0, 0x6f, 0x8e, 0xcd, 0xdb, 0xdf, 0x10, 0xdc, 0x3a, 0x9f, 0xea, 0xc5, - 0x71, 0x78, 0x13, 0x5e, 0x4b, 0x1f, 0x93, 0xc8, 0xd6, 0x6b, 0x30, 0x9d, 0x74, 0xf4, 0x25, 0x2a, - 0xdd, 0xd4, 0x38, 0xe4, 0xfb, 0x55, 0x4a, 0xe9, 0x0f, 0x61, 0x36, 0x2e, 0x5d, 0xd4, 0x5f, 0x40, - 0xf9, 0x95, 0x98, 0x72, 0xad, 0x00, 0xd7, 0xc5, 0xae, 0xf7, 0x09, 0xa7, 0x1e, 0x4f, 0x21, 0x6b, - 0xc7, 0xf2, 0x92, 0xa6, 0xdf, 0x4b, 0xb0, 0x2f, 0xe1, 0xaa, 0x2d, 0xde, 0x8d, 0x81, 0x2d, 0x67, - 0x27, 0x76, 0xd1, 0xbe, 0x45, 0x92, 0xaf, 0x6c, 0x7b, 0xfb, 0x7e, 0xd5, 0xb6, 0x6a, 0xf7, 0x68, - 0xa7, 0xf7, 0xa4, 0x9e, 0x67, 0xe9, 0xd8, 0x0e, 0xe8, 0x1f, 0x51, 0xab, 0x4a, 0x53, 0x48, 0x17, - 0x1e, 0xc1, 0x7c, 0x9b, 0xd8, 0xd6, 0x23, 0xc2, 0x59, 0xab, 0x72, 0x64, 0xf1, 0x46, 0x45, 0x36, - 0xe6, 0xe8, 0x84, 0x2e, 0xab, 0xbd, 0x78, 0x18, 0x15, 0x06, 0x3e, 0x94, 0x6d, 0xef, 0x1e, 0xed, - 0x98, 0x73, 0xed, 0x74, 0x70, 0x8c, 0xa7, 0x74, 0x1d, 0xe6, 0x85, 0x1e, 0x71, 0x95, 0x65, 0xc7, - 0x1c, 0xe6, 0x8c, 0x7e, 0x05, 0x0b, 0xe9, 0x3a, 0x69, 0xc1, 0x18, 0xba, 0xb5, 0xb6, 0x07, 0x5a, - 0x4f, 0x23, 0xe8, 0xd9, 0x65, 0x97, 0xf9, 0xdd, 0x6b, 0x74, 0x03, 0xb2, 0x21, 0x62, 0x2d, 0x88, - 0x4a, 0x48, 0x10, 0x21, 0x91, 0xa7, 0xfd, 0x90, 0x89, 0xb5, 0xb9, 0xf4, 0x3a, 0x12, 0xf9, 0x1a, - 0x4c, 0x73, 0xcb, 0x0d, 0xbb, 0x5c, 0xa4, 0x95, 0x5b, 0xae, 0xc8, 0x4f, 0xee, 0x92, 0x49, 0xee, - 0x82, 0x0f, 0x61, 0x26, 0xc4, 0x96, 0x19, 0x93, 0xe2, 0x43, 0x3f, 0x50, 0xcb, 0x1e, 
0x02, 0x49, - 0xef, 0x89, 0xed, 0x39, 0xbc, 0xd5, 0x31, 0xb3, 0x5e, 0x37, 0x92, 0xdf, 0x81, 0x5c, 0x32, 0x01, - 0xe7, 0x60, 0xf2, 0x80, 0x76, 0x04, 0xfe, 0xb4, 0x19, 0x3c, 0xe2, 0x39, 0xb8, 0xdc, 0x26, 0xb6, - 0x4f, 0x25, 0x73, 0xf8, 0xc7, 0x56, 0x66, 0x13, 0x69, 0x73, 0x80, 0x05, 0xc4, 0xbe, 0x18, 0xd8, - 0xd1, 0x1d, 0xff, 0x14, 0x5e, 0x89, 0x45, 0xa5, 0x3b, 0x3b, 0x30, 0x15, 0x0e, 0x76, 0x79, 0x9d, - 0x17, 0xd5, 0xca, 0xc2, 0xca, 0xf2, 0xa5, 0x67, 0x7f, 0xdf, 0x98, 0x30, 0x65, 0x55, 0xe9, 0xc9, - 0x0c, 0x5c, 0x16, 0xeb, 0xe2, 0x5f, 0x10, 0x5c, 0x4d, 0x75, 0x75, 0xbc, 0x31, 0xc8, 0x29, 0xc5, - 0x74, 0xca, 0x6f, 0x8e, 0x5e, 0x18, 0x4a, 0xd2, 0xb6, 0x9e, 0xfc, 0xf9, 0xef, 0xd3, 0xcc, 0x2a, - 0x2e, 0x19, 0xfd, 0xff, 0x49, 0x69, 0x17, 0x8d, 0xc4, 0x80, 0x31, 0x1e, 0x87, 0xfe, 0x1f, 0xe3, - 0x53, 0x04, 0xf3, 0x8a, 0x01, 0x85, 0xb7, 0x87, 0xfa, 0xe8, 0x4a, 0x41, 0x3b, 0x17, 0x2d, 0x97, - 0xb2, 0x3e, 0x10, 0xb2, 0x76, 0xf1, 0xbb, 0xe7, 0xc8, 0x12, 0x4b, 0x54, 0x52, 0xea, 0xe2, 0x63, - 0xfe, 0x18, 0xff, 0x8c, 0xe0, 0x4a, 0x6c, 0x23, 0xbc, 0x32, 0x8a, 0xdb, 0x91, 0xa2, 0xd5, 0xd1, - 0x8a, 0xa4, 0x8e, 0xbb, 0x42, 0xc7, 0x3a, 0x5e, 0x1d, 0xf6, 0xf3, 0x18, 0x8f, 0xe3, 0xe8, 0xb9, - 0xe4, 0x98, 0xc2, 0xeb, 0x03, 0x40, 0x14, 0x73, 0x2f, 0xbf, 0x31, 0x72, 0x9d, 0xd4, 0xb0, 0x22, - 0x34, 0x2c, 0xe3, 0xb7, 0xd4, 0x1a, 0x52, 0xf3, 0x32, 0xb8, 0x20, 0xb9, 0xe4, 0x6c, 0x19, 0x88, - 0xae, 0x18, 0x89, 0x03, 0xd1, 0x55, 0x43, 0x4c, 0xdb, 0x16, 0xe8, 0x1b, 0x78, 0x4d, 0x8d, 0x1e, - 0x4c, 0x35, 0x57, 0x14, 0x8b, 0xe1, 0x16, 0xf3, 0xff, 0x27, 0x04, 0xd9, 0x9e, 0xbe, 0x86, 0x8b, - 0x03, 0x38, 0xd2, 0xc3, 0x27, 0x5f, 0x1a, 0xa5, 0x44, 0x52, 0xbf, 0x23, 0xa8, 0xd7, 0xf0, 0x8a, - 0x9a, 0x5a, 0x40, 0xc6, 0x60, 0x0d, 0xf9, 0xf3, 0xe0, 0x77, 0x04, 0xaf, 0xf6, 0xef, 0xc8, 0xf8, - 0xee, 0x05, 0x1b, 0x79, 0xa8, 0x64, 0xfb, 0x7f, 0x8d, 0x01, 0x6d, 0x4d, 0x88, 0x32, 0xf0, 0xf2, - 0x20, 0x51, 0x5b, 0xbd, 0x23, 0x08, 0x7f, 0x87, 0x60, 0x2a, 0xec, 0xc5, 0xf8, 0xed, 0x01, 0x00, - 0xb1, 0x11, 0x90, 0x5f, 
0x1e, 0x32, 0x5b, 0xe2, 0x2d, 0x09, 0x3c, 0x0d, 0x2f, 0xaa, 0xf1, 0xc2, - 0x21, 0x50, 0xfe, 0xe8, 0xd9, 0x49, 0x01, 0x3d, 0x3f, 0x29, 0xa0, 0x7f, 0x4e, 0x0a, 0xe8, 0xfb, - 0xd3, 0xc2, 0xc4, 0xf3, 0xd3, 0xc2, 0xc4, 0x5f, 0xa7, 0x85, 0x89, 0x2f, 0xd6, 0xea, 0x16, 0x6f, - 0xf8, 0x55, 0xbd, 0xc6, 0x9a, 0xd1, 0x2a, 0xb5, 0x06, 0xb1, 0x9c, 0xb3, 0x25, 0xbf, 0x49, 0x2c, - 0xca, 0x3b, 0x2e, 0xf5, 0xaa, 0x53, 0xe2, 0xd7, 0xe3, 0xca, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x91, 0xee, 0xc3, 0xd0, 0x4d, 0x0f, 0x00, 0x00, + // 974 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x5f, 0x6f, 0xdb, 0x54, + 0x14, 0xaf, 0xd3, 0xad, 0xa2, 0x27, 0xac, 0x94, 0x4b, 0xc5, 0x82, 0x37, 0xb2, 0xe2, 0xc1, 0x56, + 0x4d, 0xd4, 0x56, 0xd2, 0xbf, 0x2a, 0x5b, 0x91, 0x32, 0x15, 0x1e, 0x36, 0x4a, 0x31, 0x62, 0x20, + 0x1e, 0x88, 0x6e, 0xbc, 0xab, 0xc4, 0xd4, 0xf1, 0x75, 0x73, 0xaf, 0x53, 0xa2, 0x69, 0x2f, 0xf0, + 0x01, 0x40, 0x42, 0xe2, 0x4b, 0xf0, 0xc4, 0x1b, 0x6f, 0x48, 0xf0, 0xb2, 0x07, 0x84, 0x26, 0xf1, + 0x82, 0x84, 0x84, 0x50, 0xcb, 0x07, 0x41, 0xbe, 0xf7, 0xba, 0x89, 0x9d, 0x78, 0x4e, 0xba, 0xbc, + 0xd9, 0xc7, 0xe7, 0x77, 0xce, 0xef, 0x77, 0xee, 0xb9, 0xe7, 0x24, 0xf0, 0x46, 0x03, 0x37, 0x7a, + 0x1e, 0xf5, 0x2d, 0xa7, 0x45, 0x9c, 0xc3, 0x80, 0xba, 0x3e, 0x77, 0xfd, 0xa6, 0x75, 0x14, 0x92, + 0x4e, 0xcf, 0x0c, 0x3a, 0x94, 0x53, 0x54, 0x52, 0x2e, 0x66, 0xc2, 0xc5, 0xec, 0x56, 0xf4, 0xeb, + 0xa3, 0xc1, 0x0d, 0x8f, 0xd5, 0x0f, 0x89, 0x82, 0xeb, 0xb7, 0x1c, 0xca, 0xda, 0x94, 0x59, 0x0d, + 0xcc, 0x88, 0x8c, 0x6b, 0x75, 0x2b, 0x0d, 0xc2, 0x71, 0xc5, 0x0a, 0x70, 0xd3, 0xf5, 0x31, 0x77, + 0xa9, 0xaf, 0x7c, 0x97, 0x9a, 0xb4, 0x49, 0xc5, 0xa3, 0x15, 0x3d, 0x29, 0xeb, 0xd5, 0x26, 0xa5, + 0x4d, 0x8f, 0x58, 0x38, 0x70, 0x2d, 0xec, 0xfb, 0x94, 0x0b, 0x08, 0x53, 0x5f, 0x8d, 0xd1, 0x24, + 0x02, 0xdc, 0xc1, 0xed, 0xd8, 0xe7, 0xc6, 0x68, 0x9f, 0xfe, 0x9b, 0xf4, 0x33, 0x7e, 0xd4, 0xe0, + 0xf5, 0x8f, 0x22, 0x8a, 0x36, 0x3e, 0xbe, 0x7b, 0xf6, 0xf1, 
0xbe, 0xcb, 0xb8, 0x4d, 0x8e, 0x42, + 0xc2, 0x38, 0xaa, 0xc1, 0x1c, 0xe3, 0x98, 0x87, 0xac, 0xa4, 0x2d, 0x6b, 0x2b, 0x0b, 0xd5, 0x5b, + 0x66, 0x56, 0x75, 0xcc, 0x7e, 0x80, 0x8f, 0x05, 0xc2, 0x56, 0x48, 0xf4, 0x1e, 0x40, 0x5f, 0x79, + 0xa9, 0xb0, 0xac, 0xad, 0x14, 0xab, 0x37, 0x4c, 0x59, 0x26, 0x33, 0x2a, 0x93, 0x29, 0xcb, 0xaf, + 0xca, 0x64, 0x1e, 0xe0, 0x26, 0x51, 0xf9, 0xed, 0x01, 0xa4, 0xf1, 0x9b, 0x06, 0xe5, 0x2c, 0xb6, + 0x2c, 0xa0, 0x3e, 0x23, 0xe8, 0x33, 0x78, 0xa9, 0x83, 0x8f, 0xeb, 0x7d, 0x6e, 0x11, 0xef, 0xd9, + 0x95, 0x62, 0xd5, 0xca, 0xe6, 0x9d, 0x88, 0xf6, 0xa9, 0xcb, 0x5b, 0x1f, 0x10, 0x8e, 0xed, 0x85, + 0xce, 0xa0, 0x99, 0xa1, 0xf7, 0x47, 0x88, 0xb8, 0x99, 0x2b, 0x42, 0xd2, 0x4a, 0xa8, 0xd8, 0x86, + 0xd7, 0x86, 0x45, 0xc4, 0xe5, 0xbe, 0x02, 0xf3, 0x24, 0xa0, 0x4e, 0xab, 0xee, 0x87, 0x6d, 0x51, + 0xf1, 0x0b, 0xf6, 0x0b, 0xc2, 0xb0, 0x1f, 0xb6, 0x0d, 0x0e, 0xfa, 0x28, 0xa4, 0x92, 0xfe, 0x00, + 0x16, 0x92, 0xd2, 0x05, 0xfe, 0x1c, 0xca, 0x2f, 0x25, 0x94, 0x1b, 0xdf, 0x68, 0x70, 0x55, 0xa4, + 0xad, 0x79, 0xec, 0x20, 0x6c, 0x78, 0xae, 0x73, 0x8f, 0xf4, 0x06, 0x5b, 0xe4, 0x59, 0x9c, 0xa7, + 0x76, 0xf6, 0x7f, 0xc4, 0x9d, 0x3a, 0xcc, 0x42, 0xe9, 0x7f, 0x08, 0x97, 0xbb, 0xd8, 0x73, 0x1f, + 0x62, 0x4e, 0x3b, 0xf5, 0x63, 0x97, 0xb7, 0xea, 0xea, 0x5e, 0xc6, 0x2d, 0xb0, 0x9a, 0x5d, 0x88, + 0x07, 0x31, 0x30, 0x2a, 0x42, 0xcd, 0x63, 0xf7, 0x48, 0xcf, 0x5e, 0xea, 0x0e, 0x1b, 0xa7, 0xd8, + 0x06, 0x9b, 0x70, 0x59, 0xe8, 0xd9, 0x8b, 0x2a, 0xa5, 0x2e, 0xcc, 0x38, 0x4d, 0xf0, 0x05, 0x94, + 0x86, 0x71, 0xaa, 0x04, 0x53, 0xb8, 0xac, 0xc6, 0x1e, 0x18, 0xb2, 0xc9, 0x88, 0x43, 0x7c, 0x3e, + 0x90, 0xe5, 0x2e, 0x0d, 0xfb, 0x7d, 0x7a, 0x0d, 0x8a, 0x92, 0xa2, 0x13, 0x59, 0x15, 0x49, 0x10, + 0x26, 0xe1, 0x67, 0xfc, 0x50, 0x80, 0xeb, 0xcf, 0x8c, 0xa3, 0x28, 0x5f, 0x81, 0x79, 0xee, 0x06, + 0x75, 0x81, 0x8c, 0xb5, 0x72, 0x37, 0x10, 0xfe, 0xe9, 0x2c, 0x85, 0x74, 0x16, 0x74, 0x04, 0x2f, + 0x4a, 0xda, 0xca, 0x63, 0x56, 0x1c, 0xf4, 0x7e, 0xb6, 0xec, 0x31, 0x28, 0x99, 0x03, 0xb6, 0x3d, + 
0x9f, 0x77, 0x7a, 0x76, 0x91, 0xf5, 0x2d, 0xfa, 0x2e, 0x2c, 0xa6, 0x1d, 0xd0, 0x22, 0xcc, 0x1e, + 0x92, 0x9e, 0xa0, 0x3f, 0x6f, 0x47, 0x8f, 0x68, 0x09, 0x2e, 0x76, 0xb1, 0x17, 0x12, 0xc5, 0x59, + 0xbe, 0xec, 0x14, 0xb6, 0x35, 0xe3, 0x4b, 0x78, 0x53, 0x90, 0xb8, 0x8f, 0x19, 0x4f, 0x5e, 0xbe, + 0x64, 0x13, 0x4c, 0xe3, 0x2c, 0x8f, 0xe1, 0xad, 0x9c, 0x5c, 0xea, 0x14, 0xf6, 0x33, 0x66, 0xc7, + 0xcd, 0x31, 0x67, 0x47, 0x7a, 0x66, 0x2c, 0x01, 0x12, 0x89, 0x0f, 0xc4, 0x52, 0x52, 0x92, 0x8c, + 0x4f, 0xe0, 0x95, 0x84, 0x55, 0x25, 0xdf, 0x85, 0x39, 0xb9, 0xbc, 0x54, 0xd2, 0xe5, 0xec, 0xa4, + 0x12, 0x59, 0xbb, 0xf0, 0xe4, 0x9f, 0x6b, 0x33, 0xb6, 0x42, 0x55, 0x7f, 0x01, 0xb8, 0x28, 0xe2, + 0xa2, 0x5f, 0x35, 0x78, 0x79, 0x68, 0x37, 0xa0, 0xad, 0xbc, 0x76, 0xc8, 0xd8, 0x7d, 0xfa, 0xf6, + 0xe4, 0x40, 0x29, 0xc9, 0xd8, 0xf9, 0xfa, 0xcf, 0xff, 0xbe, 0x2f, 0xac, 0xa3, 0xaa, 0x35, 0x7a, + 0x11, 0x77, 0x2b, 0x56, 0x6a, 0x4d, 0x59, 0x8f, 0xe4, 0x99, 0x3d, 0x46, 0x3f, 0x6b, 0x70, 0x29, + 0x11, 0x19, 0xad, 0x4d, 0xc2, 0x23, 0x26, 0xbf, 0x3e, 0x19, 0x48, 0x11, 0xbf, 0x2d, 0x88, 0x6f, + 0xa2, 0xf5, 0x71, 0x89, 0x5b, 0x8f, 0xce, 0x46, 0xd5, 0xe3, 0xa8, 0xfe, 0x8b, 0xe9, 0xf9, 0x8c, + 0x36, 0x73, 0x88, 0x64, 0xac, 0x15, 0x7d, 0x6b, 0x62, 0x9c, 0xd2, 0x70, 0x47, 0x68, 0xd8, 0x42, + 0x1b, 0xd9, 0x1a, 0xa2, 0xcd, 0x10, 0x08, 0xb0, 0x58, 0x10, 0x09, 0x11, 0x3f, 0x69, 0x50, 0x1c, + 0x98, 0x0d, 0xa8, 0x92, 0xc3, 0x63, 0x78, 0x80, 0xeb, 0xd5, 0x49, 0x20, 0x8a, 0xf5, 0x3b, 0x82, + 0xf5, 0x06, 0x5a, 0xcb, 0x66, 0x2d, 0x48, 0x26, 0xc8, 0x5a, 0xea, 0x17, 0xd6, 0xef, 0x1a, 0xbc, + 0x3a, 0x7a, 0xaa, 0xa1, 0xdb, 0xe7, 0x1c, 0x86, 0x52, 0xc9, 0x9d, 0xe7, 0x1a, 0xa5, 0xc6, 0x86, + 0x10, 0x65, 0xa1, 0xd5, 0x3c, 0x51, 0x3b, 0x83, 0x63, 0x1c, 0xfd, 0xad, 0x41, 0x29, 0x6b, 0x66, + 0xa1, 0xdd, 0x1c, 0x4a, 0x39, 0x83, 0x55, 0x7f, 0xf7, 0xdc, 0x78, 0x25, 0x6a, 0x57, 0x88, 0xda, + 0x46, 0x9b, 0xd9, 0xa2, 0x3c, 0xcc, 0x78, 0x3d, 0x7d, 0x51, 0xe2, 0x0b, 0xfe, 0xad, 0x06, 0x73, + 0x72, 0x90, 0xa1, 0xb7, 0x73, 0xb8, 
0x24, 0xe6, 0xa7, 0xbe, 0x3a, 0xa6, 0xb7, 0xe2, 0xb9, 0x22, + 0x78, 0x1a, 0x68, 0x39, 0x9b, 0xa7, 0x9c, 0xa0, 0xb5, 0x0f, 0x9f, 0x9c, 0x94, 0xb5, 0xa7, 0x27, + 0x65, 0xed, 0xdf, 0x93, 0xb2, 0xf6, 0xdd, 0x69, 0x79, 0xe6, 0xe9, 0x69, 0x79, 0xe6, 0xaf, 0xd3, + 0xf2, 0xcc, 0xe7, 0x1b, 0x4d, 0x97, 0xb7, 0xc2, 0x86, 0xe9, 0xd0, 0x76, 0x1c, 0xc5, 0x69, 0x61, + 0xd7, 0x3f, 0x0b, 0xf9, 0x55, 0x2a, 0x28, 0xef, 0x05, 0x84, 0x35, 0xe6, 0xc4, 0xdf, 0x8b, 0xb5, + 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x28, 0x98, 0x2f, 0x2c, 0x6e, 0x0d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -931,18 +817,16 @@ const _ = grpc.SupportPackageIsVersion4 type QueryClient interface { // RawCheckpointList queries all checkpoints that match the given status. RawCheckpointList(ctx context.Context, in *QueryRawCheckpointListRequest, opts ...grpc.CallOption) (*QueryRawCheckpointListResponse, error) - // RawCheckpointList queries a list of checkpoints starting from a given epoch number to the current epoch number. - RecentRawCheckpointList(ctx context.Context, in *QueryRecentRawCheckpointListRequest, opts ...grpc.CallOption) (*QueryRecentRawCheckpointListResponse, error) // RawCheckpoint queries a checkpoints at a given epoch number. RawCheckpoint(ctx context.Context, in *QueryRawCheckpointRequest, opts ...grpc.CallOption) (*QueryRawCheckpointResponse, error) - // LatestCheckpoint queries the checkpoint with the highest epoch num. - LatestCheckpoint(ctx context.Context, in *QueryLatestCheckpointRequest, opts ...grpc.CallOption) (*QueryLatestCheckpointResponse, error) // BlsPublicKeyList queries a list of bls public keys of the validators at a given epoch number. 
BlsPublicKeyList(ctx context.Context, in *QueryBlsPublicKeyListRequest, opts ...grpc.CallOption) (*QueryBlsPublicKeyListResponse, error) // EpochStatus queries the status of the checkpoint at a given epoch EpochStatus(ctx context.Context, in *QueryEpochStatusRequest, opts ...grpc.CallOption) (*QueryEpochStatusResponse, error) // RecentEpochStatusCount queries the number of epochs with each status in recent epochs RecentEpochStatusCount(ctx context.Context, in *QueryRecentEpochStatusCountRequest, opts ...grpc.CallOption) (*QueryRecentEpochStatusCountResponse, error) + // LastCheckpointWithStatus queries the last checkpoint with a given status or a more matured status + LastCheckpointWithStatus(ctx context.Context, in *QueryLastCheckpointWithStatusRequest, opts ...grpc.CallOption) (*QueryLastCheckpointWithStatusResponse, error) // Parameters queries the parameters of the module. Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) } @@ -964,15 +848,6 @@ func (c *queryClient) RawCheckpointList(ctx context.Context, in *QueryRawCheckpo return out, nil } -func (c *queryClient) RecentRawCheckpointList(ctx context.Context, in *QueryRecentRawCheckpointListRequest, opts ...grpc.CallOption) (*QueryRecentRawCheckpointListResponse, error) { - out := new(QueryRecentRawCheckpointListResponse) - err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/RecentRawCheckpointList", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *queryClient) RawCheckpoint(ctx context.Context, in *QueryRawCheckpointRequest, opts ...grpc.CallOption) (*QueryRawCheckpointResponse, error) { out := new(QueryRawCheckpointResponse) err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/RawCheckpoint", in, out, opts...) 
@@ -982,15 +857,6 @@ func (c *queryClient) RawCheckpoint(ctx context.Context, in *QueryRawCheckpointR return out, nil } -func (c *queryClient) LatestCheckpoint(ctx context.Context, in *QueryLatestCheckpointRequest, opts ...grpc.CallOption) (*QueryLatestCheckpointResponse, error) { - out := new(QueryLatestCheckpointResponse) - err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/LatestCheckpoint", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *queryClient) BlsPublicKeyList(ctx context.Context, in *QueryBlsPublicKeyListRequest, opts ...grpc.CallOption) (*QueryBlsPublicKeyListResponse, error) { out := new(QueryBlsPublicKeyListResponse) err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/BlsPublicKeyList", in, out, opts...) @@ -1018,6 +884,15 @@ func (c *queryClient) RecentEpochStatusCount(ctx context.Context, in *QueryRecen return out, nil } +func (c *queryClient) LastCheckpointWithStatus(ctx context.Context, in *QueryLastCheckpointWithStatusRequest, opts ...grpc.CallOption) (*QueryLastCheckpointWithStatusResponse, error) { + out := new(QueryLastCheckpointWithStatusResponse) + err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/LastCheckpointWithStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { out := new(QueryParamsResponse) err := c.cc.Invoke(ctx, "/babylon.checkpointing.v1.Query/Params", in, out, opts...) @@ -1031,18 +906,16 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts . type QueryServer interface { // RawCheckpointList queries all checkpoints that match the given status. RawCheckpointList(context.Context, *QueryRawCheckpointListRequest) (*QueryRawCheckpointListResponse, error) - // RawCheckpointList queries a list of checkpoints starting from a given epoch number to the current epoch number. 
- RecentRawCheckpointList(context.Context, *QueryRecentRawCheckpointListRequest) (*QueryRecentRawCheckpointListResponse, error) // RawCheckpoint queries a checkpoints at a given epoch number. RawCheckpoint(context.Context, *QueryRawCheckpointRequest) (*QueryRawCheckpointResponse, error) - // LatestCheckpoint queries the checkpoint with the highest epoch num. - LatestCheckpoint(context.Context, *QueryLatestCheckpointRequest) (*QueryLatestCheckpointResponse, error) // BlsPublicKeyList queries a list of bls public keys of the validators at a given epoch number. BlsPublicKeyList(context.Context, *QueryBlsPublicKeyListRequest) (*QueryBlsPublicKeyListResponse, error) // EpochStatus queries the status of the checkpoint at a given epoch EpochStatus(context.Context, *QueryEpochStatusRequest) (*QueryEpochStatusResponse, error) // RecentEpochStatusCount queries the number of epochs with each status in recent epochs RecentEpochStatusCount(context.Context, *QueryRecentEpochStatusCountRequest) (*QueryRecentEpochStatusCountResponse, error) + // LastCheckpointWithStatus queries the last checkpoint with a given status or a more matured status + LastCheckpointWithStatus(context.Context, *QueryLastCheckpointWithStatusRequest) (*QueryLastCheckpointWithStatusResponse, error) // Parameters queries the parameters of the module. 
Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) } @@ -1054,15 +927,9 @@ type UnimplementedQueryServer struct { func (*UnimplementedQueryServer) RawCheckpointList(ctx context.Context, req *QueryRawCheckpointListRequest) (*QueryRawCheckpointListResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RawCheckpointList not implemented") } -func (*UnimplementedQueryServer) RecentRawCheckpointList(ctx context.Context, req *QueryRecentRawCheckpointListRequest) (*QueryRecentRawCheckpointListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RecentRawCheckpointList not implemented") -} func (*UnimplementedQueryServer) RawCheckpoint(ctx context.Context, req *QueryRawCheckpointRequest) (*QueryRawCheckpointResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RawCheckpoint not implemented") } -func (*UnimplementedQueryServer) LatestCheckpoint(ctx context.Context, req *QueryLatestCheckpointRequest) (*QueryLatestCheckpointResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LatestCheckpoint not implemented") -} func (*UnimplementedQueryServer) BlsPublicKeyList(ctx context.Context, req *QueryBlsPublicKeyListRequest) (*QueryBlsPublicKeyListResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BlsPublicKeyList not implemented") } @@ -1072,6 +939,9 @@ func (*UnimplementedQueryServer) EpochStatus(ctx context.Context, req *QueryEpoc func (*UnimplementedQueryServer) RecentEpochStatusCount(ctx context.Context, req *QueryRecentEpochStatusCountRequest) (*QueryRecentEpochStatusCountResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecentEpochStatusCount not implemented") } +func (*UnimplementedQueryServer) LastCheckpointWithStatus(ctx context.Context, req *QueryLastCheckpointWithStatusRequest) (*QueryLastCheckpointWithStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LastCheckpointWithStatus not 
implemented") +} func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") } @@ -1098,24 +968,6 @@ func _Query_RawCheckpointList_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _Query_RecentRawCheckpointList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryRecentRawCheckpointListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).RecentRawCheckpointList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.checkpointing.v1.Query/RecentRawCheckpointList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).RecentRawCheckpointList(ctx, req.(*QueryRecentRawCheckpointListRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Query_RawCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryRawCheckpointRequest) if err := dec(in); err != nil { @@ -1134,24 +986,6 @@ func _Query_RawCheckpoint_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } -func _Query_LatestCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLatestCheckpointRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).LatestCheckpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.checkpointing.v1.Query/LatestCheckpoint", - } - handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { - return srv.(QueryServer).LatestCheckpoint(ctx, req.(*QueryLatestCheckpointRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Query_BlsPublicKeyList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryBlsPublicKeyListRequest) if err := dec(in); err != nil { @@ -1206,44 +1040,54 @@ func _Query_RecentEpochStatusCount_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } -func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryParamsRequest) +func _Query_LastCheckpointWithStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryLastCheckpointWithStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).Params(ctx, in) + return srv.(QueryServer).LastCheckpointWithStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/babylon.checkpointing.v1.Query/Params", + FullMethod: "/babylon.checkpointing.v1.Query/LastCheckpointWithStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + return srv.(QueryServer).LastCheckpointWithStatus(ctx, req.(*QueryLastCheckpointWithStatusRequest)) } return interceptor(ctx, in, info, handler) } -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "babylon.checkpointing.v1.Query", - HandlerType: (*QueryServer)(nil), +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.checkpointing.v1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "babylon.checkpointing.v1.Query", + HandlerType: (*QueryServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "RawCheckpointList", Handler: _Query_RawCheckpointList_Handler, }, - { - MethodName: "RecentRawCheckpointList", - Handler: _Query_RecentRawCheckpointList_Handler, - }, { MethodName: "RawCheckpoint", Handler: _Query_RawCheckpoint_Handler, }, - { - MethodName: "LatestCheckpoint", - Handler: _Query_LatestCheckpoint_Handler, - }, { MethodName: "BlsPublicKeyList", Handler: _Query_BlsPublicKeyList_Handler, @@ -1256,6 +1100,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "RecentEpochStatusCount", Handler: _Query_RecentEpochStatusCount_Handler, }, + { + MethodName: "LastCheckpointWithStatus", + Handler: _Query_LastCheckpointWithStatus_Handler, + }, { MethodName: "Params", Handler: _Query_Params_Handler, @@ -1354,95 +1202,6 @@ func (m *QueryRawCheckpointListResponse) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *QueryRecentRawCheckpointListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryRecentRawCheckpointListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryRecentRawCheckpointListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := 
m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.FromEpochNum != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.FromEpochNum)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *QueryRecentRawCheckpointListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryRecentRawCheckpointListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryRecentRawCheckpointListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.RawCheckpoints) > 0 { - for iNdEx := len(m.RawCheckpoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.RawCheckpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *QueryRawCheckpointRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1506,64 +1265,6 @@ func (m *QueryRawCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *QueryLatestCheckpointRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLatestCheckpointRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - 
return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLatestCheckpointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *QueryLatestCheckpointResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLatestCheckpointResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLatestCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.LatestCheckpoint != nil { - { - size, err := m.LatestCheckpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *QueryBlsPublicKeyListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1787,6 +1488,69 @@ func (m *QueryRecentEpochStatusCountResponse) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, nil } +func (m *QueryLastCheckpointWithStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLastCheckpointWithStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLastCheckpointWithStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryLastCheckpointWithStatusResponse) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLastCheckpointWithStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLastCheckpointWithStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RawCheckpoint != nil { + { + size, err := m.RawCheckpoint.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1889,42 +1653,32 @@ func (m *QueryRawCheckpointListResponse) Size() (n int) { return n } -func (m *QueryRecentRawCheckpointListRequest) Size() (n int) { +func (m *QueryRawCheckpointRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.FromEpochNum != 0 { - n += 1 + sovQuery(uint64(m.FromEpochNum)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) } return n } -func (m *QueryRecentRawCheckpointListResponse) Size() (n int) { +func (m *QueryRawCheckpointResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.RawCheckpoints) > 0 { - for _, e := range m.RawCheckpoints { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() + if m.RawCheckpoint != nil { + l = m.RawCheckpoint.Size() n += 1 + l + sovQuery(uint64(l)) } return n } -func (m *QueryRawCheckpointRequest) Size() (n int) { +func (m *QueryBlsPublicKeyListRequest) Size() (n int) { if m == nil { return 0 } @@ -1933,92 +1687,92 @@ func (m *QueryRawCheckpointRequest) Size() (n int) { if 
m.EpochNum != 0 { n += 1 + sovQuery(uint64(m.EpochNum)) } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } return n } -func (m *QueryRawCheckpointResponse) Size() (n int) { +func (m *QueryBlsPublicKeyListResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.RawCheckpoint != nil { - l = m.RawCheckpoint.Size() + if len(m.ValidatorWithBlsKeys) > 0 { + for _, e := range m.ValidatorWithBlsKeys { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() n += 1 + l + sovQuery(uint64(l)) } return n } -func (m *QueryLatestCheckpointRequest) Size() (n int) { +func (m *QueryEpochStatusRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) + } return n } -func (m *QueryLatestCheckpointResponse) Size() (n int) { +func (m *QueryEpochStatusResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.LatestCheckpoint != nil { - l = m.LatestCheckpoint.Size() - n += 1 + l + sovQuery(uint64(l)) + if m.Status != 0 { + n += 1 + sovQuery(uint64(m.Status)) } return n } -func (m *QueryBlsPublicKeyListRequest) Size() (n int) { +func (m *QueryRecentEpochStatusCountRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.EpochNum != 0 { - n += 1 + sovQuery(uint64(m.EpochNum)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) + if m.EpochCount != 0 { + n += 1 + sovQuery(uint64(m.EpochCount)) } return n } -func (m *QueryBlsPublicKeyListResponse) Size() (n int) { +func (m *QueryRecentEpochStatusCountResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.ValidatorWithBlsKeys) > 0 { - for _, e := range m.ValidatorWithBlsKeys { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) + if m.TipEpoch != 0 { + n += 1 + 
sovQuery(uint64(m.TipEpoch)) } - return n -} - -func (m *QueryEpochStatusRequest) Size() (n int) { - if m == nil { - return 0 + if m.EpochCount != 0 { + n += 1 + sovQuery(uint64(m.EpochCount)) } - var l int - _ = l - if m.EpochNum != 0 { - n += 1 + sovQuery(uint64(m.EpochNum)) + if len(m.StatusCount) > 0 { + for k, v := range m.StatusCount { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovQuery(uint64(len(k))) + 1 + sovQuery(uint64(v)) + n += mapEntrySize + 1 + sovQuery(uint64(mapEntrySize)) + } } return n } -func (m *QueryEpochStatusResponse) Size() (n int) { +func (m *QueryLastCheckpointWithStatusRequest) Size() (n int) { if m == nil { return 0 } @@ -2030,37 +1784,15 @@ func (m *QueryEpochStatusResponse) Size() (n int) { return n } -func (m *QueryRecentEpochStatusCountRequest) Size() (n int) { +func (m *QueryLastCheckpointWithStatusResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.EpochCount != 0 { - n += 1 + sovQuery(uint64(m.EpochCount)) - } - return n -} - -func (m *QueryRecentEpochStatusCountResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TipEpoch != 0 { - n += 1 + sovQuery(uint64(m.TipEpoch)) - } - if m.EpochCount != 0 { - n += 1 + sovQuery(uint64(m.EpochCount)) - } - if len(m.StatusCount) > 0 { - for k, v := range m.StatusCount { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovQuery(uint64(len(k))) + 1 + sovQuery(uint64(v)) - n += mapEntrySize + 1 + sovQuery(uint64(mapEntrySize)) - } + if m.RawCheckpoint != nil { + l = m.RawCheckpoint.Size() + n += 1 + l + sovQuery(uint64(l)) } return n } @@ -2169,352 +1901,7 @@ func (m *QueryRawCheckpointListRequest) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if 
(skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRawCheckpointListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRawCheckpointListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRawCheckpointListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RawCheckpoints = append(m.RawCheckpoints, &RawCheckpointWithMeta{}) - if err := m.RawCheckpoints[len(m.RawCheckpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRecentRawCheckpointListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRecentRawCheckpointListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRecentRawCheckpointListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FromEpochNum", wireType) - } - m.FromEpochNum = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FromEpochNum |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRecentRawCheckpointListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRecentRawCheckpointListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRecentRawCheckpointListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RawCheckpoints = append(m.RawCheckpoints, &RawCheckpointWithMeta{}) - if err := m.RawCheckpoints[len(m.RawCheckpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} + m.Pagination = &query.PageRequest{} } if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2541,7 +1928,7 @@ func (m *QueryRecentRawCheckpointListResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryRawCheckpointRequest) Unmarshal(dAtA []byte) error { +func (m *QueryRawCheckpointListResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2564,17 +1951,17 @@ func (m *QueryRawCheckpointRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryRawCheckpointRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryRawCheckpointListResponse: wiretype end group for non-group") } if fieldNum <= 0 { - 
return fmt.Errorf("proto: QueryRawCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryRawCheckpointListResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoints", wireType) } - m.EpochNum = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -2584,64 +1971,29 @@ func (m *QueryRawCheckpointRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EpochNum |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthQuery } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRawCheckpointResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.RawCheckpoints = append(m.RawCheckpoints, &RawCheckpointWithMeta{}) + if err := m.RawCheckpoints[len(m.RawCheckpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRawCheckpointResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - 
return fmt.Errorf("proto: QueryRawCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2668,10 +2020,10 @@ func (m *QueryRawCheckpointResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RawCheckpoint == nil { - m.RawCheckpoint = &RawCheckpointWithMeta{} + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} } - if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2696,7 +2048,7 @@ func (m *QueryRawCheckpointResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryLatestCheckpointRequest) Unmarshal(dAtA []byte) error { +func (m *QueryRawCheckpointRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2719,12 +2071,31 @@ func (m *QueryLatestCheckpointRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryLatestCheckpointRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryRawCheckpointRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLatestCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryRawCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + } + m.EpochNum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) @@ -2746,7 +2117,7 @@ func (m *QueryLatestCheckpointRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryLatestCheckpointResponse) Unmarshal(dAtA []byte) error { +func (m *QueryRawCheckpointResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2769,15 +2140,15 @@ func (m *QueryLatestCheckpointResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryLatestCheckpointResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryRawCheckpointResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLatestCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryRawCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LatestCheckpoint", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2804,10 +2175,10 @@ func (m *QueryLatestCheckpointResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LatestCheckpoint == nil { - m.LatestCheckpoint = &RawCheckpointWithMeta{} + if m.RawCheckpoint == nil { + m.RawCheckpoint = &RawCheckpointWithMeta{} } - if err := m.LatestCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3465,6 +2836,161 @@ func (m *QueryRecentEpochStatusCountResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryLastCheckpointWithStatusRequest) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLastCheckpointWithStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLastCheckpointWithStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= CheckpointStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLastCheckpointWithStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLastCheckpointWithStatusResponse: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLastCheckpointWithStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RawCheckpoint == nil { + m.RawCheckpoint = &RawCheckpoint{} + } + if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/checkpointing/types/query.pb.gw.go b/x/checkpointing/types/query.pb.gw.go index 9483c6025..833886136 100644 --- a/x/checkpointing/types/query.pb.gw.go +++ b/x/checkpointing/types/query.pb.gw.go @@ -111,78 +111,6 @@ func local_request_Query_RawCheckpointList_0(ctx context.Context, marshaler runt } -var ( - filter_Query_RecentRawCheckpointList_0 = &utilities.DoubleArray{Encoding: map[string]int{"from_epoch_num": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Query_RecentRawCheckpointList_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) 
(proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryRecentRawCheckpointListRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["from_epoch_num"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "from_epoch_num") - } - - protoReq.FromEpochNum, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "from_epoch_num", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RecentRawCheckpointList_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RecentRawCheckpointList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_RecentRawCheckpointList_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryRecentRawCheckpointListRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["from_epoch_num"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "from_epoch_num") - } - - protoReq.FromEpochNum, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "from_epoch_num", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_Query_RecentRawCheckpointList_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.RecentRawCheckpointList(ctx, &protoReq) - return msg, metadata, err - -} - func request_Query_RawCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryRawCheckpointRequest var metadata runtime.ServerMetadata @@ -237,24 +165,6 @@ func local_request_Query_RawCheckpoint_0(ctx context.Context, marshaler runtime. } -func request_Query_LatestCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLatestCheckpointRequest - var metadata runtime.ServerMetadata - - msg, err := client.LatestCheckpoint(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_LatestCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLatestCheckpointRequest - var metadata runtime.ServerMetadata - - msg, err := server.LatestCheckpoint(ctx, &protoReq) - return msg, metadata, err - -} - var ( filter_Query_BlsPublicKeyList_0 = &utilities.DoubleArray{Encoding: map[string]int{"epoch_num": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) @@ -417,6 +327,66 @@ func local_request_Query_RecentEpochStatusCount_0(ctx context.Context, marshaler } +func request_Query_LastCheckpointWithStatus_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLastCheckpointWithStatusRequest + var metadata 
runtime.ServerMetadata + + var ( + val string + e int32 + ok bool + err error + _ = err + ) + + val, ok = pathParams["status"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "status") + } + + e, err = runtime.Enum(val, CheckpointStatus_value) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "status", err) + } + + protoReq.Status = CheckpointStatus(e) + + msg, err := client.LastCheckpointWithStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_LastCheckpointWithStatus_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLastCheckpointWithStatusRequest + var metadata runtime.ServerMetadata + + var ( + val string + e int32 + ok bool + err error + _ = err + ) + + val, ok = pathParams["status"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "status") + } + + e, err = runtime.Enum(val, CheckpointStatus_value) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "status", err) + } + + protoReq.Status = CheckpointStatus(e) + + msg, err := server.LastCheckpointWithStatus(ctx, &protoReq) + return msg, metadata, err + +} + func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryParamsRequest var metadata runtime.ServerMetadata @@ -464,29 +434,6 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) - mux.Handle("GET", pattern_Query_RecentRawCheckpointList_0, func(w http.ResponseWriter, req *http.Request, pathParams 
map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_RecentRawCheckpointList_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_RecentRawCheckpointList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - mux.Handle("GET", pattern_Query_RawCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -510,7 +457,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) - mux.Handle("GET", pattern_Query_LatestCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BlsPublicKeyList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -521,7 +468,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_LatestCheckpoint_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_BlsPublicKeyList_0(rctx, inboundMarshaler, server, req, pathParams) 
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -529,11 +476,11 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_LatestCheckpoint_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_BlsPublicKeyList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle("GET", pattern_Query_BlsPublicKeyList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_EpochStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -544,7 +491,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_BlsPublicKeyList_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_EpochStatus_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -552,11 +499,11 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_BlsPublicKeyList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_EpochStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - mux.Handle("GET", pattern_Query_EpochStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_RecentEpochStatusCount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -567,7 +514,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_EpochStatus_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_RecentEpochStatusCount_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -575,11 +522,11 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_EpochStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_RecentEpochStatusCount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - mux.Handle("GET", pattern_Query_RecentEpochStatusCount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_LastCheckpointWithStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -590,7 +537,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Query_RecentEpochStatusCount_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Query_LastCheckpointWithStatus_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -598,7 +545,7 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } - forward_Query_RecentEpochStatusCount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_LastCheckpointWithStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -686,26 +633,6 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) - mux.Handle("GET", pattern_Query_RecentRawCheckpointList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_RecentRawCheckpointList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_RecentRawCheckpointList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - mux.Handle("GET", pattern_Query_RawCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -726,7 +653,7 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) - mux.Handle("GET", pattern_Query_LatestCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_BlsPublicKeyList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -735,18 +662,18 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_LatestCheckpoint_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_BlsPublicKeyList_0(rctx, inboundMarshaler, client, req, 
pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_LatestCheckpoint_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_BlsPublicKeyList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle("GET", pattern_Query_BlsPublicKeyList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_EpochStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -755,18 +682,18 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_BlsPublicKeyList_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_EpochStatus_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_BlsPublicKeyList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_EpochStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - mux.Handle("GET", pattern_Query_EpochStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_RecentEpochStatusCount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -775,18 +702,18 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_EpochStatus_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_RecentEpochStatusCount_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_EpochStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_RecentEpochStatusCount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - mux.Handle("GET", pattern_Query_RecentEpochStatusCount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("GET", pattern_Query_LastCheckpointWithStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -795,14 +722,14 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Query_RecentEpochStatusCount_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Query_LastCheckpointWithStatus_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Query_RecentEpochStatusCount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Query_LastCheckpointWithStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -832,35 +759,31 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie var ( pattern_Query_RawCheckpointList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "raw_checkpoints", "status"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_RecentRawCheckpointList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "recent_raw_checkpoints", "from_epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_RawCheckpoint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "raw_checkpoint", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_LatestCheckpoint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "checkpointing", "v1", "latest_checkpoint"}, "", runtime.AssumeColonVerbOpt(false))) - pattern_Query_BlsPublicKeyList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "bls_public_keys", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_EpochStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"babylon", "checkpointing", "v1", "epochs", "epoch_num", "status"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_RecentEpochStatusCount_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "checkpointing", "v1", "epochs"}, "status_count", runtime.AssumeColonVerbOpt(false))) + pattern_Query_LastCheckpointWithStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "checkpointing", "v1", "last_raw_checkpoint", "status"}, "", 
runtime.AssumeColonVerbOpt(false))) + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "checkpointing", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) ) var ( forward_Query_RawCheckpointList_0 = runtime.ForwardResponseMessage - forward_Query_RecentRawCheckpointList_0 = runtime.ForwardResponseMessage - forward_Query_RawCheckpoint_0 = runtime.ForwardResponseMessage - forward_Query_LatestCheckpoint_0 = runtime.ForwardResponseMessage - forward_Query_BlsPublicKeyList_0 = runtime.ForwardResponseMessage forward_Query_EpochStatus_0 = runtime.ForwardResponseMessage forward_Query_RecentEpochStatusCount_0 = runtime.ForwardResponseMessage + forward_Query_LastCheckpointWithStatus_0 = runtime.ForwardResponseMessage + forward_Query_Params_0 = runtime.ForwardResponseMessage ) diff --git a/x/checkpointing/types/types.go b/x/checkpointing/types/types.go index ca4ece4eb..3f2a26b4a 100644 --- a/x/checkpointing/types/types.go +++ b/x/checkpointing/types/types.go @@ -1,7 +1,6 @@ package types import ( - "bytes" "crypto/sha256" "encoding/hex" "errors" @@ -36,8 +35,9 @@ func NewCheckpoint(epochNum uint64, lch LastCommitHash) *RawCheckpoint { func NewCheckpointWithMeta(ckpt *RawCheckpoint, status CheckpointStatus) *RawCheckpointWithMeta { return &RawCheckpointWithMeta{ - Ckpt: ckpt, - Status: status, + Ckpt: ckpt, + Status: status, + Lifecycle: []*CheckpointStateUpdate{}, } } @@ -46,35 +46,35 @@ func NewCheckpointWithMeta(ckpt *RawCheckpoint, status CheckpointStatus) *RawChe // 2. aggregates the BLS public key // 3. updates Bitmap // 4. 
accumulates voting power -// it returns True if the checkpoint is updated +// it returns nil if the checkpoint is updated, otherwise it returns an error func (cm *RawCheckpointWithMeta) Accumulate( vals epochingtypes.ValidatorSet, signerAddr sdk.ValAddress, signerBlsKey bls12381.PublicKey, sig bls12381.Signature, - totalPower int64) (bool, error) { + totalPower int64) error { // the checkpoint should be accumulating if cm.Status != Accumulating { - return false, ErrCkptNotAccumulating + return ErrCkptNotAccumulating } // get validator and its index val, index, err := vals.FindValidatorWithIndex(signerAddr) if err != nil { - return false, err + return err } // return an error if the validator has already voted if bitmap.Get(cm.Ckpt.Bitmap, index) { - return false, ErrCkptAlreadyVoted + return ErrCkptAlreadyVoted } // aggregate BLS sig if cm.Ckpt.BlsMultiSig != nil { aggSig, err := bls12381.AggrSig(*cm.Ckpt.BlsMultiSig, sig) if err != nil { - return false, err + return err } cm.Ckpt.BlsMultiSig = &aggSig } else { @@ -85,7 +85,7 @@ func (cm *RawCheckpointWithMeta) Accumulate( if cm.BlsAggrPk != nil { aggPK, err := bls12381.AggrPK(*cm.BlsAggrPk, signerBlsKey) if err != nil { - return false, err + return err } cm.BlsAggrPk = &aggPK } else { @@ -101,7 +101,23 @@ func (cm *RawCheckpointWithMeta) Accumulate( cm.Status = Sealed } - return true, nil + return nil +} + +func (cm *RawCheckpointWithMeta) IsMoreMatureThanStatus(status CheckpointStatus) bool { + return cm.Status > status +} + +// RecordStateUpdate appends a new state update to the raw ckpt with meta +// where the time/height are captured by the current ctx +func (cm *RawCheckpointWithMeta) RecordStateUpdate(ctx sdk.Context, status CheckpointStatus) { + height, time := ctx.BlockHeight(), ctx.BlockTime() + stateUpdate := &CheckpointStateUpdate{ + State: status, + BlockHeight: uint64(height), + BlockTime: &time, + } + cm.Lifecycle = append(cm.Lifecycle, stateUpdate) } func NewLastCommitHashFromHex(s string) 
(LastCommitHash, error) { @@ -200,7 +216,3 @@ func BytesToCkptWithMeta(cdc codec.BinaryCodec, bz []byte) (*RawCheckpointWithMe err := cdc.Unmarshal(bz, ckptWithMeta) return ckptWithMeta, err } - -func (m RawCkptHash) Equals(h RawCkptHash) bool { - return bytes.Equal(m.Bytes(), h.Bytes()) -} diff --git a/x/checkpointing/types/types_test.go b/x/checkpointing/types/types_test.go index fa5ecf8ed..3bfe4aa33 100644 --- a/x/checkpointing/types/types_test.go +++ b/x/checkpointing/types/types_test.go @@ -1,13 +1,14 @@ package types_test import ( + "testing" + "github.com/babylonchain/babylon/testutil/datagen" testkeeper "github.com/babylonchain/babylon/testutil/keeper" "github.com/babylonchain/babylon/x/checkpointing/types" "github.com/cosmos/cosmos-sdk/client" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" - "testing" ) // a single validator @@ -22,15 +23,13 @@ func TestRawCheckpointWithMeta_Accumulate1(t *testing.T) { ckpt, err := ckptkeeper.BuildRawCheckpoint(ctx, epochNum, lch) require.NoError(t, err) valSet := datagen.GenRandomValSet(n) - updated, err := ckpt.Accumulate(valSet, valSet[0].Addr, blsPubkeys[0], blsSigs[0], totalPower) + err = ckpt.Accumulate(valSet, valSet[0].Addr, blsPubkeys[0], blsSigs[0], totalPower) require.NoError(t, err) - require.True(t, updated) require.Equal(t, types.Sealed, ckpt.Status) // accumulate the same BLS sig - updated, err = ckpt.Accumulate(valSet, valSet[0].Addr, blsPubkeys[0], blsSigs[0], totalPower) + err = ckpt.Accumulate(valSet, valSet[0].Addr, blsPubkeys[0], blsSigs[0], totalPower) require.ErrorIs(t, err, types.ErrCkptNotAccumulating) - require.False(t, updated) require.Equal(t, types.Sealed, ckpt.Status) } @@ -47,21 +46,17 @@ func TestRawCheckpointWithMeta_Accumulate4(t *testing.T) { require.NoError(t, err) valSet := datagen.GenRandomValSet(n) for i := 0; i < n; i++ { - var updated bool - updated, err = ckpt.Accumulate(valSet, valSet[i].Addr, blsPubkeys[i], blsSigs[i], totalPower) + err = 
ckpt.Accumulate(valSet, valSet[i].Addr, blsPubkeys[i], blsSigs[i], totalPower) if i == 0 { require.NoError(t, err) - require.True(t, updated) require.Equal(t, types.Accumulating, ckpt.Status) } if i == 1 { require.NoError(t, err) - require.True(t, updated) require.Equal(t, types.Sealed, ckpt.Status) } if i >= 2 { require.ErrorIs(t, err, types.ErrCkptNotAccumulating) - require.False(t, updated) require.Equal(t, types.Sealed, ckpt.Status) } } diff --git a/x/checkpointing/types/utils.go b/x/checkpointing/types/utils.go index 1dc48ecdb..584037089 100644 --- a/x/checkpointing/types/utils.go +++ b/x/checkpointing/types/utils.go @@ -1,6 +1,8 @@ package types import ( + "bytes" + "encoding/hex" "github.com/babylonchain/babylon/btctxformatter" "github.com/babylonchain/babylon/crypto/bls12381" sdk "github.com/cosmos/cosmos-sdk/types" @@ -27,6 +29,10 @@ func (m RawCheckpoint) Hash() RawCkptHash { return hash(fields) } +func (m RawCheckpoint) HashStr() string { + return m.Hash().String() +} + // SignedMsg is the message corresponding to the BLS sig in this raw checkpoint // Its value should be (epoch_number || last_commit_hash) func (m RawCheckpoint) SignedMsg() []byte { @@ -49,6 +55,18 @@ func (m RawCkptHash) Bytes() []byte { return m } +func (m RawCkptHash) Equals(h RawCkptHash) bool { + return bytes.Equal(m.Bytes(), h.Bytes()) +} + +func (m RawCkptHash) String() string { + return hex.EncodeToString(m) +} + +func FromStringToCkptHash(s string) (RawCkptHash, error) { + return hex.DecodeString(s) +} + func FromBTCCkptBytesToRawCkpt(btcCkptBytes []byte) (*RawCheckpoint, error) { btcCkpt, err := btctxformatter.DecodeRawCheckpoint(btctxformatter.CurrentVersion, btcCkptBytes) if err != nil { diff --git a/x/epoching/keeper/epoch_msg_queue.go b/x/epoching/keeper/epoch_msg_queue.go index 6e70cee6f..b370ba500 100644 --- a/x/epoching/keeper/epoch_msg_queue.go +++ b/x/epoching/keeper/epoch_msg_queue.go @@ -144,11 +144,12 @@ func (k Keeper) HandleQueuedMsg(ctx sdk.Context, msg 
*types.QueuedMessage) (*sdk return nil, err } // self-bonded to the created validator - k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_CREATED) //nolint:errcheck // either we ignore the error here, or propoagate up the stack - if err != nil { + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_CREATED); err != nil { + return nil, err + } + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_BONDED); err != nil { return nil, err } - k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_BONDED) //nolint:errcheck // either we ignore the error here, or propoagate up the stack case *types.QueuedMessage_MsgDelegate: delAddr, err := sdk.AccAddressFromBech32(unwrappedMsg.MsgDelegate.DelegatorAddress) if err != nil { @@ -159,8 +160,12 @@ func (k Keeper) HandleQueuedMsg(ctx sdk.Context, msg *types.QueuedMessage) (*sdk return nil, err } // created and bonded to the validator - k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_CREATED) //nolint:errcheck // either we ignore the error here, or propoagate up the stack - k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_BONDED) //nolint:errcheck // either we ignore the error here, or propoagate up the stack + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_CREATED); err != nil { + return nil, err + } + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_BONDED); err != nil { + return nil, err + } case *types.QueuedMessage_MsgUndelegate: delAddr, err := sdk.AccAddressFromBech32(unwrappedMsg.MsgUndelegate.DelegatorAddress) if err != nil { @@ -172,7 +177,9 @@ func (k Keeper) HandleQueuedMsg(ctx sdk.Context, msg *types.QueuedMessage) (*sdk } // unbonding from the validator // (in `ApplyMatureUnbonding`) AFTER mature, unbonded from the validator - k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_UNBONDING) //nolint:errcheck // either we ignore the error 
here, or propoagate up the stack + if err := k.RecordNewDelegationState(ctx, delAddr, valAddr, types.BondState_UNBONDING); err != nil { + return nil, err + } case *types.QueuedMessage_MsgBeginRedelegate: delAddr, err := sdk.AccAddressFromBech32(unwrappedMsg.MsgBeginRedelegate.DelegatorAddress) if err != nil { @@ -184,7 +191,9 @@ func (k Keeper) HandleQueuedMsg(ctx sdk.Context, msg *types.QueuedMessage) (*sdk } // unbonding from the source validator // (in `ApplyMatureUnbonding`) AFTER mature, unbonded from the source validator, created/bonded to the destination validator - k.RecordNewDelegationState(ctx, delAddr, srcValAddr, types.BondState_UNBONDING) //nolint:errcheck // either we ignore the error here, or propoagate up the stack + if err := k.RecordNewDelegationState(ctx, delAddr, srcValAddr, types.BondState_UNBONDING); err != nil { + return nil, err + } default: panic(sdkerrors.Wrap(types.ErrInvalidQueuedMessageType, msg.String())) } diff --git a/x/epoching/keeper/epochs.go b/x/epoching/keeper/epochs.go index 8948bbfab..93be60372 100644 --- a/x/epoching/keeper/epochs.go +++ b/x/epoching/keeper/epochs.go @@ -137,6 +137,10 @@ func (k Keeper) IncEpoch(ctx sdk.Context) types.Epoch { return newEpoch } +// epochInfoStore returns the store for epoch metadata +// prefix: EpochInfoKey +// key: epochNumber +// value: epoch metadata func (k Keeper) epochInfoStore(ctx sdk.Context) prefix.Store { store := ctx.KVStore(k.storeKey) return prefix.NewStore(store, types.EpochInfoKey) diff --git a/x/epoching/keeper/grpc_query.go b/x/epoching/keeper/grpc_query.go index af203312d..f76c5f81a 100644 --- a/x/epoching/keeper/grpc_query.go +++ b/x/epoching/keeper/grpc_query.go @@ -2,8 +2,10 @@ package keeper import ( "context" - "cosmossdk.io/math" "errors" + "fmt" + + "cosmossdk.io/math" "github.com/babylonchain/babylon/x/epoching/types" sdk "github.com/cosmos/cosmos-sdk/types" @@ -53,6 +55,47 @@ func (k Keeper) EpochInfo(c context.Context, req *types.QueryEpochInfoRequest) ( return 
resp, nil } +// EpochsInfo handles the QueryEpochsInfoRequest query +func (k Keeper) EpochsInfo(c context.Context, req *types.QueryEpochsInfoRequest) (*types.QueryEpochsInfoResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + + // parse start_epoch and end_epoch and forward to the pagination request + if req.EndEpoch > 0 { + // this query uses start_epoch and end_epoch to specify range + if req.StartEpoch > req.EndEpoch { + return nil, fmt.Errorf("StartEpoch (%d) should not be larger than EndEpoch (%d)", req.StartEpoch, req.EndEpoch) + } + req.Pagination = &query.PageRequest{ + Key: sdk.Uint64ToBigEndian(req.StartEpoch), + Limit: req.EndEpoch - req.StartEpoch + 1, + Reverse: false, + } + } + + epochInfoStore := k.epochInfoStore(ctx) + epochs := []*types.Epoch{} + pageRes, err := query.Paginate(epochInfoStore, req.Pagination, func(key, value []byte) error { + // unmarshal to epoch metadata + var epoch types.Epoch + if err := k.cdc.Unmarshal(value, &epoch); err != nil { + return err + } + // append to epochs list + epochs = append(epochs, &epoch) + return nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + resp := &types.QueryEpochsInfoResponse{ + Epochs: epochs, + Pagination: pageRes, + } + + return resp, nil +} + // EpochMsgs handles the QueryEpochMsgsRequest query func (k Keeper) EpochMsgs(c context.Context, req *types.QueryEpochMsgsRequest) (*types.QueryEpochMsgsResponse, error) { ctx := sdk.UnwrapSDKContext(c) diff --git a/x/epoching/keeper/grpc_query_test.go b/x/epoching/keeper/grpc_query_test.go index 1c32d40c7..2f3f2a10e 100644 --- a/x/epoching/keeper/grpc_query_test.go +++ b/x/epoching/keeper/grpc_query_test.go @@ -88,10 +88,83 @@ func FuzzCurrentEpoch(f *testing.F) { }) } +func FuzzEpochsInfo(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + numEpochs := datagen.RandomInt(10) + 1 + limit := datagen.RandomInt(10) + 1 + + helper := 
testepoching.NewHelper(t) + ctx, keeper, queryClient := helper.Ctx, helper.EpochingKeeper, helper.QueryClient + wctx := sdk.WrapSDKContext(ctx) + + // enque the first block of the numEpochs'th epoch + epochInterval := keeper.GetParams(ctx).EpochInterval + for i := uint64(0); i < numEpochs-1; i++ { + for j := uint64(0); j < epochInterval; j++ { + helper.GenAndApplyEmptyBlock() + } + } + + // get epoch msgs + req := types.QueryEpochsInfoRequest{ + Pagination: &query.PageRequest{ + Limit: limit, + }, + } + resp, err := queryClient.EpochsInfo(wctx, &req) + require.NoError(t, err) + + require.Equal(t, testepoching.Min(numEpochs, limit), uint64(len(resp.Epochs))) + for i, epoch := range resp.Epochs { + require.Equal(t, uint64(i), epoch.EpochNumber) + } + }) +} + +func FuzzEpochsInfo_QueryParams(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + numEpochs := datagen.RandomInt(10) + 2 + + endEpoch := rand.Uint64()%(numEpochs-1) + 1 + startEpoch := rand.Uint64() % endEpoch + + helper := testepoching.NewHelper(t) + ctx, keeper, queryClient := helper.Ctx, helper.EpochingKeeper, helper.QueryClient + wctx := sdk.WrapSDKContext(ctx) + + // enque the first block of the numEpochs'th epoch + epochInterval := keeper.GetParams(ctx).EpochInterval + for i := uint64(0); i < numEpochs-1; i++ { + for j := uint64(0); j < epochInterval; j++ { + helper.GenAndApplyEmptyBlock() + } + } + + // get epoch msgs + req := types.QueryEpochsInfoRequest{ + StartEpoch: startEpoch, + EndEpoch: endEpoch, + } + resp, err := queryClient.EpochsInfo(wctx, &req) + require.NoError(t, err) + + require.Equal(t, endEpoch-startEpoch+1, uint64(len(resp.Epochs))) + for i, epoch := range resp.Epochs { + require.Equal(t, uint64(i)+startEpoch, epoch.EpochNumber) + } + }) +} + // FuzzEpochMsgsQuery fuzzes queryClient.EpochMsgs // 1. randomly generate msgs and limit in pagination // 2. 
check the returned msg was previously enqueued -// NOTE: Msgs in QueryEpochMsgsResponse are out-of-roder +// NOTE: Msgs in QueryEpochMsgsResponse are out-of-order func FuzzEpochMsgsQuery(f *testing.F) { datagen.AddRandomSeedsToFuzzer(f, 10) diff --git a/x/epoching/keeper/hooks.go b/x/epoching/keeper/hooks.go index 803012b31..7117d4a13 100644 --- a/x/epoching/keeper/hooks.go +++ b/x/epoching/keeper/hooks.go @@ -124,4 +124,13 @@ func (h Hooks) AfterDelegationModified(ctx sdk.Context, delAddr sdk.AccAddress, return nil } func (h Hooks) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error { return nil } -func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { return nil } + +func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { return nil } + +func (h Hooks) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return nil +} + +func (h Hooks) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return nil +} diff --git a/x/epoching/testepoching/helper.go b/x/epoching/testepoching/helper.go index 72500f787..56c401b98 100644 --- a/x/epoching/testepoching/helper.go +++ b/x/epoching/testepoching/helper.go @@ -8,7 +8,6 @@ import ( "cosmossdk.io/math" appparams "github.com/babylonchain/babylon/app/params" - "github.com/stretchr/testify/require" "github.com/babylonchain/babylon/app" @@ -17,7 +16,6 @@ import ( "github.com/babylonchain/babylon/x/epoching/types" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" - cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" @@ -27,6 +25,11 @@ import ( tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) +type ValidatorInfo struct { + BlsKey bls12381.PrivateKey + Address 
sdk.ValAddress +} + // Helper is a structure which wraps the entire app and exposes functionalities for testing the epoching module type Helper struct { t *testing.T @@ -38,7 +41,8 @@ type Helper struct { QueryClient types.QueryClient StakingKeeper *stakingkeeper.Keeper - GenAccs []authtypes.GenesisAccount + GenAccs []authtypes.GenesisAccount + ValBlsPrivKeys []ValidatorInfo } // NewHelper creates the helper for testing the epoching module @@ -52,7 +56,8 @@ func NewHelper(t *testing.T) *Helper { valSet := epochingKeeper.GetValidatorSet(ctx, 0) require.Len(t, valSet, 1) genesisVal := valSet[0] - genesisBLSPubkey := bls12381.GenPrivKey().PubKey() + blsPrivKey := bls12381.GenPrivKey() + genesisBLSPubkey := blsPrivKey.PubKey() err := app.CheckpointingKeeper.CreateRegistration(ctx, genesisBLSPubkey, genesisVal.Addr) require.NoError(t, err) @@ -62,7 +67,20 @@ func NewHelper(t *testing.T) *Helper { queryClient := types.NewQueryClient(queryHelper) msgSrvr := keeper.NewMsgServerImpl(epochingKeeper) - return &Helper{t, ctx, app, &epochingKeeper, msgSrvr, queryClient, &app.StakingKeeper, nil} + return &Helper{ + t, + ctx, + app, + &epochingKeeper, + msgSrvr, + queryClient, + &app.StakingKeeper, + nil, + []ValidatorInfo{ValidatorInfo{ + blsPrivKey, + genesisVal.Addr, + }}, + } } // NewHelperWithValSet is same as NewHelper, except that it creates a set of validators @@ -87,22 +105,23 @@ func NewHelperWithValSet(t *testing.T) *Helper { // get necessary subsets of the app/keeper epochingKeeper := app.EpochingKeeper - + valInfos := []ValidatorInfo{} // add BLS pubkey to the genesis validator valSet := epochingKeeper.GetValidatorSet(ctx, 0) for _, val := range valSet { - blsPubkey := bls12381.GenPrivKey().PubKey() + blsPrivKey := bls12381.GenPrivKey() + valInfos = append(valInfos, ValidatorInfo{blsPrivKey, val.Addr}) + blsPubkey := blsPrivKey.PubKey() err = app.CheckpointingKeeper.CreateRegistration(ctx, blsPubkey, val.Addr) require.NoError(t, err) } - querier := 
keeper.Querier{Keeper: epochingKeeper} queryHelper := baseapp.NewQueryServerTestHelper(ctx, app.InterfaceRegistry()) types.RegisterQueryServer(queryHelper, querier) queryClient := types.NewQueryClient(queryHelper) msgSrvr := keeper.NewMsgServerImpl(epochingKeeper) - return &Helper{t, ctx, app, &epochingKeeper, msgSrvr, queryClient, &app.StakingKeeper, GenAccs} + return &Helper{t, ctx, app, &epochingKeeper, msgSrvr, queryClient, &app.StakingKeeper, GenAccs, valInfos} } // GenAndApplyEmptyBlock generates a new empty block and appends it to the current blockchain @@ -147,38 +166,6 @@ func (h *Helper) EndBlock() sdk.Context { return h.Ctx } -// CreateValidator calls handler to create a new staking validator -// TODO: change to the wrapped version in the checkpointing module (require modifying checkpointing module) -func (h *Helper) CreateValidator(addr sdk.ValAddress, pk cryptotypes.PubKey, stakeAmount math.Int, ok bool) { - coin := sdk.NewCoin(appparams.DefaultBondDenom, stakeAmount) - h.createValidator(addr, pk, coin, ok) -} - -// CreateValidatorWithValPower calls handler to create a new staking validator with zero commission -// TODO: change to the wrapped version in the checkpointing module (require modifying checkpointing module) -func (h *Helper) CreateValidatorWithValPower(addr sdk.ValAddress, pk cryptotypes.PubKey, valPower int64, ok bool) math.Int { - amount := h.StakingKeeper.TokensFromConsensusPower(h.Ctx, valPower) - coin := sdk.NewCoin(appparams.DefaultBondDenom, amount) - h.createValidator(addr, pk, coin, ok) - return amount -} - -// CreateValidatorMsg returns a message used to create validator in this service. 
-// TODO: change to the wrapped version in the checkpointing module (require modifying checkpointing module) -func (h *Helper) CreateValidatorMsg(addr sdk.ValAddress, pk cryptotypes.PubKey, stakeAmount math.Int) *stakingtypes.MsgCreateValidator { - coin := sdk.NewCoin(appparams.DefaultBondDenom, stakeAmount) - msg, err := stakingtypes.NewMsgCreateValidator(addr, pk, coin, stakingtypes.Description{}, ZeroCommission(), sdk.OneInt()) - require.NoError(h.t, err) - return msg -} - -// TODO: change to the wrapped version in the checkpointing module (require modifying checkpointing module) -func (h *Helper) createValidator(addr sdk.ValAddress, pk cryptotypes.PubKey, coin sdk.Coin, ok bool) { - msg, err := stakingtypes.NewMsgCreateValidator(addr, pk, coin, stakingtypes.Description{}, ZeroCommission(), sdk.OneInt()) - require.NoError(h.t, err) - h.Handle(msg, ok) -} - // WrappedDelegate calls handler to delegate stake for a validator func (h *Helper) WrappedDelegate(delegator sdk.AccAddress, val sdk.ValAddress, amount math.Int) *sdk.Result { coin := sdk.NewCoin(appparams.DefaultBondDenom, amount) diff --git a/x/epoching/types/query.pb.go b/x/epoching/types/query.pb.go index 101e6ddf7..8d90b59d9 100644 --- a/x/epoching/types/query.pb.go +++ b/x/epoching/types/query.pb.go @@ -201,6 +201,120 @@ func (m *QueryEpochInfoResponse) GetEpoch() *Epoch { return nil } +type QueryEpochsInfoRequest struct { + StartEpoch uint64 `protobuf:"varint,1,opt,name=start_epoch,json=startEpoch,proto3" json:"start_epoch,omitempty"` + EndEpoch uint64 `protobuf:"varint,2,opt,name=end_epoch,json=endEpoch,proto3" json:"end_epoch,omitempty"` + // pagination defines whether to have the pagination in the request + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryEpochsInfoRequest) Reset() { *m = QueryEpochsInfoRequest{} } +func (m *QueryEpochsInfoRequest) String() string { return proto.CompactTextString(m) } +func 
(*QueryEpochsInfoRequest) ProtoMessage() {} +func (*QueryEpochsInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1821b530f2ec2711, []int{4} +} +func (m *QueryEpochsInfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochsInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochsInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochsInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochsInfoRequest.Merge(m, src) +} +func (m *QueryEpochsInfoRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochsInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochsInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochsInfoRequest proto.InternalMessageInfo + +func (m *QueryEpochsInfoRequest) GetStartEpoch() uint64 { + if m != nil { + return m.StartEpoch + } + return 0 +} + +func (m *QueryEpochsInfoRequest) GetEndEpoch() uint64 { + if m != nil { + return m.EndEpoch + } + return 0 +} + +func (m *QueryEpochsInfoRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QueryEpochsInfoResponse struct { + Epochs []*Epoch `protobuf:"bytes,1,rep,name=epochs,proto3" json:"epochs,omitempty"` + // pagination defines the pagination in the response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryEpochsInfoResponse) Reset() { *m = QueryEpochsInfoResponse{} } +func (m *QueryEpochsInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryEpochsInfoResponse) ProtoMessage() {} +func (*QueryEpochsInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1821b530f2ec2711, []int{5} +} +func (m *QueryEpochsInfoResponse) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochsInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochsInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochsInfoResponse.Merge(m, src) +} +func (m *QueryEpochsInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochsInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochsInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochsInfoResponse proto.InternalMessageInfo + +func (m *QueryEpochsInfoResponse) GetEpochs() []*Epoch { + if m != nil { + return m.Epochs + } + return nil +} + +func (m *QueryEpochsInfoResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + // QueryCurrentEpochRequest is the request type for the Query/CurrentEpoch RPC method type QueryCurrentEpochRequest struct { } @@ -209,7 +323,7 @@ func (m *QueryCurrentEpochRequest) Reset() { *m = QueryCurrentEpochReque func (m *QueryCurrentEpochRequest) String() string { return proto.CompactTextString(m) } func (*QueryCurrentEpochRequest) ProtoMessage() {} func (*QueryCurrentEpochRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{4} + return fileDescriptor_1821b530f2ec2711, []int{6} } func (m *QueryCurrentEpochRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -250,7 +364,7 @@ func (m *QueryCurrentEpochResponse) Reset() { *m = QueryCurrentEpochResp func (m *QueryCurrentEpochResponse) String() string { return proto.CompactTextString(m) } func (*QueryCurrentEpochResponse) ProtoMessage() {} func (*QueryCurrentEpochResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{5} + 
return fileDescriptor_1821b530f2ec2711, []int{7} } func (m *QueryCurrentEpochResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -297,7 +411,7 @@ func (m *QueryCurrentEpochResponse) GetEpochBoundary() uint64 { type QueryEpochMsgsRequest struct { // epoch_num is the number of epoch of the requested msg queue EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` - // pagination defines whether to have the pagination in the response + // pagination defines whether to have the pagination in the request Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` } @@ -305,7 +419,7 @@ func (m *QueryEpochMsgsRequest) Reset() { *m = QueryEpochMsgsRequest{} } func (m *QueryEpochMsgsRequest) String() string { return proto.CompactTextString(m) } func (*QueryEpochMsgsRequest) ProtoMessage() {} func (*QueryEpochMsgsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{6} + return fileDescriptor_1821b530f2ec2711, []int{8} } func (m *QueryEpochMsgsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +474,7 @@ func (m *QueryEpochMsgsResponse) Reset() { *m = QueryEpochMsgsResponse{} func (m *QueryEpochMsgsResponse) String() string { return proto.CompactTextString(m) } func (*QueryEpochMsgsResponse) ProtoMessage() {} func (*QueryEpochMsgsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{7} + return fileDescriptor_1821b530f2ec2711, []int{9} } func (m *QueryEpochMsgsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -417,7 +531,7 @@ func (m *QueryLatestEpochMsgsRequest) Reset() { *m = QueryLatestEpochMsg func (m *QueryLatestEpochMsgsRequest) String() string { return proto.CompactTextString(m) } func (*QueryLatestEpochMsgsRequest) ProtoMessage() {} func (*QueryLatestEpochMsgsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{8} + 
return fileDescriptor_1821b530f2ec2711, []int{10} } func (m *QueryLatestEpochMsgsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -479,7 +593,7 @@ func (m *QueryLatestEpochMsgsResponse) Reset() { *m = QueryLatestEpochMs func (m *QueryLatestEpochMsgsResponse) String() string { return proto.CompactTextString(m) } func (*QueryLatestEpochMsgsResponse) ProtoMessage() {} func (*QueryLatestEpochMsgsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{9} + return fileDescriptor_1821b530f2ec2711, []int{11} } func (m *QueryLatestEpochMsgsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -530,7 +644,7 @@ func (m *QueryValidatorLifecycleRequest) Reset() { *m = QueryValidatorLi func (m *QueryValidatorLifecycleRequest) String() string { return proto.CompactTextString(m) } func (*QueryValidatorLifecycleRequest) ProtoMessage() {} func (*QueryValidatorLifecycleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{10} + return fileDescriptor_1821b530f2ec2711, []int{12} } func (m *QueryValidatorLifecycleRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -574,7 +688,7 @@ func (m *QueryValidatorLifecycleResponse) Reset() { *m = QueryValidatorL func (m *QueryValidatorLifecycleResponse) String() string { return proto.CompactTextString(m) } func (*QueryValidatorLifecycleResponse) ProtoMessage() {} func (*QueryValidatorLifecycleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{11} + return fileDescriptor_1821b530f2ec2711, []int{13} } func (m *QueryValidatorLifecycleResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -618,7 +732,7 @@ func (m *QueryDelegationLifecycleRequest) Reset() { *m = QueryDelegation func (m *QueryDelegationLifecycleRequest) String() string { return proto.CompactTextString(m) } func (*QueryDelegationLifecycleRequest) ProtoMessage() {} func (*QueryDelegationLifecycleRequest) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_1821b530f2ec2711, []int{12} + return fileDescriptor_1821b530f2ec2711, []int{14} } func (m *QueryDelegationLifecycleRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +776,7 @@ func (m *QueryDelegationLifecycleResponse) Reset() { *m = QueryDelegatio func (m *QueryDelegationLifecycleResponse) String() string { return proto.CompactTextString(m) } func (*QueryDelegationLifecycleResponse) ProtoMessage() {} func (*QueryDelegationLifecycleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{13} + return fileDescriptor_1821b530f2ec2711, []int{15} } func (m *QueryDelegationLifecycleResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -707,7 +821,7 @@ func (m *QueryEpochValSetRequest) Reset() { *m = QueryEpochValSetRequest func (m *QueryEpochValSetRequest) String() string { return proto.CompactTextString(m) } func (*QueryEpochValSetRequest) ProtoMessage() {} func (*QueryEpochValSetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{14} + return fileDescriptor_1821b530f2ec2711, []int{16} } func (m *QueryEpochValSetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -760,7 +874,7 @@ func (m *QueryEpochValSetResponse) Reset() { *m = QueryEpochValSetRespon func (m *QueryEpochValSetResponse) String() string { return proto.CompactTextString(m) } func (*QueryEpochValSetResponse) ProtoMessage() {} func (*QueryEpochValSetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1821b530f2ec2711, []int{15} + return fileDescriptor_1821b530f2ec2711, []int{17} } func (m *QueryEpochValSetResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -815,6 +929,8 @@ func init() { proto.RegisterType((*QueryParamsResponse)(nil), "babylon.epoching.v1.QueryParamsResponse") proto.RegisterType((*QueryEpochInfoRequest)(nil), "babylon.epoching.v1.QueryEpochInfoRequest") proto.RegisterType((*QueryEpochInfoResponse)(nil), 
"babylon.epoching.v1.QueryEpochInfoResponse") + proto.RegisterType((*QueryEpochsInfoRequest)(nil), "babylon.epoching.v1.QueryEpochsInfoRequest") + proto.RegisterType((*QueryEpochsInfoResponse)(nil), "babylon.epoching.v1.QueryEpochsInfoResponse") proto.RegisterType((*QueryCurrentEpochRequest)(nil), "babylon.epoching.v1.QueryCurrentEpochRequest") proto.RegisterType((*QueryCurrentEpochResponse)(nil), "babylon.epoching.v1.QueryCurrentEpochResponse") proto.RegisterType((*QueryEpochMsgsRequest)(nil), "babylon.epoching.v1.QueryEpochMsgsRequest") @@ -832,71 +948,75 @@ func init() { func init() { proto.RegisterFile("babylon/epoching/v1/query.proto", fileDescriptor_1821b530f2ec2711) } var fileDescriptor_1821b530f2ec2711 = []byte{ - // 1022 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x97, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0xb3, 0x4d, 0x5a, 0x92, 0xe7, 0x96, 0xc2, 0xa4, 0x40, 0xba, 0x29, 0x4e, 0xb4, 0x85, - 0x26, 0x24, 0xcd, 0x6e, 0xec, 0xa4, 0x45, 0xfd, 0x01, 0x88, 0x84, 0x1f, 0xa2, 0x4a, 0x51, 0xba, - 0x48, 0x39, 0x70, 0xb1, 0xc6, 0xde, 0xc9, 0x66, 0xa5, 0xf5, 0x8e, 0xbb, 0x3b, 0x6b, 0xb0, 0x4a, - 0x10, 0xe2, 0x2f, 0x40, 0xe2, 0x80, 0x7a, 0x43, 0xe2, 0xc8, 0x9f, 0x00, 0x07, 0xb8, 0xf5, 0x58, - 0xc4, 0x85, 0x13, 0x42, 0x09, 0x7f, 0x08, 0xda, 0x37, 0x63, 0x7b, 0x6d, 0x66, 0x63, 0xa7, 0x42, - 0xdc, 0x92, 0x99, 0xf7, 0xe3, 0xf3, 0xbe, 0xf3, 0xf6, 0x3d, 0x19, 0x16, 0xea, 0xb4, 0xde, 0x09, - 0x79, 0xe4, 0xb0, 0x16, 0x6f, 0x1c, 0x04, 0x91, 0xef, 0xb4, 0x2b, 0xce, 0xc3, 0x94, 0xc5, 0x1d, - 0xbb, 0x15, 0x73, 0xc1, 0xc9, 0xac, 0x32, 0xb0, 0xbb, 0x06, 0x76, 0xbb, 0x62, 0x5e, 0xf2, 0xb9, - 0xcf, 0xf1, 0xde, 0xc9, 0xfe, 0x92, 0xa6, 0xe6, 0x15, 0x9f, 0x73, 0x3f, 0x64, 0x0e, 0x6d, 0x05, - 0x0e, 0x8d, 0x22, 0x2e, 0xa8, 0x08, 0x78, 0x94, 0xa8, 0xdb, 0x95, 0x06, 0x4f, 0x9a, 0x3c, 0x71, - 0xea, 0x34, 0x61, 0x32, 0x83, 0xd3, 0xae, 0xd4, 0x99, 0xa0, 0x15, 0xa7, 0x45, 0xfd, 0x20, 0x42, - 0x63, 0x65, 0xbb, 0xa8, 0xa3, 0x6a, 0xd1, 
0x98, 0x36, 0xbb, 0xd1, 0x2c, 0x9d, 0x45, 0x0f, 0x11, - 0x6d, 0xac, 0x4b, 0x40, 0x1e, 0x64, 0x79, 0x76, 0xd1, 0xd1, 0x65, 0x0f, 0x53, 0x96, 0x08, 0x6b, - 0x17, 0x66, 0x07, 0x4e, 0x93, 0x16, 0x8f, 0x12, 0x46, 0x6e, 0xc1, 0x39, 0x99, 0x60, 0xce, 0x58, - 0x34, 0x96, 0x4b, 0xd5, 0x79, 0x5b, 0x53, 0xb8, 0x2d, 0x9d, 0xb6, 0xa6, 0x9e, 0xfc, 0xb9, 0x30, - 0xe1, 0x2a, 0x07, 0x6b, 0x13, 0x5e, 0xc2, 0x88, 0xef, 0x67, 0x86, 0x1f, 0x45, 0xfb, 0x5c, 0xa5, - 0x22, 0xf3, 0x30, 0x83, 0xce, 0xb5, 0x28, 0x6d, 0x62, 0xd8, 0x29, 0x77, 0x1a, 0x0f, 0x3e, 0x4e, - 0x9b, 0xd6, 0x3d, 0x78, 0x79, 0xd8, 0x4b, 0xa1, 0xac, 0xc3, 0x59, 0xb4, 0x52, 0x24, 0xa6, 0x96, - 0x04, 0xdd, 0x5c, 0x69, 0x68, 0x99, 0x30, 0x87, 0xb1, 0xb6, 0xd3, 0x38, 0x66, 0x91, 0x90, 0x77, - 0xaa, 0x5e, 0x1f, 0x2e, 0x6b, 0xee, 0x54, 0xaa, 0xab, 0x70, 0xa1, 0x21, 0xcf, 0x6b, 0xfd, 0x94, - 0x53, 0xee, 0xf9, 0x46, 0xce, 0x98, 0xbc, 0x0e, 0xcf, 0xcb, 0x32, 0xea, 0x3c, 0x8d, 0x3c, 0x1a, - 0x77, 0xe6, 0xce, 0xa0, 0xd5, 0x05, 0x3c, 0xdd, 0x52, 0x87, 0xd6, 0x17, 0x79, 0x19, 0xee, 0x27, - 0x7e, 0x32, 0x8e, 0x0c, 0xe4, 0x03, 0x80, 0xfe, 0xf3, 0x63, 0xe0, 0x52, 0xf5, 0x9a, 0x2d, 0x7b, - 0xc5, 0xce, 0x7a, 0xc5, 0x96, 0xdd, 0xa8, 0x7a, 0xc5, 0xde, 0xa5, 0x3e, 0x53, 0x81, 0xdd, 0x9c, - 0xa7, 0xf5, 0xd8, 0xc8, 0xeb, 0x29, 0xd3, 0xab, 0x22, 0x6f, 0xc2, 0x54, 0x33, 0xf1, 0xb3, 0x87, - 0x9d, 0x5c, 0x2e, 0x55, 0x2d, 0xad, 0x9c, 0x0f, 0x52, 0x96, 0x32, 0xef, 0x3e, 0x4b, 0x92, 0x2c, - 0x3e, 0xda, 0x93, 0x0f, 0x35, 0x68, 0x4b, 0x23, 0xd1, 0x64, 0xd2, 0x01, 0xb6, 0x1f, 0x0c, 0x98, - 0x47, 0xb6, 0x1d, 0x2a, 0x58, 0x22, 0xb4, 0x02, 0x45, 0xde, 0xc0, 0x0b, 0x4c, 0xb3, 0xc8, 0x93, - 0xea, 0x2f, 0x40, 0x49, 0xaa, 0xd7, 0xe0, 0x69, 0x24, 0x94, 0xf4, 0x80, 0x47, 0xdb, 0xd9, 0xc9, - 0x90, 0x82, 0x93, 0xcf, 0xac, 0xe0, 0x4f, 0x06, 0x5c, 0xd1, 0x53, 0x2a, 0x1d, 0x5d, 0x78, 0x31, - 0xc4, 0x2b, 0x49, 0x5a, 0xcb, 0x89, 0x7a, 0x6d, 0xb4, 0xa8, 0x3b, 0x41, 0x22, 0xdc, 0x8b, 0xe1, - 0x60, 0xec, 0xff, 0x4e, 0xe3, 0x3b, 0x50, 0x46, 0xf8, 0x3d, 0x1a, 0x06, 0x1e, 
0x15, 0x3c, 0xde, - 0x09, 0xf6, 0x59, 0xa3, 0xd3, 0x08, 0xbb, 0xb5, 0x92, 0xcb, 0x30, 0xdd, 0xa6, 0x61, 0x8d, 0x7a, - 0x5e, 0x8c, 0x22, 0xcf, 0xb8, 0xcf, 0xb5, 0x69, 0xf8, 0xae, 0xe7, 0xc5, 0x16, 0x83, 0x85, 0x42, - 0x67, 0x55, 0xfc, 0x96, 0xf4, 0x0e, 0x83, 0x7d, 0xa6, 0xbe, 0xcb, 0x25, 0x6d, 0xcd, 0x9a, 0x10, - 0x59, 0x9a, 0xec, 0x3f, 0xeb, 0xae, 0x4a, 0xf3, 0x1e, 0x0b, 0x99, 0x8f, 0xd8, 0x3a, 0x48, 0x8f, - 0x0d, 0x42, 0x7a, 0x4c, 0x42, 0xfa, 0xb0, 0x58, 0xec, 0xad, 0x28, 0xb7, 0xa5, 0x7b, 0x8e, 0x72, - 0x59, 0x4b, 0xa9, 0x8b, 0x91, 0x25, 0x42, 0xcc, 0x2f, 0xe1, 0x95, 0xfe, 0x97, 0xb4, 0x47, 0xc3, - 0x4f, 0x98, 0xf8, 0x5f, 0x3f, 0xe5, 0xdf, 0x0c, 0x35, 0xce, 0x06, 0x00, 0x54, 0x85, 0x6f, 0x03, - 0xb4, 0xbb, 0x12, 0x77, 0xbb, 0xaf, 0x7c, 0xf2, 0x4b, 0xb8, 0x39, 0x0f, 0x72, 0x1d, 0x88, 0xe0, - 0x82, 0x86, 0xb5, 0x36, 0x17, 0x41, 0xe4, 0xd7, 0x5a, 0xfc, 0x33, 0x16, 0x23, 0xec, 0xa4, 0xfb, - 0x02, 0xde, 0xec, 0xe1, 0xc5, 0x6e, 0x76, 0x3e, 0xd4, 0x9e, 0x93, 0xcf, 0xdc, 0x9e, 0xd5, 0x5f, - 0x01, 0xce, 0x62, 0x4d, 0xe4, 0x2b, 0x03, 0xce, 0xc9, 0x35, 0x42, 0x96, 0x8a, 0xbe, 0x9a, 0xa1, - 0x9d, 0x65, 0x2e, 0x8f, 0x36, 0x94, 0x39, 0xad, 0xab, 0x5f, 0xff, 0xfe, 0xf7, 0xb7, 0x67, 0x5e, - 0x25, 0xf3, 0x4e, 0xf1, 0x0a, 0x25, 0xdf, 0x19, 0x30, 0xd3, 0x5b, 0x3b, 0x64, 0xa5, 0x38, 0xf8, - 0xf0, 0x46, 0x33, 0x57, 0xc7, 0xb2, 0x55, 0x2c, 0x15, 0x64, 0x59, 0x25, 0x6f, 0x38, 0x85, 0xcb, - 0x3a, 0x71, 0x1e, 0xf5, 0xfa, 0xe9, 0xad, 0x95, 0x43, 0xf2, 0xd8, 0x80, 0xf3, 0xf9, 0x45, 0x45, - 0xd6, 0x8a, 0x13, 0x6a, 0x96, 0x9d, 0x69, 0x8f, 0x6b, 0xae, 0x10, 0x57, 0x10, 0xf1, 0x35, 0x62, - 0x69, 0x11, 0x07, 0x56, 0x23, 0xf9, 0xbe, 0xab, 0x1a, 0x0e, 0xae, 0x51, 0xaa, 0xe5, 0xe6, 0xfb, - 0x48, 0xd5, 0xf2, 0x53, 0xd6, 0xba, 0x8d, 0x48, 0x9b, 0xa4, 0x3a, 0xb6, 0x6a, 0x4e, 0x53, 0x4e, - 0xd8, 0x84, 0xfc, 0x68, 0xc0, 0xc5, 0xa1, 0xe9, 0x4d, 0xd6, 0x8b, 0x93, 0xeb, 0xd7, 0x91, 0x59, - 0x39, 0x85, 0x87, 0x82, 0xde, 0x40, 0xe8, 0x35, 0xb2, 0x7a, 0x02, 0xf4, 0x6d, 0x39, 0xfb, 0xfb, - 0xb4, 0x3f, 0x1b, 
0x40, 0xfe, 0x3d, 0x2e, 0xc9, 0x46, 0x71, 0xfa, 0xc2, 0xe1, 0x6e, 0x6e, 0x9e, - 0xce, 0x49, 0x61, 0xdf, 0x41, 0xec, 0x1b, 0x64, 0x43, 0x8b, 0xdd, 0x9b, 0x1a, 0x38, 0x4f, 0xd1, - 0xd3, 0x79, 0xd4, 0x5d, 0x21, 0x87, 0xe4, 0x17, 0x03, 0x66, 0x35, 0x73, 0x94, 0x9c, 0x80, 0x52, - 0x3c, 0xf8, 0xcd, 0x1b, 0xa7, 0xf4, 0x52, 0x15, 0xdc, 0xc5, 0x0a, 0x6e, 0x92, 0x4d, 0x6d, 0x05, - 0x5e, 0xcf, 0x33, 0x5f, 0x42, 0x77, 0xc1, 0x1c, 0x66, 0xfd, 0x52, 0xca, 0x0d, 0x59, 0x72, 0x7d, - 0x44, 0xa3, 0x0e, 0x2c, 0x03, 0x73, 0x6d, 0x4c, 0x6b, 0x85, 0xfa, 0x0e, 0xa2, 0xde, 0x22, 0x6f, - 0x8e, 0xdf, 0xd8, 0xfd, 0x17, 0x48, 0x98, 0xd8, 0xba, 0xf7, 0xe4, 0xa8, 0x6c, 0x3c, 0x3d, 0x2a, - 0x1b, 0x7f, 0x1d, 0x95, 0x8d, 0x6f, 0x8e, 0xcb, 0x13, 0x4f, 0x8f, 0xcb, 0x13, 0x7f, 0x1c, 0x97, - 0x27, 0x3e, 0x5d, 0xf7, 0x03, 0x71, 0x90, 0xd6, 0xed, 0x06, 0x6f, 0x76, 0x83, 0x37, 0x0e, 0x68, - 0x10, 0xf5, 0x32, 0x7d, 0xde, 0xcf, 0x25, 0x3a, 0x2d, 0x96, 0xd4, 0xcf, 0xe1, 0x4f, 0x84, 0x8d, - 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x99, 0xd6, 0x70, 0x53, 0x00, 0x0d, 0x00, 0x00, + // 1088 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x97, 0xcd, 0x6f, 0xdc, 0x44, + 0x14, 0xc0, 0xe3, 0x24, 0x0d, 0xed, 0x4b, 0x4b, 0x61, 0x52, 0x20, 0x75, 0xca, 0x26, 0x72, 0xa1, + 0x09, 0x49, 0x63, 0x67, 0x93, 0xb4, 0xa8, 0x1f, 0x80, 0x48, 0xf8, 0x10, 0x55, 0x8a, 0x52, 0x23, + 0xe5, 0xc0, 0x65, 0x35, 0xbb, 0x9e, 0x38, 0x96, 0xbc, 0x9e, 0xad, 0xc7, 0x5e, 0x58, 0x95, 0x20, + 0xc4, 0x99, 0x03, 0x12, 0x12, 0xa8, 0x17, 0x84, 0xc4, 0x91, 0x3f, 0x01, 0x0e, 0x1c, 0x7b, 0x0c, + 0xe2, 0xc2, 0x09, 0xa1, 0x84, 0x3f, 0x04, 0xf9, 0xcd, 0x78, 0xd7, 0xde, 0xda, 0xd9, 0x4d, 0x15, + 0x71, 0x4b, 0x66, 0xde, 0xc7, 0xef, 0x7d, 0xf8, 0xbd, 0x59, 0x98, 0xad, 0xd3, 0x7a, 0xc7, 0xe7, + 0x81, 0xc5, 0x5a, 0xbc, 0xb1, 0xe7, 0x05, 0xae, 0xd5, 0xae, 0x5a, 0x0f, 0x63, 0x16, 0x76, 0xcc, + 0x56, 0xc8, 0x23, 0x4e, 0xa6, 0x94, 0x80, 0x99, 0x0a, 0x98, 0xed, 0xaa, 0x7e, 0xc9, 0xe5, 0x2e, + 0xc7, 0x7b, 0x2b, 
0xf9, 0x4b, 0x8a, 0xea, 0x57, 0x5c, 0xce, 0x5d, 0x9f, 0x59, 0xb4, 0xe5, 0x59, + 0x34, 0x08, 0x78, 0x44, 0x23, 0x8f, 0x07, 0x42, 0xdd, 0x2e, 0x36, 0xb8, 0x68, 0x72, 0x61, 0xd5, + 0xa9, 0x60, 0xd2, 0x83, 0xd5, 0xae, 0xd6, 0x59, 0x44, 0xab, 0x56, 0x8b, 0xba, 0x5e, 0x80, 0xc2, + 0x4a, 0x76, 0xae, 0x88, 0xaa, 0x45, 0x43, 0xda, 0x4c, 0xad, 0x19, 0x45, 0x12, 0x5d, 0x44, 0x94, + 0x31, 0x2e, 0x01, 0x79, 0x90, 0xf8, 0xd9, 0x46, 0x45, 0x9b, 0x3d, 0x8c, 0x99, 0x88, 0x8c, 0x6d, + 0x98, 0xca, 0x9d, 0x8a, 0x16, 0x0f, 0x04, 0x23, 0xb7, 0x60, 0x42, 0x3a, 0x98, 0xd6, 0xe6, 0xb4, + 0x85, 0xc9, 0xd5, 0x19, 0xb3, 0x20, 0x70, 0x53, 0x2a, 0x6d, 0x8c, 0x3f, 0xf9, 0x7b, 0x76, 0xc4, + 0x56, 0x0a, 0xc6, 0x3a, 0xbc, 0x84, 0x16, 0xdf, 0x4f, 0x04, 0x3f, 0x0a, 0x76, 0xb9, 0x72, 0x45, + 0x66, 0xe0, 0x1c, 0x2a, 0xd7, 0x82, 0xb8, 0x89, 0x66, 0xc7, 0xed, 0xb3, 0x78, 0xf0, 0x71, 0xdc, + 0x34, 0xee, 0xc1, 0xcb, 0xfd, 0x5a, 0x0a, 0x65, 0x05, 0xce, 0xa0, 0x94, 0x22, 0xd1, 0x0b, 0x49, + 0x50, 0xcd, 0x96, 0x82, 0xc6, 0x8f, 0x5a, 0xd6, 0x98, 0xc8, 0x32, 0xcc, 0xc2, 0xa4, 0x88, 0x68, + 0x18, 0xd5, 0x7a, 0x26, 0xc7, 0x6d, 0xc0, 0x23, 0x14, 0x46, 0xc8, 0xc0, 0x51, 0xd7, 0xa3, 0x0a, + 0x32, 0x70, 0xe4, 0xe5, 0x07, 0x00, 0xbd, 0xe2, 0x4c, 0x8f, 0x21, 0xcf, 0x35, 0x53, 0x56, 0xd2, + 0x4c, 0x2a, 0x69, 0xca, 0x5e, 0x51, 0x95, 0x34, 0xb7, 0xa9, 0xcb, 0x94, 0x67, 0x3b, 0xa3, 0x69, + 0x7c, 0xaf, 0xc1, 0x2b, 0x4f, 0x01, 0xaa, 0x70, 0x57, 0x61, 0x02, 0x9d, 0x27, 0x99, 0x1f, 0x1b, + 0x10, 0xaf, 0x92, 0x24, 0x1f, 0xe6, 0xb8, 0x46, 0x91, 0x6b, 0x7e, 0x20, 0x97, 0x74, 0x98, 0x03, + 0xd3, 0x61, 0x1a, 0xb9, 0x36, 0xe3, 0x30, 0x64, 0x81, 0x4c, 0x49, 0xda, 0x29, 0x2e, 0x5c, 0x2e, + 0xb8, 0x53, 0xd4, 0x57, 0xe1, 0x42, 0x43, 0x9e, 0xe7, 0x32, 0x7b, 0xbe, 0x91, 0x11, 0x26, 0xaf, + 0xc3, 0xf3, 0xb2, 0x01, 0xea, 0x3c, 0x0e, 0x1c, 0x1a, 0x76, 0x54, 0x82, 0x2f, 0xe0, 0xe9, 0x86, + 0x3a, 0x34, 0xbe, 0xc8, 0x36, 0xd0, 0x7d, 0xe1, 0x8a, 0x61, 0x1a, 0xa8, 0xaf, 0x36, 0xa3, 0xcf, + 0x5c, 0x9b, 0xc7, 0xb9, 0xe6, 0x91, 0xee, 0x55, 0x90, 
0x37, 0x61, 0xbc, 0x29, 0xdc, 0xb4, 0x30, + 0x46, 0x61, 0x61, 0x1e, 0xc4, 0x2c, 0x66, 0xce, 0x7d, 0x26, 0x44, 0x62, 0x1f, 0xe5, 0x4f, 0xaf, + 0x3c, 0x3f, 0x6b, 0x30, 0x83, 0x6c, 0x5b, 0x34, 0x62, 0x22, 0x2a, 0x4c, 0x50, 0xb7, 0x79, 0xb5, + 0xbe, 0xe6, 0x9d, 0x85, 0x49, 0x99, 0xbd, 0x06, 0x8f, 0x83, 0x48, 0xa5, 0x1e, 0xf0, 0x68, 0x33, + 0x39, 0x39, 0xb5, 0xee, 0xfe, 0x55, 0x83, 0x2b, 0xc5, 0x94, 0x2a, 0x8f, 0x36, 0xbc, 0xe8, 0xe3, + 0x95, 0x24, 0xad, 0x65, 0x92, 0x7a, 0x6d, 0x70, 0x52, 0xb7, 0x3c, 0x11, 0xd9, 0x17, 0xfd, 0xbc, + 0xed, 0xd3, 0xcb, 0xf1, 0x1d, 0xa8, 0x20, 0xfc, 0x0e, 0xf5, 0x3d, 0x87, 0x46, 0x3c, 0xdc, 0xf2, + 0x76, 0x59, 0xa3, 0xd3, 0xf0, 0xd3, 0x58, 0xc9, 0x65, 0x38, 0xdb, 0xa6, 0x7e, 0x8d, 0x3a, 0x4e, + 0x88, 0x49, 0x3e, 0x67, 0x3f, 0xd7, 0xa6, 0xfe, 0xbb, 0x8e, 0x13, 0x1a, 0x0c, 0x66, 0x4b, 0x95, + 0x55, 0xf0, 0x1b, 0x52, 0xdb, 0xf7, 0x76, 0x99, 0x9a, 0x68, 0xf3, 0x85, 0x31, 0x17, 0x98, 0x48, + 0xdc, 0x24, 0xff, 0x19, 0x77, 0x95, 0x9b, 0xf7, 0x98, 0xcf, 0x5c, 0xc4, 0x2e, 0x82, 0x74, 0x58, + 0x1e, 0xd2, 0x61, 0x12, 0xd2, 0x85, 0xb9, 0x72, 0x6d, 0x45, 0xb9, 0x29, 0xd5, 0x33, 0x94, 0x0b, + 0x85, 0x94, 0x45, 0x36, 0x12, 0x47, 0x88, 0xf9, 0x65, 0x76, 0xca, 0xed, 0x50, 0xff, 0x13, 0x16, + 0xfd, 0xaf, 0x9f, 0xf2, 0x1f, 0x9a, 0x1a, 0x67, 0x39, 0x00, 0x15, 0xe1, 0xdb, 0x00, 0xed, 0x34, + 0xc5, 0x69, 0xf7, 0x55, 0x8e, 0xaf, 0x84, 0x9d, 0xd1, 0x20, 0xd7, 0x81, 0x44, 0x3c, 0xa2, 0x7e, + 0xad, 0xcd, 0x23, 0x2f, 0x70, 0x6b, 0x2d, 0xfe, 0x19, 0x0b, 0x11, 0x76, 0xcc, 0x7e, 0x01, 0x6f, + 0x76, 0xf0, 0x62, 0x3b, 0x39, 0xef, 0x6b, 0xcf, 0xb1, 0x67, 0x6e, 0xcf, 0xd5, 0x83, 0x49, 0x38, + 0x83, 0x31, 0x91, 0xaf, 0x34, 0x98, 0x90, 0x0b, 0x98, 0xcc, 0x97, 0x7d, 0x35, 0x7d, 0xdb, 0x5e, + 0x5f, 0x18, 0x2c, 0x28, 0x7d, 0x1a, 0x57, 0xbf, 0xfe, 0xf3, 0xdf, 0xef, 0x46, 0x5f, 0x25, 0x33, + 0x56, 0xf9, 0xe3, 0x83, 0xfc, 0xa0, 0xc1, 0xb9, 0xee, 0xc2, 0x26, 0x8b, 0xe5, 0xc6, 0xfb, 0xdf, + 0x02, 0xfa, 0xd2, 0x50, 0xb2, 0x8a, 0xa5, 0x8a, 0x2c, 0x4b, 0xe4, 0x0d, 0xab, 0xf4, 0x99, 
0x23, + 0xac, 0x47, 0xdd, 0x7e, 0x7a, 0x6b, 0x71, 0x9f, 0x7c, 0xa3, 0x01, 0xf4, 0x96, 0x2b, 0x19, 0xe4, + 0x2e, 0xfb, 0x46, 0xd0, 0xaf, 0x0f, 0x27, 0x3c, 0x54, 0xa2, 0xd4, 0x82, 0x7e, 0xac, 0xc1, 0xf9, + 0xec, 0xde, 0x24, 0xcb, 0xe5, 0x3e, 0x0a, 0x76, 0xaf, 0x6e, 0x0e, 0x2b, 0xae, 0xa0, 0x16, 0x11, + 0xea, 0x35, 0x62, 0x14, 0x42, 0xe5, 0x36, 0x35, 0xf9, 0x29, 0x2d, 0x22, 0xce, 0xd1, 0x41, 0x45, + 0xcc, 0xac, 0x9b, 0x81, 0x45, 0xcc, 0x0e, 0x7d, 0xe3, 0x36, 0x22, 0xad, 0x93, 0xd5, 0xa1, 0x8b, + 0x68, 0x35, 0xe5, 0xc0, 0x17, 0xe4, 0x17, 0x0d, 0x2e, 0xf6, 0x2d, 0x13, 0xb2, 0x52, 0xee, 0xbc, + 0x78, 0x3b, 0xea, 0xd5, 0x13, 0x68, 0x28, 0xe8, 0x35, 0x84, 0x5e, 0x26, 0x4b, 0xc7, 0x40, 0xdf, + 0x96, 0xab, 0xa8, 0x47, 0xfb, 0x9b, 0x06, 0xe4, 0xe9, 0xe9, 0x4d, 0xd6, 0xca, 0xdd, 0x97, 0xee, + 0x1a, 0x7d, 0xfd, 0x64, 0x4a, 0x0a, 0xfb, 0x0e, 0x62, 0xdf, 0x20, 0x6b, 0x85, 0xd8, 0xdd, 0x21, + 0x86, 0xe3, 0x1d, 0x35, 0xad, 0x47, 0xe9, 0x46, 0xdb, 0x27, 0xbf, 0x6b, 0x30, 0x55, 0x30, 0xd6, + 0xc9, 0x31, 0x28, 0xe5, 0x7b, 0x48, 0xbf, 0x71, 0x42, 0x2d, 0x15, 0xc1, 0x5d, 0x8c, 0xe0, 0x26, + 0x59, 0x2f, 0x8c, 0xc0, 0xe9, 0x6a, 0x66, 0x43, 0x48, 0xf7, 0xdd, 0x7e, 0xd2, 0x2f, 0x93, 0x99, + 0x99, 0x4f, 0x06, 0x7d, 0xd1, 0xb9, 0xdd, 0xa4, 0x2f, 0x0f, 0x29, 0xad, 0x50, 0xdf, 0x41, 0xd4, + 0x5b, 0xe4, 0xcd, 0xe1, 0x1b, 0xbb, 0x57, 0x01, 0xc1, 0xa2, 0x8d, 0x7b, 0x4f, 0x0e, 0x2b, 0xda, + 0xc1, 0x61, 0x45, 0xfb, 0xe7, 0xb0, 0xa2, 0x7d, 0x7b, 0x54, 0x19, 0x39, 0x38, 0xaa, 0x8c, 0xfc, + 0x75, 0x54, 0x19, 0xf9, 0x74, 0xc5, 0xf5, 0xa2, 0xbd, 0xb8, 0x6e, 0x36, 0x78, 0x33, 0x35, 0xde, + 0xd8, 0xa3, 0x5e, 0xd0, 0xf5, 0xf4, 0x79, 0xcf, 0x57, 0xd4, 0x69, 0x31, 0x51, 0x9f, 0xc0, 0xdf, + 0x7a, 0x6b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x45, 0xf9, 0x53, 0x14, 0xc9, 0x0e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -915,6 +1035,10 @@ type QueryClient interface { Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) // EpochInfo queries the information of a given epoch EpochInfo(ctx context.Context, in *QueryEpochInfoRequest, opts ...grpc.CallOption) (*QueryEpochInfoResponse, error) + // EpochsInfo queries the metadata of epochs in a given range, depending on the + // parameters in the pagination request. Th main use case will be querying the + // latest epochs in time order. + EpochsInfo(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error) // CurrentEpoch queries the current epoch CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error) // EpochMsgs queries the messages of a given epoch @@ -955,6 +1079,15 @@ func (c *queryClient) EpochInfo(ctx context.Context, in *QueryEpochInfoRequest, return out, nil } +func (c *queryClient) EpochsInfo(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error) { + out := new(QueryEpochsInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.epoching.v1.Query/EpochsInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *queryClient) CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error) { out := new(QueryCurrentEpochResponse) err := c.cc.Invoke(ctx, "/babylon.epoching.v1.Query/CurrentEpoch", in, out, opts...) @@ -1015,6 +1148,10 @@ type QueryServer interface { Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) // EpochInfo queries the information of a given epoch EpochInfo(context.Context, *QueryEpochInfoRequest) (*QueryEpochInfoResponse, error) + // EpochsInfo queries the metadata of epochs in a given range, depending on the + // parameters in the pagination request. 
Th main use case will be querying the + // latest epochs in time order. + EpochsInfo(context.Context, *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) // CurrentEpoch queries the current epoch CurrentEpoch(context.Context, *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) // EpochMsgs queries the messages of a given epoch @@ -1039,6 +1176,9 @@ func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsReq func (*UnimplementedQueryServer) EpochInfo(ctx context.Context, req *QueryEpochInfoRequest) (*QueryEpochInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method EpochInfo not implemented") } +func (*UnimplementedQueryServer) EpochsInfo(ctx context.Context, req *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EpochsInfo not implemented") +} func (*UnimplementedQueryServer) CurrentEpoch(ctx context.Context, req *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CurrentEpoch not implemented") } @@ -1098,6 +1238,24 @@ func _Query_EpochInfo_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } +func _Query_EpochsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryEpochsInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).EpochsInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.epoching.v1.Query/EpochsInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).EpochsInfo(ctx, req.(*QueryEpochsInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Query_CurrentEpoch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryCurrentEpochRequest) if err := dec(in); err != nil { @@ -1218,6 +1376,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{ MethodName: "EpochInfo", Handler: _Query_EpochInfo_Handler, }, + { + MethodName: "EpochsInfo", + Handler: _Query_EpochsInfo_Handler, + }, { MethodName: "CurrentEpoch", Handler: _Query_CurrentEpoch_Handler, @@ -1366,6 +1528,100 @@ func (m *QueryEpochInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *QueryEpochsInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochsInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EndEpoch != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EndEpoch)) + i-- + dAtA[i] = 0x10 + } + if m.StartEpoch != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.StartEpoch)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryEpochsInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochsInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l 
int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Epochs) > 0 { + for iNdEx := len(m.Epochs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Epochs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *QueryCurrentEpochRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1885,6 +2141,44 @@ func (m *QueryEpochInfoResponse) Size() (n int) { return n } +func (m *QueryEpochsInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartEpoch != 0 { + n += 1 + sovQuery(uint64(m.StartEpoch)) + } + if m.EndEpoch != 0 { + n += 1 + sovQuery(uint64(m.EndEpoch)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryEpochsInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Epochs) > 0 { + for _, e := range m.Epochs { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + func (m *QueryCurrentEpochRequest) Size() (n int) { if m == nil { return 0 @@ -2366,6 +2660,250 @@ func (m *QueryEpochInfoResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryEpochsInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochsInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartEpoch", wireType) + } + m.StartEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartEpoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndEpoch", wireType) + } + m.EndEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndEpoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryEpochsInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochsInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epochs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Epochs = append(m.Epochs, &Epoch{}) + if err := m.Epochs[len(m.Epochs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *QueryCurrentEpochRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/x/epoching/types/query.pb.gw.go b/x/epoching/types/query.pb.gw.go index 82e30aa48..46850074f 100644 --- a/x/epoching/types/query.pb.gw.go +++ b/x/epoching/types/query.pb.gw.go @@ -105,6 +105,42 @@ func local_request_Query_EpochInfo_0(ctx context.Context, marshaler runtime.Mars } +var ( + filter_Query_EpochsInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_EpochsInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochsInfoRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_EpochsInfo_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.EpochsInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_EpochsInfo_0(ctx context.Context, marshaler runtime.Marshaler, server 
QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochsInfoRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_EpochsInfo_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.EpochsInfo(ctx, &protoReq) + return msg, metadata, err + +} + func request_Query_CurrentEpoch_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryCurrentEpochRequest var metadata runtime.ServerMetadata @@ -463,6 +499,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_EpochsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_EpochsInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EpochsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_Query_CurrentEpoch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -682,6 +741,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_EpochsInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_EpochsInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EpochsInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_Query_CurrentEpoch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -810,6 +889,8 @@ var ( pattern_Query_EpochInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "epoching", "v1", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_EpochsInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "epoching", "v1", "epochs"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_CurrentEpoch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "epoching", "v1", "current_epoch"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_EpochMsgs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"babylon", "epoching", "v1", "epochs", "epoch_num", "messages"}, "", runtime.AssumeColonVerbOpt(false))) @@ -828,6 +909,8 @@ var ( forward_Query_EpochInfo_0 = runtime.ForwardResponseMessage + forward_Query_EpochsInfo_0 = runtime.ForwardResponseMessage + forward_Query_CurrentEpoch_0 = runtime.ForwardResponseMessage forward_Query_EpochMsgs_0 = runtime.ForwardResponseMessage diff --git a/x/monitor/client/cli/query.go b/x/monitor/client/cli/query.go new file mode 100644 index 000000000..50e9b2a2e --- /dev/null +++ b/x/monitor/client/cli/query.go @@ -0,0 +1,25 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + + "github.com/babylonchain/babylon/x/monitor/types" +) + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd(queryRoute string) *cobra.Command { + // Group monitor queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + 
DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + return cmd +} diff --git a/x/monitor/client/cli/query_params.go b/x/monitor/client/cli/query_params.go new file mode 100644 index 000000000..9578c4226 --- /dev/null +++ b/x/monitor/client/cli/query_params.go @@ -0,0 +1,34 @@ +package cli + +import ( + "context" + + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdQueryParams() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "shows the parameters of the module", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + res, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/monitor/client/cli/tx.go b/x/monitor/client/cli/tx.go new file mode 100644 index 000000000..c1836f6e7 --- /dev/null +++ b/x/monitor/client/cli/tx.go @@ -0,0 +1,24 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + + "github.com/babylonchain/babylon/x/monitor/types" +) + +// GetTxCmd returns the transaction commands for this module +func GetTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + return cmd +} diff --git a/x/monitor/genesis.go b/x/monitor/genesis.go new file mode 100644 index 000000000..9a2cea635 --- /dev/null +++ b/x/monitor/genesis.go @@ -0,0 +1,21 @@ +package monitor + +import ( + "github.com/babylonchain/babylon/x/monitor/keeper" + 
"github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// InitGenesis initializes the capability module's state from a provided genesis +// state. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + k.SetParams(ctx, genState.Params) +} + +// ExportGenesis returns the capability module's exported genesis. +func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + genesis := types.DefaultGenesis() + genesis.Params = k.GetParams(ctx) + + return genesis +} diff --git a/x/monitor/genesis_test.go b/x/monitor/genesis_test.go new file mode 100644 index 000000000..a5d8dd915 --- /dev/null +++ b/x/monitor/genesis_test.go @@ -0,0 +1,33 @@ +package monitor_test + +import ( + "testing" + + "github.com/babylonchain/babylon/x/monitor" + "github.com/stretchr/testify/require" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + simapp "github.com/babylonchain/babylon/app" + "github.com/babylonchain/babylon/x/monitor/types" +) + +func TestExportGenesis(t *testing.T) { + app := simapp.Setup(t, false) + ctx := app.BaseApp.NewContext(false, tmproto.Header{}) + + app.MonitorKeeper.SetParams(ctx, types.DefaultParams()) + genesisState := monitor.ExportGenesis(ctx, app.MonitorKeeper) + require.Equal(t, genesisState.Params, types.DefaultParams()) +} + +func TestInitGenesis(t *testing.T) { + app := simapp.Setup(t, false) + ctx := app.BaseApp.NewContext(false, tmproto.Header{}) + + genesisState := types.GenesisState{ + Params: types.Params{}, + } + + monitor.InitGenesis(ctx, app.MonitorKeeper, genesisState) + require.Equal(t, app.MonitorKeeper.GetParams(ctx), genesisState.Params) +} diff --git a/x/monitor/handler.go b/x/monitor/handler.go new file mode 100644 index 000000000..f9d925572 --- /dev/null +++ b/x/monitor/handler.go @@ -0,0 +1,23 @@ +package monitor + +import ( + "fmt" + + "github.com/babylonchain/babylon/x/monitor/keeper" + 
"github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// NewHandler ... +func NewHandler(k keeper.Keeper) sdk.Handler { + + return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + + switch msg := msg.(type) { + default: + errMsg := fmt.Sprintf("unrecognized %s message type: %T", types.ModuleName, msg) + return nil, sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, errMsg) + } + } +} diff --git a/x/monitor/keeper/grpc_query.go b/x/monitor/keeper/grpc_query.go new file mode 100644 index 000000000..6a38d7a90 --- /dev/null +++ b/x/monitor/keeper/grpc_query.go @@ -0,0 +1,44 @@ +package keeper + +import ( + "context" + + "github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ types.QueryServer = Keeper{} + +func (k Keeper) EndedEpochBtcHeight(c context.Context, req *types.QueryEndedEpochBtcHeightRequest) (*types.QueryEndedEpochBtcHeightResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + btcHeight, err := k.LightclientHeightAtEpochEnd(ctx, req.EpochNum) + + if err != nil { + return nil, err + } + + return &types.QueryEndedEpochBtcHeightResponse{BtcLightClientHeight: btcHeight}, nil +} + +func (k Keeper) ReportedCheckpointBtcHeight(c context.Context, req *types.QueryReportedCheckpointBtcHeightRequest) (*types.QueryReportedCheckpointBtcHeightResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + btcHeight, err := k.LightclientHeightAtCheckpointReported(ctx, req.CkptHash) + + if err != nil { + return nil, err + } + + return &types.QueryReportedCheckpointBtcHeightResponse{BtcLightClientHeight: btcHeight}, nil +} diff --git a/x/monitor/keeper/grpc_query_params.go 
b/x/monitor/keeper/grpc_query_params.go new file mode 100644 index 000000000..9ba4a1b00 --- /dev/null +++ b/x/monitor/keeper/grpc_query_params.go @@ -0,0 +1,26 @@ +package keeper + +import ( + "context" + + "github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Querier is used as Keeper will have duplicate methods if used directly, and gRPC names take precedence over keeper +type Querier struct { + Keeper +} + +var _ types.QueryServer = Querier{} + +func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + return &types.QueryParamsResponse{Params: k.GetParams(ctx)}, nil +} diff --git a/x/monitor/keeper/grpc_query_test.go b/x/monitor/keeper/grpc_query_test.go new file mode 100644 index 000000000..44dc9ee11 --- /dev/null +++ b/x/monitor/keeper/grpc_query_test.go @@ -0,0 +1,158 @@ +package keeper_test + +import ( + "github.com/babylonchain/babylon/btctxformatter" + "github.com/babylonchain/babylon/testutil/datagen" + "github.com/babylonchain/babylon/testutil/mocks" + btclightclienttypes "github.com/babylonchain/babylon/x/btclightclient/types" + ckpttypes "github.com/babylonchain/babylon/x/checkpointing/types" + "github.com/babylonchain/babylon/x/epoching/testepoching" + types2 "github.com/babylonchain/babylon/x/epoching/types" + monitorkeeper "github.com/babylonchain/babylon/x/monitor/keeper" + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "math/rand" + "testing" +) + +func FuzzQueryEndedEpochBtcHeight(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + // a genesis validator is generated for 
setup + helper := testepoching.NewHelper(t) + lck := helper.App.BTCLightClientKeeper + mk := helper.App.MonitorKeeper + ek := helper.EpochingKeeper + querier := monitorkeeper.Querier{Keeper: mk} + queryHelper := baseapp.NewQueryServerTestHelper(helper.Ctx, helper.App.InterfaceRegistry()) + types.RegisterQueryServer(queryHelper, querier) + queryClient := types.NewQueryClient(queryHelper) + + // BeginBlock of block 1, and thus entering epoch 1 + ctx := helper.BeginBlock() + epoch := ek.GetEpoch(ctx) + require.Equal(t, uint64(1), epoch.EpochNumber) + + // Insert header tree + tree := datagen.NewBTCHeaderTree() + root := lck.GetBaseBTCHeader(ctx) + tree.Add(root, nil) + tree.GenRandomBTCHeaderTree(1, 10, root, func(header *btclightclienttypes.BTCHeaderInfo) bool { + err := lck.InsertHeader(ctx, header.Header) + require.NoError(t, err) + return true + }) + + // EndBlock of block 1 + ctx = helper.EndBlock() + + // go to BeginBlock of block 11, and thus entering epoch 2 + for i := uint64(0); i < ek.GetParams(ctx).EpochInterval; i++ { + ctx = helper.GenAndApplyEmptyBlock() + } + epoch = ek.GetEpoch(ctx) + require.Equal(t, uint64(2), epoch.EpochNumber) + + // query epoch 0 ended BTC light client height, should return base height + req := types.QueryEndedEpochBtcHeightRequest{ + EpochNum: 0, + } + resp, err := queryClient.EndedEpochBtcHeight(ctx, &req) + require.NoError(t, err) + require.Equal(t, lck.GetBaseBTCHeader(ctx).Height, resp.BtcLightClientHeight) + + // query epoch 1 ended BTC light client height, should return tip height + req = types.QueryEndedEpochBtcHeightRequest{ + EpochNum: 1, + } + resp, err = queryClient.EndedEpochBtcHeight(ctx, &req) + require.NoError(t, err) + require.Equal(t, lck.GetTipInfo(ctx).Height, resp.BtcLightClientHeight) + }) +} + +func FuzzQueryReportedCheckpointBtcHeight(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + // a genesis validator is generated for setup + helper := 
testepoching.NewHelper(t) + ctl := gomock.NewController(t) + defer ctl.Finish() + lck := helper.App.BTCLightClientKeeper + mk := helper.App.MonitorKeeper + ek := helper.EpochingKeeper + ck := helper.App.CheckpointingKeeper + mockEk := mocks.NewMockEpochingKeeper(ctl) + ck.SetEpochingKeeper(mockEk) + querier := monitorkeeper.Querier{Keeper: mk} + queryHelper := baseapp.NewQueryServerTestHelper(helper.Ctx, helper.App.InterfaceRegistry()) + types.RegisterQueryServer(queryHelper, querier) + queryClient := types.NewQueryClient(queryHelper) + + // BeginBlock of block 1, and thus entering epoch 1 + ctx := helper.BeginBlock() + epoch := ek.GetEpoch(ctx) + require.Equal(t, uint64(1), epoch.EpochNumber) + + // Insert header tree + tree := datagen.NewBTCHeaderTree() + root := lck.GetBaseBTCHeader(ctx) + tree.Add(root, nil) + tree.GenRandomBTCHeaderTree(1, 10, root, func(header *btclightclienttypes.BTCHeaderInfo) bool { + err := lck.InsertHeader(ctx, header.Header) + require.NoError(t, err) + return true + }) + + // Add checkpoint + valBlsSet, privKeys := datagen.GenerateValidatorSetWithBLSPrivKeys(int(datagen.RandomIntOtherThan(0, 10))) + valSet := make([]types2.Validator, len(valBlsSet.ValSet)) + for i, val := range valBlsSet.ValSet { + valSet[i] = types2.Validator{ + Addr: []byte(val.ValidatorAddress), + Power: int64(val.VotingPower), + } + err := ck.CreateRegistration(ctx, val.BlsPubKey, []byte(val.ValidatorAddress)) + require.NoError(t, err) + } + mockCkptWithMeta := &ckpttypes.RawCheckpointWithMeta{Ckpt: datagen.GenerateLegitimateRawCheckpoint(privKeys)} + mockEk.EXPECT().GetValidatorSet(gomock.Any(), gomock.Eq(mockCkptWithMeta.Ckpt.EpochNum)).Return(valSet).AnyTimes() + // make sure voting power is always sufficient + mockEk.EXPECT().GetTotalVotingPower(gomock.Any(), gomock.Eq(mockCkptWithMeta.Ckpt.EpochNum)).Return(int64(0)).AnyTimes() + err := ck.AddRawCheckpoint( + ctx, + mockCkptWithMeta, + ) + require.NoError(t, err) + + // Verify checkpoint + btcCkpt := 
btctxformatter.RawBtcCheckpoint{ + Epoch: mockCkptWithMeta.Ckpt.EpochNum, + LastCommitHash: *mockCkptWithMeta.Ckpt.LastCommitHash, + BitMap: mockCkptWithMeta.Ckpt.Bitmap, + SubmitterAddress: datagen.GenRandomByteArray(btctxformatter.AddressLength), + BlsSig: *mockCkptWithMeta.Ckpt.BlsMultiSig, + } + err = ck.VerifyCheckpoint(ctx, btcCkpt) + require.NoError(t, err) + + // query reported checkpoint BTC light client height + req := types.QueryReportedCheckpointBtcHeightRequest{ + CkptHash: mockCkptWithMeta.Ckpt.HashStr(), + } + resp, err := queryClient.ReportedCheckpointBtcHeight(ctx, &req) + require.NoError(t, err) + require.Equal(t, lck.GetTipInfo(ctx).Height, resp.BtcLightClientHeight) + + // query not reported checkpoint BTC light client height, should expect an ErrCheckpointNotReported + req = types.QueryReportedCheckpointBtcHeightRequest{ + CkptHash: datagen.GenRandomHexStr(32), + } + _, err = queryClient.ReportedCheckpointBtcHeight(ctx, &req) + require.ErrorIs(t, err, types.ErrCheckpointNotReported) + }) +} diff --git a/x/monitor/keeper/hooks.go b/x/monitor/keeper/hooks.go new file mode 100644 index 000000000..49a24e0e7 --- /dev/null +++ b/x/monitor/keeper/hooks.go @@ -0,0 +1,47 @@ +package keeper + +import ( + checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" + etypes "github.com/babylonchain/babylon/x/epoching/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Helper interface to be sure Hooks implement both epoching and light client hooks +type HandledHooks interface { + etypes.EpochingHooks + checkpointingtypes.CheckpointingHooks +} + +type Hooks struct { + k Keeper +} + +// Create new distribution hooks +func (k Keeper) Hooks() Hooks { return Hooks{k} } + +func (h Hooks) AfterEpochBegins(ctx sdk.Context, epoch uint64) {} + +func (h Hooks) AfterEpochEnds(ctx sdk.Context, epoch uint64) { + h.k.updateBtcLightClientHeightForEpoch(ctx, epoch) +} + +func (h Hooks) BeforeSlashThreshold(ctx sdk.Context, valSet etypes.ValidatorSet) 
{} + +func (h Hooks) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error { + return nil +} +func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { + return nil +} + +func (h Hooks) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return h.k.removeCheckpointRecord(ctx, ckpt) +} + +func (h Hooks) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error { + return nil +} + +func (h Hooks) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return h.k.updateBtcLightClientHeightForCheckpoint(ctx, ckpt) +} diff --git a/x/monitor/keeper/keeper.go b/x/monitor/keeper/keeper.go new file mode 100644 index 000000000..308ee45e8 --- /dev/null +++ b/x/monitor/keeper/keeper.go @@ -0,0 +1,144 @@ +package keeper + +import ( + "fmt" + ckpttypes "github.com/babylonchain/babylon/x/checkpointing/types" + + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/tendermint/tendermint/libs/log" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + memKey storetypes.StoreKey + paramstore paramtypes.Subspace + btcLightClientKeeper types.BTCLightClientKeeper + } +) + +func NewKeeper( + cdc codec.BinaryCodec, + storeKey, + memKey storetypes.StoreKey, + ps paramtypes.Subspace, + bk types.BTCLightClientKeeper, +) Keeper { + // set KeyTable if it has not already been set + if !ps.HasKeyTable() { + ps = ps.WithKeyTable(types.ParamKeyTable()) + } + + return Keeper{ + cdc: cdc, + storeKey: storeKey, + memKey: memKey, + paramstore: ps, + btcLightClientKeeper: bk, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +func 
bytesToUint64(bytes []byte) (uint64, error) { + if len(bytes) != 8 { + return 0, fmt.Errorf("epoch bytes must have exactly 8 bytes") + } + + return sdk.BigEndianToUint64(bytes), nil +} + +func (k Keeper) updateBtcLightClientHeightForEpoch(ctx sdk.Context, epoch uint64) { + store := ctx.KVStore(k.storeKey) + currentTipHeight := k.btcLightClientKeeper.GetTipInfo(ctx).Height + store.Set(types.GetEpochEndLightClientHeightKey(epoch), sdk.Uint64ToBigEndian(currentTipHeight)) +} + +func (k Keeper) updateBtcLightClientHeightForCheckpoint(ctx sdk.Context, ckpt *ckpttypes.RawCheckpoint) error { + store := ctx.KVStore(k.storeKey) + ckptHashStr := ckpt.HashStr() + + storeKey, err := types.GetCheckpointReportedLightClientHeightKey(ckptHashStr) + if err != nil { + return err + } + + // if the checkpoint exists, meaning an earlier checkpoint with a lower btc height is already recorded + // we should keep the lower btc height in the store + if store.Has(storeKey) { + k.Logger(ctx).With("module", fmt.Sprintf("checkpoint %s is already recorded", ckptHashStr)) + return nil + } + + currentTipHeight := k.btcLightClientKeeper.GetTipInfo(ctx).Height + store.Set(storeKey, sdk.Uint64ToBigEndian(currentTipHeight)) + + return nil +} + +func (k Keeper) removeCheckpointRecord(ctx sdk.Context, ckpt *ckpttypes.RawCheckpoint) error { + store := ctx.KVStore(k.storeKey) + ckptHashStr := ckpt.HashStr() + + storeKey, err := types.GetCheckpointReportedLightClientHeightKey(ckptHashStr) + if err != nil { + return err + } + + store.Delete(storeKey) + + return nil +} + +func (k Keeper) LightclientHeightAtEpochEnd(ctx sdk.Context, epoch uint64) (uint64, error) { + if epoch == 0 { + return k.btcLightClientKeeper.GetBaseBTCHeader(ctx).Height, nil + } + + store := ctx.KVStore(k.storeKey) + + btcHeightBytes := store.Get(types.GetEpochEndLightClientHeightKey(epoch)) + // nil would be returned if key does not exist + if btcHeightBytes == nil { + // we do not have any key under given epoch, most probably epoch 
did not finish + // yet + return 0, types.ErrEpochNotEnded.Wrapf("epoch %d", epoch) + } + + btcHeight, err := bytesToUint64(btcHeightBytes) + + if err != nil { + panic("Invalid data in database") + } + + return btcHeight, nil +} + +func (k Keeper) LightclientHeightAtCheckpointReported(ctx sdk.Context, hashString string) (uint64, error) { + store := ctx.KVStore(k.storeKey) + + storeKey, err := types.GetCheckpointReportedLightClientHeightKey(hashString) + if err != nil { + return 0, err + } + + btcHeightBytes := store.Get(storeKey) + // nil would be returned if key does not exist + if btcHeightBytes == nil { + return 0, types.ErrCheckpointNotReported.Wrapf("checkpoint hash: %s", hashString) + } + + btcHeight, err := bytesToUint64(btcHeightBytes) + if err != nil { + panic("invalid data in database") + } + + return btcHeight, nil +} diff --git a/x/monitor/keeper/params.go b/x/monitor/keeper/params.go new file mode 100644 index 000000000..c904062ad --- /dev/null +++ b/x/monitor/keeper/params.go @@ -0,0 +1,17 @@ +package keeper + +import ( + "github.com/babylonchain/babylon/x/monitor/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetParams get all parameters as types.Params +func (k Keeper) GetParams(ctx sdk.Context) (params types.Params) { + k.paramstore.GetParamSet(ctx, ¶ms) + return params +} + +// SetParams set the params +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + k.paramstore.SetParamSet(ctx, ¶ms) +} diff --git a/x/monitor/module.go b/x/monitor/module.go new file mode 100644 index 000000000..337d27286 --- /dev/null +++ b/x/monitor/module.go @@ -0,0 +1,177 @@ +package monitor + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/gorilla/mux" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/babylonchain/babylon/x/monitor/client/cli" + "github.com/babylonchain/babylon/x/monitor/keeper" + 
"github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface for the capability module. +type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the capability module's name. +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +func (AppModuleBasic) RegisterCodec(cdc *codec.LegacyAmino) { + + // types.RegisterCodec(cdc) +} + +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + // types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers the module's interface types +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + // types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns the capability module's default genesis state. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis performs genesis state validation for the capability module. 
+func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return genState.Validate() +} + +// RegisterRESTRoutes registers the capability module's REST service handlers. +func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) { +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) //nolint:errcheck // generally we don't handle errors in these registration functions +} + +// GetTxCmd returns the capability module's root tx command. +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.GetTxCmd() +} + +// GetQueryCmd returns the capability module's root query command. +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd(types.StoreKey) +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface for the capability module. +type AppModule struct { + AppModuleBasic + + keeper keeper.Keeper + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper + // TODO: add dependencies to staking, slashing and evidence +} + +func NewAppModule( + cdc codec.Codec, + keeper keeper.Keeper, + accountKeeper types.AccountKeeper, + bankKeeper types.BankKeeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + } +} + +// Name returns the capability module's name. 
+func (am AppModule) Name() string { + return am.AppModuleBasic.Name() +} + +// Route returns the capability module's message routing key. +func (am AppModule) Route() sdk.Route { + return sdk.NewRoute(types.RouterKey, NewHandler(am.keeper)) +} + +// QuerierRoute returns the capability module's query routing key. +func (AppModule) QuerierRoute() string { return types.QuerierRoute } + +// LegacyQuerierHandler returns the capability module's Querier. +func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a GRPC query service to respond to the +// module-specific GRPC queries. +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// RegisterInvariants registers the capability module's invariants. +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the capability module's genesis initialization It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the capability module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// ConsensusVersion implements ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 2 } + +// BeginBlock executes all ABCI BeginBlock logic respective to the capability module. +func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} + +// EndBlock executes all ABCI EndBlock logic respective to the capability module. 
It +// returns no validator updates. +func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} diff --git a/x/monitor/module_simulation.go b/x/monitor/module_simulation.go new file mode 100644 index 000000000..bf642f766 --- /dev/null +++ b/x/monitor/module_simulation.go @@ -0,0 +1,55 @@ +package monitor + +import ( + "math/rand" + + monitorsimulation "github.com/babylonchain/babylon/x/monitor/simulation" + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/cosmos/cosmos-sdk/baseapp" + simappparams "github.com/cosmos/cosmos-sdk/simapp/params" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/simulation" +) + +// avoid unused import issue +var ( + _ = monitorsimulation.FindAccount + _ = simappparams.StakePerAccount + _ = simulation.MsgEntryKind + _ = baseapp.Paramspace +) + +// GenerateGenesisState creates a randomized GenState of the module +func (AppModule) GenerateGenesisState(simState *module.SimulationState) { + accs := make([]string, len(simState.Accounts)) + for i, acc := range simState.Accounts { + accs[i] = acc.Address.String() + } + monitorgenesis := types.GenesisState{ + Params: types.DefaultParams(), + } + simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&monitorgenesis) +} + +// ProposalContents doesn't return any content functions for governance proposals +func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent { + return nil +} + +// RandomizedParams creates randomized param changes for the simulator +func (am AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange { + + return []simtypes.ParamChange{} +} + +// RegisterStoreDecoder registers a decoder +func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} + +// WeightedOperations returns the all the gov 
module operations with their respective weights. +func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { + operations := make([]simtypes.WeightedOperation, 0) + + return operations +} diff --git a/x/monitor/simulation/simap.go b/x/monitor/simulation/simap.go new file mode 100644 index 000000000..92c437c0d --- /dev/null +++ b/x/monitor/simulation/simap.go @@ -0,0 +1,15 @@ +package simulation + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" +) + +// FindAccount find a specific address from an account list +func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { + creator, err := sdk.AccAddressFromBech32(address) + if err != nil { + panic(err) + } + return simtypes.FindAccount(accs, creator) +} diff --git a/x/monitor/types/errors.go b/x/monitor/types/errors.go new file mode 100644 index 000000000..565387fe6 --- /dev/null +++ b/x/monitor/types/errors.go @@ -0,0 +1,13 @@ +package types + +// DONTCOVER + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// x/monitor module sentinel errors +var ( + ErrEpochNotEnded = sdkerrors.Register(ModuleName, 1100, "Epoch not ended yet") + ErrCheckpointNotReported = sdkerrors.Register(ModuleName, 1101, "Checkpoint not reported yet") +) diff --git a/x/monitor/types/expected_keepers.go b/x/monitor/types/expected_keepers.go new file mode 100644 index 000000000..6f3566003 --- /dev/null +++ b/x/monitor/types/expected_keepers.go @@ -0,0 +1,24 @@ +package types + +import ( + lc "github.com/babylonchain/babylon/x/btclightclient/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/auth/types" +) + +// AccountKeeper defines the expected account keeper used for simulations (noalias) +type AccountKeeper interface { + GetAccount(ctx sdk.Context, addr sdk.AccAddress) types.AccountI + // Methods imported from account should be defined here +} + +// 
BankKeeper defines the expected interface needed to retrieve account balances. +type BankKeeper interface { + SpendableCoins(ctx sdk.Context, addr sdk.AccAddress) sdk.Coins + // Methods imported from bank should be defined here +} + +type BTCLightClientKeeper interface { + GetTipInfo(ctx sdk.Context) *lc.BTCHeaderInfo + GetBaseBTCHeader(ctx sdk.Context) *lc.BTCHeaderInfo +} diff --git a/x/monitor/types/genesis.go b/x/monitor/types/genesis.go new file mode 100644 index 000000000..a6cdfe807 --- /dev/null +++ b/x/monitor/types/genesis.go @@ -0,0 +1,18 @@ +package types + +// DefaultIndex is the default capability global index +const DefaultIndex uint64 = 1 + +// DefaultGenesis returns the default Capability genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + + return gs.Params.Validate() +} diff --git a/x/monitor/types/genesis.pb.go b/x/monitor/types/genesis.pb.go new file mode 100644 index 000000000..2bfeb15e0 --- /dev/null +++ b/x/monitor/types/genesis.pb.go @@ -0,0 +1,321 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: babylon/monitor/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the monitor module's genesis state. 
+type GenesisState struct { + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_98b2aa1b23cbbe77, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "babylon.monitor.v1.GenesisState") +} + +func init() { proto.RegisterFile("babylon/monitor/genesis.proto", fileDescriptor_98b2aa1b23cbbe77) } + +var fileDescriptor_98b2aa1b23cbbe77 = []byte{ + // 195 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4d, 0x4a, 0x4c, 0xaa, + 0xcc, 0xc9, 0xcf, 0xd3, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xd2, 0x4f, 0x4f, 0xcd, 0x4b, + 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0x4a, 0xeb, 0x41, 0xa5, + 0xf5, 0xca, 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xd2, 0xfa, 0x20, 0x16, 0x44, 0xa5, + 0x94, 0x0c, 0xba, 0x41, 0x05, 0x89, 0x45, 0x89, 0xb9, 0x50, 0x73, 0x94, 0x3c, 0xb8, 0x78, 0xdc, + 
0x21, 0x06, 0x07, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x59, 0x70, 0xb1, 0x41, 0xe4, 0x25, 0x18, 0x15, + 0x18, 0x35, 0xb8, 0x8d, 0xa4, 0xf4, 0x30, 0x2d, 0xd2, 0x0b, 0x00, 0xab, 0x70, 0x62, 0x39, 0x71, + 0x4f, 0x9e, 0x21, 0x08, 0xaa, 0xde, 0xc9, 0xf3, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, + 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, + 0x18, 0xa2, 0xf4, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xa1, 0xa6, + 0x25, 0x67, 0x24, 0x66, 0xe6, 0xc1, 0x38, 0xfa, 0x15, 0x70, 0xb7, 0x95, 0x54, 0x16, 0xa4, 0x16, + 0x27, 0xb1, 0x81, 0xdd, 0x66, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x87, 0x54, 0x4d, 0xde, 0x04, + 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m 
*GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/monitor/types/genesis_test.go b/x/monitor/types/genesis_test.go new file mode 100644 index 000000000..5c5092312 --- /dev/null +++ b/x/monitor/types/genesis_test.go @@ -0,0 +1,31 @@ +package types_test + +import ( + "testing" + + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/stretchr/testify/require" +) + +func TestGenesisState_Validate(t *testing.T) { + for _, tc := range []struct { + desc string + genState *types.GenesisState + valid bool + }{ + { + desc: "default is valid", + genState: types.DefaultGenesis(), + valid: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + err := tc.genState.Validate() + if tc.valid { + require.NoError(t, err) + } else { + 
require.Error(t, err) + } + }) + } +} diff --git a/x/monitor/types/keys.go b/x/monitor/types/keys.go new file mode 100644 index 000000000..1f65b3c03 --- /dev/null +++ b/x/monitor/types/keys.go @@ -0,0 +1,45 @@ +package types + +import ( + "fmt" + "github.com/babylonchain/babylon/x/checkpointing/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +const ( + // ModuleName defines the module name + ModuleName = "monitor" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey is the message route for slashing + RouterKey = ModuleName + + // QuerierRoute defines the module's query routing key + QuerierRoute = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_monitor" +) + +var ( + EpochEndLightClientHeightPrefix = []byte{1} + CheckpointReportedLightClientHeightPrefix = []byte{2} +) + +func KeyPrefix(p string) []byte { + return []byte(p) +} + +func GetEpochEndLightClientHeightKey(e uint64) []byte { + return append(EpochEndLightClientHeightPrefix, sdk.Uint64ToBigEndian(e)...) 
+} + +func GetCheckpointReportedLightClientHeightKey(hashString string) ([]byte, error) { + hashBytes, err := types.FromStringToCkptHash(hashString) + if err != nil { + return nil, fmt.Errorf("invalid hash string %s: %w", hashString, err) + } + return append(CheckpointReportedLightClientHeightPrefix, hashBytes...), nil +} diff --git a/x/monitor/types/params.go b/x/monitor/types/params.go new file mode 100644 index 000000000..4f3215e35 --- /dev/null +++ b/x/monitor/types/params.go @@ -0,0 +1,32 @@ +package types + +import ( + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +// ParamKeyTable the param key table for launch module +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +// NewParams creates a new Params instance +func NewParams() Params { + return Params{} +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams() +} + +// ParamSetPairs get the params.ParamSet +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{} +} + +// Validate validates the set of params +func (p Params) Validate() error { + return nil +} diff --git a/x/monitor/types/params.pb.go b/x/monitor/types/params.pb.go new file mode 100644 index 000000000..70566842b --- /dev/null +++ b/x/monitor/types/params.pb.go @@ -0,0 +1,286 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: babylon/monitor/params.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the parameters for the module. +type Params struct { +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_122b02f3f9b23cbe, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Params)(nil), "babylon.monitor.v1.Params") +} + +func init() { proto.RegisterFile("babylon/monitor/params.proto", fileDescriptor_122b02f3f9b23cbe) } + +var fileDescriptor_122b02f3f9b23cbe = []byte{ + // 153 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x4a, 0x4c, 0xaa, + 0xcc, 0xc9, 0xcf, 0xd3, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xd2, 0x2f, 0x48, 0x2c, 0x4a, + 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0xca, 0xea, 0x41, 0x65, 0xf5, + 0xca, 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xd2, 0xfa, 0x20, 0x16, 0x44, 0xa5, 0x12, + 0x1f, 0x17, 0x5b, 0x00, 0x58, 0xa7, 0x15, 0xcb, 0x8b, 0x05, 0xf2, 0x8c, 0x4e, 0x9e, 0x27, 0x1e, + 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 
0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, + 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0xa5, 0x9f, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, + 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0x35, 0x3e, 0x39, 0x23, 0x31, 0x33, 0x0f, 0xc6, 0xd1, 0xaf, 0x80, + 0xbb, 0xa5, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0x6c, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, + 0xff, 0x3f, 0x70, 0x61, 0xbe, 0xab, 0x00, 0x00, 0x00, +} + +func (this *Params) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Params) + if !ok { + that2, ok := that.(Params) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, 
ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/monitor/types/params_test.go b/x/monitor/types/params_test.go new file mode 100644 index 000000000..53e3c1f6a --- /dev/null +++ b/x/monitor/types/params_test.go @@ -0,0 +1,16 @@ +package types_test + +import ( + "testing" + + "github.com/babylonchain/babylon/x/monitor/types" + "github.com/stretchr/testify/require" +) + +func TestParamsEqual(t *testing.T) { + p1 := types.DefaultParams() + p2 := types.DefaultParams() + + ok := p1.Equal(p2) + require.True(t, ok) +} diff --git a/x/monitor/types/query.pb.go b/x/monitor/types/query.pb.go new file mode 100644 index 000000000..8be5efc78 --- /dev/null +++ b/x/monitor/types/query.pb.go @@ -0,0 +1,1266 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: babylon/monitor/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params holds all the parameters of this module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +type QueryEndedEpochBtcHeightRequest struct { + EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` +} + +func (m *QueryEndedEpochBtcHeightRequest) Reset() { *m = QueryEndedEpochBtcHeightRequest{} } +func (m *QueryEndedEpochBtcHeightRequest) String() string { return proto.CompactTextString(m) } +func (*QueryEndedEpochBtcHeightRequest) ProtoMessage() {} +func (*QueryEndedEpochBtcHeightRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{2} +} +func (m *QueryEndedEpochBtcHeightRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEndedEpochBtcHeightRequest) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEndedEpochBtcHeightRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEndedEpochBtcHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEndedEpochBtcHeightRequest.Merge(m, src) +} +func (m *QueryEndedEpochBtcHeightRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryEndedEpochBtcHeightRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEndedEpochBtcHeightRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEndedEpochBtcHeightRequest proto.InternalMessageInfo + +func (m *QueryEndedEpochBtcHeightRequest) GetEpochNum() uint64 { + if m != nil { + return m.EpochNum + } + return 0 +} + +type QueryEndedEpochBtcHeightResponse struct { + // height of btc light client when epoch ended + BtcLightClientHeight uint64 `protobuf:"varint,1,opt,name=btc_light_client_height,json=btcLightClientHeight,proto3" json:"btc_light_client_height,omitempty"` +} + +func (m *QueryEndedEpochBtcHeightResponse) Reset() { *m = QueryEndedEpochBtcHeightResponse{} } +func (m *QueryEndedEpochBtcHeightResponse) String() string { return proto.CompactTextString(m) } +func (*QueryEndedEpochBtcHeightResponse) ProtoMessage() {} +func (*QueryEndedEpochBtcHeightResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{3} +} +func (m *QueryEndedEpochBtcHeightResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEndedEpochBtcHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEndedEpochBtcHeightResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEndedEpochBtcHeightResponse) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_QueryEndedEpochBtcHeightResponse.Merge(m, src) +} +func (m *QueryEndedEpochBtcHeightResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryEndedEpochBtcHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEndedEpochBtcHeightResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEndedEpochBtcHeightResponse proto.InternalMessageInfo + +func (m *QueryEndedEpochBtcHeightResponse) GetBtcLightClientHeight() uint64 { + if m != nil { + return m.BtcLightClientHeight + } + return 0 +} + +type QueryReportedCheckpointBtcHeightRequest struct { + // ckpt_hash is hex encoded byte string of the hash of the checkpoint + CkptHash string `protobuf:"bytes,1,opt,name=ckpt_hash,json=ckptHash,proto3" json:"ckpt_hash,omitempty"` +} + +func (m *QueryReportedCheckpointBtcHeightRequest) Reset() { + *m = QueryReportedCheckpointBtcHeightRequest{} +} +func (m *QueryReportedCheckpointBtcHeightRequest) String() string { return proto.CompactTextString(m) } +func (*QueryReportedCheckpointBtcHeightRequest) ProtoMessage() {} +func (*QueryReportedCheckpointBtcHeightRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{4} +} +func (m *QueryReportedCheckpointBtcHeightRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryReportedCheckpointBtcHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryReportedCheckpointBtcHeightRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryReportedCheckpointBtcHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryReportedCheckpointBtcHeightRequest.Merge(m, src) +} +func (m *QueryReportedCheckpointBtcHeightRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryReportedCheckpointBtcHeightRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueryReportedCheckpointBtcHeightRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryReportedCheckpointBtcHeightRequest proto.InternalMessageInfo + +func (m *QueryReportedCheckpointBtcHeightRequest) GetCkptHash() string { + if m != nil { + return m.CkptHash + } + return "" +} + +type QueryReportedCheckpointBtcHeightResponse struct { + // height of btc light client when checkpoint is reported + BtcLightClientHeight uint64 `protobuf:"varint,1,opt,name=btc_light_client_height,json=btcLightClientHeight,proto3" json:"btc_light_client_height,omitempty"` +} + +func (m *QueryReportedCheckpointBtcHeightResponse) Reset() { + *m = QueryReportedCheckpointBtcHeightResponse{} +} +func (m *QueryReportedCheckpointBtcHeightResponse) String() string { return proto.CompactTextString(m) } +func (*QueryReportedCheckpointBtcHeightResponse) ProtoMessage() {} +func (*QueryReportedCheckpointBtcHeightResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3b70877a7534d1c4, []int{5} +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryReportedCheckpointBtcHeightResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryReportedCheckpointBtcHeightResponse.Merge(m, src) +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryReportedCheckpointBtcHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryReportedCheckpointBtcHeightResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryReportedCheckpointBtcHeightResponse proto.InternalMessageInfo + +func (m 
*QueryReportedCheckpointBtcHeightResponse) GetBtcLightClientHeight() uint64 { + if m != nil { + return m.BtcLightClientHeight + } + return 0 +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "babylon.monitor.v1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "babylon.monitor.v1.QueryParamsResponse") + proto.RegisterType((*QueryEndedEpochBtcHeightRequest)(nil), "babylon.monitor.v1.QueryEndedEpochBtcHeightRequest") + proto.RegisterType((*QueryEndedEpochBtcHeightResponse)(nil), "babylon.monitor.v1.QueryEndedEpochBtcHeightResponse") + proto.RegisterType((*QueryReportedCheckpointBtcHeightRequest)(nil), "babylon.monitor.v1.QueryReportedCheckpointBtcHeightRequest") + proto.RegisterType((*QueryReportedCheckpointBtcHeightResponse)(nil), "babylon.monitor.v1.QueryReportedCheckpointBtcHeightResponse") +} + +func init() { proto.RegisterFile("babylon/monitor/query.proto", fileDescriptor_3b70877a7534d1c4) } + +var fileDescriptor_3b70877a7534d1c4 = []byte{ + // 515 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4f, 0x6b, 0xd4, 0x4e, + 0x18, 0xde, 0xfc, 0xd8, 0xdf, 0xd2, 0x8e, 0xb7, 0xe9, 0x82, 0x92, 0x2d, 0x69, 0xc9, 0xa1, 0x5d, + 0x14, 0x33, 0x6c, 0x57, 0x41, 0x50, 0x3c, 0x6c, 0xa9, 0x54, 0x10, 0xff, 0xe4, 0xa6, 0x97, 0x30, + 0x99, 0x1d, 0x92, 0xa1, 0x9b, 0x99, 0x69, 0x66, 0xb2, 0xb8, 0x94, 0xbd, 0x78, 0xf3, 0x26, 0xf8, + 0x45, 0xfc, 0x18, 0xbd, 0x08, 0x05, 0x2f, 0x9e, 0x44, 0x76, 0xfd, 0x20, 0x92, 0xc9, 0x34, 0x60, + 0x9b, 0x75, 0x51, 0x6f, 0xc9, 0xfb, 0xbc, 0xcf, 0xf3, 0x3e, 0x6f, 0xde, 0x87, 0x80, 0x5e, 0x8c, + 0xe3, 0xd9, 0x44, 0x70, 0x94, 0x09, 0xce, 0xb4, 0xc8, 0xd1, 0x69, 0x41, 0xf3, 0x59, 0x20, 0x73, + 0xa1, 0x05, 0x84, 0x16, 0x0c, 0x2c, 0x18, 0x4c, 0x07, 0x6e, 0x37, 0x11, 0x89, 0x30, 0x30, 0x2a, + 0x9f, 0xaa, 0x4e, 0x77, 0x3b, 0x11, 0x22, 0x99, 0x50, 0x84, 0x25, 0x43, 0x98, 0x73, 0xa1, 0xb1, + 0x66, 0x82, 0x2b, 0x8b, 0xde, 0x26, 0x42, 0x65, 0x42, 0xa1, 0x18, 
0x2b, 0x5a, 0x0d, 0x40, 0xd3, + 0x41, 0x4c, 0x35, 0x1e, 0x20, 0x89, 0x13, 0xc6, 0x4d, 0xf3, 0xa5, 0xd2, 0x55, 0x43, 0x12, 0xe7, + 0x38, 0xb3, 0x4a, 0x7e, 0x17, 0xc0, 0x57, 0x25, 0xff, 0xa5, 0x29, 0x86, 0xf4, 0xb4, 0xa0, 0x4a, + 0xfb, 0x2f, 0xc0, 0xd6, 0x2f, 0x55, 0x25, 0x05, 0x57, 0x14, 0x3e, 0x00, 0x9d, 0x8a, 0x7c, 0xcb, + 0xd9, 0x75, 0xfa, 0x37, 0x0e, 0xdc, 0xe0, 0xfa, 0x3e, 0x41, 0xc5, 0x19, 0xb5, 0xcf, 0xbf, 0xed, + 0xb4, 0x42, 0xdb, 0xef, 0x3f, 0x06, 0x3b, 0x46, 0xf0, 0x88, 0x8f, 0xe9, 0xf8, 0x48, 0x0a, 0x92, + 0x8e, 0x34, 0x39, 0xa6, 0x2c, 0x49, 0xb5, 0x9d, 0x09, 0x7b, 0x60, 0x93, 0x96, 0x40, 0xc4, 0x8b, + 0xcc, 0xe8, 0xb7, 0xc3, 0x0d, 0x53, 0x78, 0x5e, 0x64, 0xfe, 0x6b, 0xb0, 0xbb, 0x9a, 0x6f, 0xdd, + 0xdd, 0x07, 0x37, 0x63, 0x4d, 0xa2, 0x49, 0x59, 0x8c, 0xc8, 0x84, 0x51, 0xae, 0xa3, 0xd4, 0xb4, + 0x58, 0xb9, 0x6e, 0xac, 0xc9, 0xb3, 0xf2, 0xfd, 0xd0, 0x80, 0x15, 0xdd, 0x7f, 0x02, 0xf6, 0x8d, + 0x74, 0x48, 0xa5, 0xc8, 0x35, 0x1d, 0x1f, 0xa6, 0x94, 0x9c, 0x48, 0xc1, 0xb8, 0x6e, 0xb2, 0x48, + 0x4e, 0xa4, 0x8e, 0x52, 0xac, 0x52, 0xa3, 0xb9, 0x19, 0x6e, 0x94, 0x85, 0x63, 0xac, 0x52, 0x1f, + 0x83, 0xfe, 0x7a, 0x9d, 0x7f, 0xb2, 0x7a, 0xf0, 0xbe, 0x0d, 0xfe, 0x37, 0x33, 0xe0, 0x1c, 0x74, + 0xaa, 0xef, 0x0c, 0xf7, 0x9a, 0x6e, 0x70, 0xfd, 0xa4, 0xee, 0xfe, 0xda, 0xbe, 0xca, 0x9b, 0xef, + 0xbf, 0xfb, 0xf2, 0xe3, 0xe3, 0x7f, 0xdb, 0xd0, 0x45, 0x57, 0x83, 0x33, 0x1d, 0xd8, 0xec, 0xc0, + 0x4f, 0x0e, 0xd8, 0x6a, 0x38, 0x05, 0x1c, 0xae, 0x1c, 0xb2, 0xfa, 0xf0, 0xee, 0xbd, 0x3f, 0x23, + 0x59, 0x9b, 0x81, 0xb1, 0xd9, 0x87, 0x7b, 0x4d, 0x36, 0x4d, 0x6e, 0x14, 0x3a, 0xab, 0x03, 0x35, + 0x87, 0x9f, 0x1d, 0xd0, 0xfb, 0xcd, 0x69, 0xe0, 0xc3, 0x95, 0x2e, 0xd6, 0x07, 0xc3, 0x7d, 0xf4, + 0x77, 0x64, 0xbb, 0xca, 0xd0, 0xac, 0x72, 0x17, 0xde, 0x69, 0x5a, 0x85, 0xd4, 0x44, 0x85, 0xce, + 0xea, 0xf4, 0xcd, 0x47, 0x4f, 0xcf, 0x17, 0x9e, 0x73, 0xb1, 0xf0, 0x9c, 0xef, 0x0b, 0xcf, 0xf9, + 0xb0, 0xf4, 0x5a, 0x17, 0x4b, 0xaf, 0xf5, 0x75, 0xe9, 0xb5, 0xde, 0xa0, 0x84, 0xe9, 0xb4, 0x88, + 0x03, 
0x22, 0xb2, 0x4b, 0x41, 0x92, 0x62, 0xc6, 0x6b, 0xf5, 0xb7, 0xb5, 0xbe, 0x9e, 0x49, 0xaa, + 0xe2, 0x8e, 0xf9, 0x15, 0x0c, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x8a, 0xb1, 0xb8, 0xbb, + 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Parameters queries the parameters of the module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) + // EndedEpochBtcHeight returns the BTC light client height at provided epoch finish + EndedEpochBtcHeight(ctx context.Context, in *QueryEndedEpochBtcHeightRequest, opts ...grpc.CallOption) (*QueryEndedEpochBtcHeightResponse, error) + // ReportedCheckpointBtcHeight returns the BTC light client height at which the checkpoint with the given hash is reported back to Babylon + ReportedCheckpointBtcHeight(ctx context.Context, in *QueryReportedCheckpointBtcHeightRequest, opts ...grpc.CallOption) (*QueryReportedCheckpointBtcHeightResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/babylon.monitor.v1.Query/Params", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) EndedEpochBtcHeight(ctx context.Context, in *QueryEndedEpochBtcHeightRequest, opts ...grpc.CallOption) (*QueryEndedEpochBtcHeightResponse, error) { + out := new(QueryEndedEpochBtcHeightResponse) + err := c.cc.Invoke(ctx, "/babylon.monitor.v1.Query/EndedEpochBtcHeight", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ReportedCheckpointBtcHeight(ctx context.Context, in *QueryReportedCheckpointBtcHeightRequest, opts ...grpc.CallOption) (*QueryReportedCheckpointBtcHeightResponse, error) { + out := new(QueryReportedCheckpointBtcHeightResponse) + err := c.cc.Invoke(ctx, "/babylon.monitor.v1.Query/ReportedCheckpointBtcHeight", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Parameters queries the parameters of the module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) + // EndedEpochBtcHeight returns the BTC light client height at provided epoch finish + EndedEpochBtcHeight(context.Context, *QueryEndedEpochBtcHeightRequest) (*QueryEndedEpochBtcHeightResponse, error) + // ReportedCheckpointBtcHeight returns the BTC light client height at which the checkpoint with the given hash is reported back to Babylon + ReportedCheckpointBtcHeight(context.Context, *QueryReportedCheckpointBtcHeightRequest) (*QueryReportedCheckpointBtcHeightResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} +func (*UnimplementedQueryServer) EndedEpochBtcHeight(ctx context.Context, req *QueryEndedEpochBtcHeightRequest) (*QueryEndedEpochBtcHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EndedEpochBtcHeight not implemented") +} +func (*UnimplementedQueryServer) ReportedCheckpointBtcHeight(ctx context.Context, req *QueryReportedCheckpointBtcHeightRequest) (*QueryReportedCheckpointBtcHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReportedCheckpointBtcHeight not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.monitor.v1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_EndedEpochBtcHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryEndedEpochBtcHeightRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).EndedEpochBtcHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.monitor.v1.Query/EndedEpochBtcHeight", + 
} + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).EndedEpochBtcHeight(ctx, req.(*QueryEndedEpochBtcHeightRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ReportedCheckpointBtcHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryReportedCheckpointBtcHeightRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ReportedCheckpointBtcHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.monitor.v1.Query/ReportedCheckpointBtcHeight", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ReportedCheckpointBtcHeight(ctx, req.(*QueryReportedCheckpointBtcHeightRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "babylon.monitor.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + { + MethodName: "EndedEpochBtcHeight", + Handler: _Query_EndedEpochBtcHeight_Handler, + }, + { + MethodName: "ReportedCheckpointBtcHeight", + Handler: _Query_ReportedCheckpointBtcHeight_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "babylon/monitor/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil 
+} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryEndedEpochBtcHeightRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEndedEpochBtcHeightRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEndedEpochBtcHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EpochNum != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EpochNum)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryEndedEpochBtcHeightResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEndedEpochBtcHeightResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEndedEpochBtcHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BtcLightClientHeight != 0 { + i = encodeVarintQuery(dAtA, i, 
uint64(m.BtcLightClientHeight)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryReportedCheckpointBtcHeightRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryReportedCheckpointBtcHeightRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryReportedCheckpointBtcHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CkptHash) > 0 { + i -= len(m.CkptHash) + copy(dAtA[i:], m.CkptHash) + i = encodeVarintQuery(dAtA, i, uint64(len(m.CkptHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryReportedCheckpointBtcHeightResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryReportedCheckpointBtcHeightResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryReportedCheckpointBtcHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BtcLightClientHeight != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.BtcLightClientHeight)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l 
+ l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryEndedEpochBtcHeightRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) + } + return n +} + +func (m *QueryEndedEpochBtcHeightResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BtcLightClientHeight != 0 { + n += 1 + sovQuery(uint64(m.BtcLightClientHeight)) + } + return n +} + +func (m *QueryReportedCheckpointBtcHeightRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CkptHash) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryReportedCheckpointBtcHeightResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BtcLightClientHeight != 0 { + n += 1 + sovQuery(uint64(m.BtcLightClientHeight)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryEndedEpochBtcHeightRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEndedEpochBtcHeightRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEndedEpochBtcHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + } + m.EpochNum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryEndedEpochBtcHeightResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEndedEpochBtcHeightResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEndedEpochBtcHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BtcLightClientHeight", wireType) + } + m.BtcLightClientHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BtcLightClientHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryReportedCheckpointBtcHeightRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryReportedCheckpointBtcHeightRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryReportedCheckpointBtcHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CkptHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CkptHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryReportedCheckpointBtcHeightResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryReportedCheckpointBtcHeightResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryReportedCheckpointBtcHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BtcLightClientHeight", wireType) + } + m.BtcLightClientHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BtcLightClientHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/monitor/types/query.pb.gw.go b/x/monitor/types/query.pb.gw.go new file mode 100644 index 000000000..a750169ed --- /dev/null +++ b/x/monitor/types/query.pb.gw.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: babylon/monitor/query.proto + +/* +Package types is a reverse proxy. 
+ +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_EndedEpochBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEndedEpochBtcHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = 
runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := client.EndedEpochBtcHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_EndedEpochBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEndedEpochBtcHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := server.EndedEpochBtcHeight(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ReportedCheckpointBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryReportedCheckpointBtcHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["ckpt_hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "ckpt_hash") + } + + protoReq.CkptHash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ckpt_hash", err) + } + + msg, err := client.ReportedCheckpointBtcHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, 
metadata, err + +} + +func local_request_Query_ReportedCheckpointBtcHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryReportedCheckpointBtcHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["ckpt_hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "ckpt_hash") + } + + protoReq.CkptHash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ckpt_hash", err) + } + + msg, err := server.ReportedCheckpointBtcHeight(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_EndedEpochBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_EndedEpochBtcHeight_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EndedEpochBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ReportedCheckpointBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ReportedCheckpointBtcHeight_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ReportedCheckpointBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_EndedEpochBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_EndedEpochBtcHeight_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EndedEpochBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ReportedCheckpointBtcHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ReportedCheckpointBtcHeight_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ReportedCheckpointBtcHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "monitor", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_EndedEpochBtcHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "monitor", "v1", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_ReportedCheckpointBtcHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "monitor", "v1", "checkpoints", "ckpt_hash"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage + + forward_Query_EndedEpochBtcHeight_0 = runtime.ForwardResponseMessage + + forward_Query_ReportedCheckpointBtcHeight_0 = runtime.ForwardResponseMessage +) diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer.go b/x/zoneconcierge/keeper/canonical_chain_indexer.go index 73bd3cc9a..860e53008 100644 --- a/x/zoneconcierge/keeper/canonical_chain_indexer.go +++ b/x/zoneconcierge/keeper/canonical_chain_indexer.go @@ -1,12 +1,42 @@ package keeper import ( + "fmt" + sdkerrors "cosmossdk.io/errors" "github.com/babylonchain/babylon/x/zoneconcierge/types" "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" ) +// FindClosestHeader finds the IndexedHeader that is closest to (but not after) the given height +func (k Keeper) FindClosestHeader(ctx sdk.Context, chainID string, height uint64) (*types.IndexedHeader, error) { + chainInfo, err := k.GetChainInfo(ctx, chainID) + if err != nil { + return nil, fmt.Errorf("failed to get chain info for chain with ID %s: %w", chainID, err) + } + + // if the given height is no lower than the latest header, return the latest header directly + if chainInfo.LatestHeader.Height <= height { + return chainInfo.LatestHeader, nil + } + + // the requested 
height is lower than the latest header, trace back until finding a timestamped header + store := k.canonicalChainStore(ctx, chainID) + heightBytes := sdk.Uint64ToBigEndian(height) + iter := store.ReverseIterator(nil, heightBytes) + defer iter.Close() + // if there is no key within range [0, height], return error + if !iter.Valid() { + return nil, fmt.Errorf("chain with ID %s does not have a timestamped header before height %d", chainID, height) + } + // find the header in bytes, decode and return + headerBytes := iter.Value() + var header types.IndexedHeader + k.cdc.MustUnmarshal(headerBytes, &header) + return &header, nil +} + func (k Keeper) GetHeader(ctx sdk.Context, chainID string, height uint64) (*types.IndexedHeader, error) { store := k.canonicalChainStore(ctx, chainID) heightBytes := sdk.Uint64ToBigEndian(height) diff --git a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go index d6041daf6..4248fb530 100644 --- a/x/zoneconcierge/keeper/canonical_chain_indexer_test.go +++ b/x/zoneconcierge/keeper/canonical_chain_indexer_test.go @@ -14,14 +14,15 @@ func FuzzCanonicalChainIndexer(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, czChain, zcKeeper := SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() - // invoke the hook a random number of times to simulate a random number of blocks + // simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 - headers := SimulateHeadersViaHook(ctx, hooks, czChain.ChainID, numHeaders) + headers := SimulateHeadersViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders) // check if the canonical chain index is correct or not for i := uint64(0); i < numHeaders; i++ { @@ -34,10 +35,52 @@ func FuzzCanonicalChainIndexer(f *testing.F) { } // check if the chain info is updated or not - chainInfo := 
zcKeeper.GetChainInfo(ctx, czChain.ChainID) + chainInfo, err := zcKeeper.GetChainInfo(ctx, czChain.ChainID) + require.NoError(t, err) require.NotNil(t, chainInfo.LatestHeader) require.Equal(t, czChain.ChainID, chainInfo.LatestHeader.ChainId) require.Equal(t, numHeaders-1, chainInfo.LatestHeader.Height) require.Equal(t, headers[numHeaders-1].Header.LastCommitHash, chainInfo.LatestHeader.Hash) }) } + +func FuzzFindClosestHeader(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + // no header at the moment, FindClosestHeader invocation should give error + _, err := zcKeeper.FindClosestHeader(ctx, czChain.ChainID, 100) + require.Error(t, err) + + // simulate a random number of blocks + numHeaders := datagen.RandomInt(100) + 1 + headers := SimulateHeadersViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders) + + header, err := zcKeeper.FindClosestHeader(ctx, czChain.ChainID, numHeaders) + require.NoError(t, err) + require.Equal(t, headers[len(headers)-1].Header.LastCommitHash, header.Hash) + + // skip a non-zero number of headers in between, in order to create a gap of non-timestamped headers + gap := datagen.RandomInt(10) + 1 + + // simulate a random number of blocks + // where the new batch of headers has a gap with the previous batch + SimulateHeadersViaHook(ctx, hooks, czChain.ChainID, numHeaders+gap+1, numHeaders) + + // get a random height that is in this gap + randomHeightInGap := datagen.RandomInt(int(gap+1)) + numHeaders + // find the closest header with the given randomHeightInGap + header, err = zcKeeper.FindClosestHeader(ctx, czChain.ChainID, randomHeightInGap) + require.NoError(t, err) + // the header should be the same as the last header in the last batch + require.Equal(t, headers[len(headers)-1].Header.LastCommitHash, 
header.Hash)
+	})
+}
diff --git a/x/zoneconcierge/keeper/chain_info_indexer.go b/x/zoneconcierge/keeper/chain_info_indexer.go
index b4b41d56d..b85462fe2 100644
--- a/x/zoneconcierge/keeper/chain_info_indexer.go
+++ b/x/zoneconcierge/keeper/chain_info_indexer.go
@@ -1,6 +1,8 @@
 package keeper
 
 import (
+	"fmt"
+
 	sdkerrors "cosmossdk.io/errors"
 	"github.com/babylonchain/babylon/x/zoneconcierge/types"
 	"github.com/cosmos/cosmos-sdk/store/prefix"
@@ -12,52 +14,87 @@ func (k Keeper) setChainInfo(ctx sdk.Context, chainInfo *types.ChainInfo) {
 	store.Set([]byte(chainInfo.ChainId), k.cdc.MustMarshal(chainInfo))
 }
 
+func (k Keeper) InitChainInfo(ctx sdk.Context, chainID string) (*types.ChainInfo, error) {
+	if len(chainID) == 0 {
+		return nil, fmt.Errorf("chainID is empty")
+	}
+	// ensure chain info has not been initialised yet
+	if k.HasChainInfo(ctx, chainID) {
+		return nil, sdkerrors.Wrapf(types.ErrInvalidChainInfo, "chain info has already been initialized")
+	}
+
+	chainInfo := &types.ChainInfo{
+		ChainId:      chainID,
+		LatestHeader: nil,
+		LatestForks: &types.Forks{
+			Headers: []*types.IndexedHeader{},
+		},
+		TimestampedHeadersCount: 0,
+	}
+
+	k.setChainInfo(ctx, chainInfo)
+	return chainInfo, nil
+}
+
+// HasChainInfo returns whether the chain info exists for a given ID
+// Since IBC does not provide API that allows to initialise chain info right before creating an IBC connection,
+// this existence check has to be performed every time instead of assuming the chain info is initialised.
+func (k Keeper) HasChainInfo(ctx sdk.Context, chainID string) bool {
+	store := k.chainInfoStore(ctx)
+	return store.Has([]byte(chainID))
+}
+
 // GetChainInfo returns the ChainInfo struct for a chain with a given ID
 // Since IBC does not provide API that allows to initialise chain info right before creating an IBC connection,
-// we can only check its existence every time, and return an empty one if it's not initialised yet.
+// we can only check its existence every time, and return an error if it's not initialised yet.
-func (k Keeper) GetChainInfo(ctx sdk.Context, chainID string) *types.ChainInfo {
+func (k Keeper) GetChainInfo(ctx sdk.Context, chainID string) (*types.ChainInfo, error) {
 	store := k.chainInfoStore(ctx)
 	if !store.Has([]byte(chainID)) {
-		return &types.ChainInfo{
-			ChainId:      chainID,
-			LatestHeader: nil,
-			LatestForks: &types.Forks{
-				Headers: []*types.IndexedHeader{},
-			},
-		}
+		return nil, types.ErrChainInfoNotFound
 	}
 	chainInfoBytes := store.Get([]byte(chainID))
 	var chainInfo types.ChainInfo
 	k.cdc.MustUnmarshal(chainInfoBytes, &chainInfo)
-	return &chainInfo
+	return &chainInfo, nil
 }
 
-func (k Keeper) tryToUpdateLatestHeader(ctx sdk.Context, chainID string, header *types.IndexedHeader) error {
+// updateLatestHeader updates the chainInfo w.r.t. the given header, including
+// - replace the old latest header with the given one
+// - increment the number of timestamped headers
+// Note that this function is triggered only upon receiving headers from the relayer,
+// and only a subset of headers in CZ are relayed. Thus TimestampedHeadersCount is not
+// equal to the total number of headers in CZ.
+func (k Keeper) updateLatestHeader(ctx sdk.Context, chainID string, header *types.IndexedHeader) error { if header == nil { return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil") } - // NOTE: we can accept header without ancestor since IBC connection can be established at any height - chainInfo := k.GetChainInfo(ctx, chainID) - if chainInfo.LatestHeader != nil { - // ensure the header is the latest one - // NOTE: submitting an old header is considered acceptable in IBC-Go (see Case_valid_past_update), - // but the chain info indexer will not record such old header since it's not the latest one - if chainInfo.LatestHeader.Height > header.Height { - return nil - } + chainInfo, err := k.GetChainInfo(ctx, chainID) + if err != nil { + // chain info has not been initialised yet + return fmt.Errorf("failed to get chain info of %s: %w", chainID, err) } - chainInfo.LatestHeader = header + chainInfo.LatestHeader = header // replace the old latest header with the given one + chainInfo.TimestampedHeadersCount++ // increment the number of timestamped headers + k.setChainInfo(ctx, chainInfo) return nil } +// tryToUpdateLatestForkHeader tries to update the chainInfo w.r.t. 
the given fork header +// - If no fork exists, add this fork header as the latest one +// - If there is a fork header at the same height, add this fork to the set of latest fork headers +// - If this fork header is newer than the previous one, replace the old fork headers with this fork header +// - If this fork header is older than the current latest fork, ignore func (k Keeper) tryToUpdateLatestForkHeader(ctx sdk.Context, chainID string, header *types.IndexedHeader) error { if header == nil { return sdkerrors.Wrapf(types.ErrInvalidHeader, "header is nil") } - chainInfo := k.GetChainInfo(ctx, chainID) + chainInfo, err := k.GetChainInfo(ctx, chainID) + if err != nil { + return sdkerrors.Wrapf(types.ErrChainInfoNotFound, "cannot insert fork header when chain info is not initialized") + } if len(chainInfo.LatestForks.Headers) == 0 { // no fork at the moment, add this fork header as the latest one @@ -66,7 +103,7 @@ func (k Keeper) tryToUpdateLatestForkHeader(ctx sdk.Context, chainID string, hea // there exists fork headers at the same height, add this fork header to the set of latest fork headers chainInfo.LatestForks.Headers = append(chainInfo.LatestForks.Headers, header) } else if chainInfo.LatestForks.Headers[0].Height < header.Height { - // this fork header is newer than the previous one, add this fork header as the latest one + // this fork header is newer than the previous one, replace the old fork headers with this fork header chainInfo.LatestForks = &types.Forks{ Headers: []*types.IndexedHeader{header}, } diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go index 6d1d550a0..f9d92e628 100644 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer.go +++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer.go @@ -1,6 +1,7 @@ package keeper import ( + bbn "github.com/babylonchain/babylon/types" "github.com/babylonchain/babylon/x/zoneconcierge/types" "github.com/cosmos/cosmos-sdk/store/prefix" sdk 
"github.com/cosmos/cosmos-sdk/types" @@ -19,12 +20,85 @@ func (k Keeper) GetEpochChainInfo(ctx sdk.Context, chainID string, epochNumber u return &chainInfo, nil } +// GetLastFinalizedChainInfo gets the last finalised chain info recorded for a given chain ID +// and the earliest epoch that snapshots this chain info +func (k Keeper) GetLastFinalizedChainInfo(ctx sdk.Context, chainID string) (uint64, *types.ChainInfo, error) { + // find the last finalised epoch + finalizedEpoch, err := k.GetFinalizedEpoch(ctx) + if err != nil { + return 0, nil, err + } + + // find the chain info of this epoch + chainInfo, err := k.GetEpochChainInfo(ctx, chainID, finalizedEpoch) + if err != nil { + return finalizedEpoch, nil, err + } + + // It's possible that the chain info's epoch is way before the last finalised epoch + // e.g., when there is no relayer for many epochs + // NOTE: if an epoch is finalised then all of its previous epochs are also finalised + if chainInfo.LatestHeader.BabylonEpoch < finalizedEpoch { + // remember the last finalised epoch + finalizedEpoch = chainInfo.LatestHeader.BabylonEpoch + // replace the chain info w.r.t. 
this last finalised epoch + chainInfo, err = k.GetEpochChainInfo(ctx, chainID, finalizedEpoch) + if err != nil { + return finalizedEpoch, nil, err + } + } + + return finalizedEpoch, chainInfo, nil +} + +// GetEpochHeaders gets the headers timestamped in a given epoch, in the ascending order +func (k Keeper) GetEpochHeaders(ctx sdk.Context, chainID string, epochNumber uint64) ([]*types.IndexedHeader, error) { + headers := []*types.IndexedHeader{} + + // find the last timestamped header of this chain in the epoch + epochChainInfo, err := k.GetEpochChainInfo(ctx, chainID, epochNumber) + if err != nil { + return nil, err + } + // it's possible that this epoch's snapshot is not updated for many epochs + // this implies that this epoch does not timestamp any header for this chain at all + if epochChainInfo.LatestHeader.BabylonEpoch < epochNumber { + return nil, types.ErrEpochHeadersNotFound + } + // now we have the last header in this epoch + headers = append(headers, epochChainInfo.LatestHeader) + + // append all previous headers until reaching the previous epoch + canonicalChainStore := k.canonicalChainStore(ctx, chainID) + lastHeaderKey := sdk.Uint64ToBigEndian(epochChainInfo.LatestHeader.Height) + // NOTE: even in ReverseIterator, start and end should still be specified in ascending order + canonicalChainIter := canonicalChainStore.ReverseIterator(nil, lastHeaderKey) + defer canonicalChainIter.Close() + for ; canonicalChainIter.Valid(); canonicalChainIter.Next() { + var prevHeader types.IndexedHeader + k.cdc.MustUnmarshal(canonicalChainIter.Value(), &prevHeader) + if prevHeader.BabylonEpoch < epochNumber { + // we have reached the previous epoch, break the loop + break + } + headers = append(headers, &prevHeader) + } + + // reverse the list so that it remains ascending order + bbn.Reverse(headers) + + return headers, nil +} + // recordEpochChainInfo records the chain info for a given epoch number of given chain ID // where the latest chain info is retrieved from the 
chain info indexer func (k Keeper) recordEpochChainInfo(ctx sdk.Context, chainID string, epochNumber uint64) { // get the latest known chain info - // NOTE: GetChainInfo returns an empty ChainInfo object when the ChainInfo does not exist - chainInfo := k.GetChainInfo(ctx, chainID) + chainInfo, err := k.GetChainInfo(ctx, chainID) + if err != nil { + k.Logger(ctx).Debug("chain info does not exist yet, nothing to record") + return + } // NOTE: we can record epoch chain info without ancestor since IBC connection can be established at any height store := k.epochChainInfoStore(ctx, chainID) store.Set(sdk.Uint64ToBigEndian(epochNumber), k.cdc.MustMarshal(chainInfo)) diff --git a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go index d0c68532e..02f8b0606 100644 --- a/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go +++ b/x/zoneconcierge/keeper/epoch_chain_info_indexer_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/babylonchain/babylon/testutil/datagen" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" "github.com/stretchr/testify/require" ) @@ -14,24 +15,97 @@ func FuzzEpochChainInfoIndexer(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, czChain, zcKeeper := SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + epochingKeeper := babylonApp.EpochingKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() + // enter a random epoch + epochNum := datagen.RandomInt(10) + for j := uint64(0); j < epochNum; j++ { + epochingKeeper.IncEpoch(ctx) + } + // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 - SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, numHeaders, numForkHeaders) + SimulateHeadersAndForksViaHook(ctx, hooks, 
czChain.ChainID, 0, numHeaders, numForkHeaders) - // simulate the scenario that a random epoch has ended - epochNum := datagen.RandomInt(10) + // end this epoch hooks.AfterEpochEnds(ctx, epochNum) // check if the chain info of this epoch is recorded or not chainInfo, err := zcKeeper.GetEpochChainInfo(ctx, czChain.ChainID, epochNum) require.NoError(t, err) require.Equal(t, numHeaders-1, chainInfo.LatestHeader.Height) + require.Equal(t, numHeaders, chainInfo.TimestampedHeadersCount) require.Equal(t, numForkHeaders, uint64(len(chainInfo.LatestForks.Headers))) }) } + +func FuzzGetEpochHeaders(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + epochingKeeper := babylonApp.EpochingKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + numReqs := datagen.RandomInt(5) + 1 + + epochNumList := []uint64{datagen.RandomInt(10) + 1} + nextHeightList := []uint64{0} + numHeadersList := []uint64{} + expectedHeadersMap := map[uint64][]*ibctmtypes.Header{} + numForkHeadersList := []uint64{} + + // we test the scenario of ending an epoch for multiple times, in order to ensure that + // consecutive epoch infos do not affect each other. 
+ for i := uint64(0); i < numReqs; i++ { + epochNum := epochNumList[i] + // enter a random epoch + if i == 0 { + for j := uint64(0); j < epochNum; j++ { + epochingKeeper.IncEpoch(ctx) + } + } else { + for j := uint64(0); j < epochNum-epochNumList[i-1]; j++ { + epochingKeeper.IncEpoch(ctx) + } + } + + // generate a random number of headers and fork headers + numHeadersList = append(numHeadersList, datagen.RandomInt(100)+1) + numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(10)+1) + // trigger hooks to append these headers and fork headers + expectedHeaders, _ := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i]) + expectedHeadersMap[epochNum] = expectedHeaders + // prepare nextHeight for the next request + nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i]) + + // simulate the scenario that a random epoch has ended + hooks.AfterEpochEnds(ctx, epochNum) + // prepare epochNum for the next request + epochNumList = append(epochNumList, epochNum+datagen.RandomInt(10)+1) + } + + // attest the correctness of epoch info for each tested epoch + for i := uint64(0); i < numReqs; i++ { + epochNum := epochNumList[i] + // check if the headers are same as expected + headers, err := zcKeeper.GetEpochHeaders(ctx, czChain.ChainID, epochNum) + require.NoError(t, err) + require.Equal(t, len(expectedHeadersMap[epochNum]), len(headers)) + for j := 0; j < len(expectedHeadersMap[epochNum]); j++ { + require.Equal(t, expectedHeadersMap[epochNum][j].Header.LastCommitHash, headers[j].Hash) + } + } + }) +} diff --git a/x/zoneconcierge/keeper/fork_indexer_test.go b/x/zoneconcierge/keeper/fork_indexer_test.go index bc6158853..b6a8a28f4 100644 --- a/x/zoneconcierge/keeper/fork_indexer_test.go +++ b/x/zoneconcierge/keeper/fork_indexer_test.go @@ -14,7 +14,8 @@ func FuzzForkIndexer(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, czChain, zcKeeper 
:= SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() @@ -22,7 +23,7 @@ func FuzzForkIndexer(f *testing.F) { // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 - _, forkHeaders := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, numHeaders, numForkHeaders) + _, forkHeaders := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) // check if the fork is updated or not forks := zcKeeper.GetForks(ctx, czChain.ChainID, numHeaders-1) @@ -34,7 +35,8 @@ func FuzzForkIndexer(f *testing.F) { } // check if the chain info is updated or not - chainInfo := zcKeeper.GetChainInfo(ctx, czChain.ChainID) + chainInfo, err := zcKeeper.GetChainInfo(ctx, czChain.ChainID) + require.NoError(t, err) require.Equal(t, numForkHeaders, uint64(len(chainInfo.LatestForks.Headers))) for i := range forks.Headers { require.Equal(t, czChain.ChainID, chainInfo.LatestForks.Headers[i].ChainId) diff --git a/x/zoneconcierge/keeper/grpc_query.go b/x/zoneconcierge/keeper/grpc_query.go index 7101248eb..e1aac72ff 100644 --- a/x/zoneconcierge/keeper/grpc_query.go +++ b/x/zoneconcierge/keeper/grpc_query.go @@ -2,12 +2,10 @@ package keeper import ( "context" - "fmt" - btcctypes "github.com/babylonchain/babylon/x/btccheckpoint/types" - checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" "github.com/babylonchain/babylon/x/zoneconcierge/types" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -20,9 +18,22 @@ func (k Keeper) ChainList(c context.Context, req *types.QueryChainListRequest) ( } ctx := sdk.UnwrapSDKContext(c) - chainIDs := k.GetAllChainIDs(ctx) - // TODO: pagination for this API - resp := 
&types.QueryChainListResponse{ChainIds: chainIDs} + + chainIDs := []string{} + store := k.chainInfoStore(ctx) + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { + chainID := string(key) + chainIDs = append(chainIDs, chainID) + return nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + resp := &types.QueryChainListResponse{ + ChainIds: chainIDs, + Pagination: pageRes, + } return resp, nil } @@ -39,12 +50,16 @@ func (k Keeper) ChainInfo(c context.Context, req *types.QueryChainInfoRequest) ( ctx := sdk.UnwrapSDKContext(c) // find the chain info of this epoch - chainInfo := k.GetChainInfo(ctx, req.ChainId) + chainInfo, err := k.GetChainInfo(ctx, req.ChainId) + if err != nil { + return nil, err + } resp := &types.QueryChainInfoResponse{ChainInfo: chainInfo} return resp, nil } -func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedChainInfoRequest) (*types.QueryFinalizedChainInfoResponse, error) { +// Header returns the header and fork headers at a given height +func (k Keeper) Header(c context.Context, req *types.QueryHeaderRequest) (*types.QueryHeaderResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "invalid request") } @@ -55,56 +70,132 @@ func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedC ctx := sdk.UnwrapSDKContext(c) - // find the last finalised epoch - finalizedEpoch, err := k.GetFinalizedEpoch(ctx) + header, err := k.GetHeader(ctx, req.ChainId, req.Height) if err != nil { return nil, err } + forks := k.GetForks(ctx, req.ChainId, req.Height) + resp := &types.QueryHeaderResponse{ + Header: header, + ForkHeaders: forks, + } - // find the chain info of this epoch - chainInfo, err := k.GetEpochChainInfo(ctx, req.ChainId, finalizedEpoch) + return resp, nil +} + +// EpochChainInfo returns the info of a chain with given ID in a given epoch +func (k Keeper) EpochChainInfo(c context.Context, req 
*types.QueryEpochChainInfoRequest) (*types.QueryEpochChainInfoResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + if len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") + } + + ctx := sdk.UnwrapSDKContext(c) + + // find the chain info of the given epoch + chainInfo, err := k.GetEpochChainInfo(ctx, req.ChainId, req.EpochNum) if err != nil { return nil, err } + resp := &types.QueryEpochChainInfoResponse{ChainInfo: chainInfo} + return resp, nil +} + +// ListHeaders returns all headers of a chain with given ID, with pagination support +func (k Keeper) ListHeaders(c context.Context, req *types.QueryListHeadersRequest) (*types.QueryListHeadersResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + if len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") + } + + ctx := sdk.UnwrapSDKContext(c) + + headers := []*types.IndexedHeader{} + store := k.canonicalChainStore(ctx, req.ChainId) + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { + var header types.IndexedHeader + k.cdc.MustUnmarshal(value, &header) + headers = append(headers, &header) + return nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } - // It's possible that the chain info's epoch is way before the last finalised epoch - // e.g., when there is no relayer for many epochs - // NOTE: if an epoch is finalisedm then all of its previous epochs are also finalised - if chainInfo.LatestHeader.BabylonEpoch < finalizedEpoch { - finalizedEpoch = chainInfo.LatestHeader.BabylonEpoch + resp := &types.QueryListHeadersResponse{ + Headers: headers, + Pagination: pageRes, } + return resp, nil +} - // find the epoch metadata - epochInfo, err := k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) +// ListEpochHeaders returns all headers of 
a chain with given ID +// TODO: support pagination in this RPC +func (k Keeper) ListEpochHeaders(c context.Context, req *types.QueryListEpochHeadersRequest) (*types.QueryListEpochHeadersResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + if len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") + } + + ctx := sdk.UnwrapSDKContext(c) + + headers, err := k.GetEpochHeaders(ctx, req.ChainId, req.EpochNum) if err != nil { return nil, err } - // find the btc checkpoint tx index of this epoch - ed := k.btccKeeper.GetEpochData(ctx, finalizedEpoch) - if ed.Status != btcctypes.Finalized { - err := fmt.Errorf("epoch %d should have been finalized, but is in status %s", finalizedEpoch, ed.Status.String()) - panic(err) // this can only be a programming error + resp := &types.QueryListEpochHeadersResponse{ + Headers: headers, + } + return resp, nil +} + +func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedChainInfoRequest) (*types.QueryFinalizedChainInfoResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + if len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") + } + + ctx := sdk.UnwrapSDKContext(c) + resp := &types.QueryFinalizedChainInfoResponse{} + + // find the last finalised chain info and the earliest epoch that snapshots this chain info + finalizedEpoch, chainInfo, err := k.GetLastFinalizedChainInfo(ctx, req.ChainId) + if err != nil { + return nil, err } - if len(ed.Key) == 0 { - err := fmt.Errorf("finalized epoch %d should have at least 1 checkpoint submission", finalizedEpoch) - panic(err) // this can only be a programming error + resp.FinalizedChainInfo = chainInfo + + // find the epoch metadata of the finalised epoch + resp.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) + if err != nil { + return nil, 
err } - bestSubmissionKey := ed.Key[0] // index of checkpoint tx on BTC - // get raw checkpoint of this epoch - rawCheckpointBytes := ed.RawCheckpoint - rawCheckpoint, err := checkpointingtypes.FromBTCCkptBytesToRawCkpt(rawCheckpointBytes) + rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, finalizedEpoch) + if err != nil { return nil, err } - resp := &types.QueryFinalizedChainInfoResponse{ - FinalizedChainInfo: chainInfo, - // metadata related to this chain info, including the epoch, the raw checkpoint of this epoch, and the BTC tx index of the raw checkpoint - EpochInfo: epochInfo, - RawCheckpoint: rawCheckpoint, - BtcSubmissionKey: bestSubmissionKey, + resp.RawCheckpoint = rawCheckpoint.Ckpt + + // find the raw checkpoint and the best submission key for the finalised epoch + _, resp.BtcSubmissionKey, err = k.btccKeeper.GetBestSubmission(ctx, finalizedEpoch) + if err != nil { + return nil, err } // if the query does not want the proofs, return here @@ -112,32 +203,95 @@ func (k Keeper) FinalizedChainInfo(c context.Context, req *types.QueryFinalizedC return resp, nil } - // Proof that the Babylon tx is in block - resp.ProofTxInBlock, err = k.ProveTxInBlock(ctx, chainInfo.LatestHeader.BabylonTxHash) + // generate all proofs + resp.Proof, err = k.proveFinalizedChainInfo(ctx, chainInfo, resp.EpochInfo, resp.BtcSubmissionKey) if err != nil { return nil, err } - // proof that the block is in this epoch - resp.ProofHeaderInEpoch, err = k.ProveHeaderInEpoch(ctx, chainInfo.LatestHeader.BabylonHeader, epochInfo) - if err != nil { - return nil, err + return resp, nil +} + +func (k Keeper) FinalizedChainInfoUntilHeight(c context.Context, req *types.QueryFinalizedChainInfoUntilHeightRequest) (*types.QueryFinalizedChainInfoUntilHeightResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") } - // proof that the epoch is sealed - resp.ProofEpochSealed, err = k.ProveEpochSealed(ctx, finalizedEpoch) + if 
len(req.ChainId) == 0 { + return nil, status.Error(codes.InvalidArgument, "chain ID cannot be empty") + } + + ctx := sdk.UnwrapSDKContext(c) + resp := &types.QueryFinalizedChainInfoUntilHeightResponse{} + + // find and assign the last finalised chain info and the earliest epoch that snapshots this chain info + finalizedEpoch, chainInfo, err := k.GetLastFinalizedChainInfo(ctx, req.ChainId) if err != nil { return nil, err } + resp.FinalizedChainInfo = chainInfo + + if chainInfo.LatestHeader.Height <= req.Height { // the requested height is after the last finalised chain info + // find and assign the epoch metadata of the finalised epoch + resp.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) + if err != nil { + return nil, err + } + + rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, finalizedEpoch) + + if err != nil { + return nil, err + } + + resp.RawCheckpoint = rawCheckpoint.Ckpt + + // find and assign the raw checkpoint and the best submission key for the finalised epoch + _, resp.BtcSubmissionKey, err = k.btccKeeper.GetBestSubmission(ctx, finalizedEpoch) + if err != nil { + return nil, err + } + } else { // the requested height is before the last finalised chain info + // starting from the requested height, iterate backward until a timestamped header + closestHeader, err := k.FindClosestHeader(ctx, req.ChainId, req.Height) + if err != nil { + return nil, err + } + // assign the finalizedEpoch, and retrieve epoch info, raw ckpt and submission key + finalizedEpoch = closestHeader.BabylonEpoch + chainInfo, err = k.GetEpochChainInfo(ctx, req.ChainId, finalizedEpoch) + if err != nil { + return nil, err + } + resp.FinalizedChainInfo = chainInfo + resp.EpochInfo, err = k.epochingKeeper.GetHistoricalEpoch(ctx, finalizedEpoch) + if err != nil { + return nil, err + } + + rawCheckpoint, err := k.checkpointingKeeper.GetRawCheckpoint(ctx, finalizedEpoch) + + if err != nil { + return nil, err + } + + resp.RawCheckpoint = 
rawCheckpoint.Ckpt + + _, resp.BtcSubmissionKey, err = k.btccKeeper.GetBestSubmission(ctx, finalizedEpoch) + if err != nil { + return nil, err + } + } - // proof that the epoch's checkpoint is submitted to BTC - // i.e., the two `TransactionInfo`s for the checkpoint - resp.ProofEpochSubmitted, err = k.ProveEpochSubmitted(ctx, *bestSubmissionKey) + // if the query does not want the proofs, return here + if !req.Prove { + return resp, nil + } + + // generate all proofs + resp.Proof, err = k.proveFinalizedChainInfo(ctx, chainInfo, resp.EpochInfo, resp.BtcSubmissionKey) if err != nil { - // The only error in ProveEpochSubmitted is the nil bestSubmission. - // Since the epoch w.r.t. the bestSubmissionKey is finalised, this - // can only be a programming error, so we should panic here. - panic(err) + return nil, err } return resp, nil diff --git a/x/zoneconcierge/keeper/grpc_query_test.go b/x/zoneconcierge/keeper/grpc_query_test.go index 16584ad1a..cb910ebfa 100644 --- a/x/zoneconcierge/keeper/grpc_query_test.go +++ b/x/zoneconcierge/keeper/grpc_query_test.go @@ -2,7 +2,6 @@ package keeper_test import ( "math/rand" - "sort" "testing" "github.com/babylonchain/babylon/testutil/datagen" @@ -10,6 +9,8 @@ import ( btcctypes "github.com/babylonchain/babylon/x/btccheckpoint/types" checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" zctypes "github.com/babylonchain/babylon/x/zoneconcierge/types" + "github.com/cosmos/cosmos-sdk/types/query" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" @@ -18,43 +19,48 @@ import ( ) func FuzzChainList(f *testing.F) { - datagen.AddRandomSeedsToFuzzer(f, 100) + datagen.AddRandomSeedsToFuzzer(f, 10) f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, _, zcKeeper := SetupTest(t) + _, babylonChain, _, babylonApp := SetupTest(t) + 
zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := zcKeeper.Hooks() // invoke the hook a random number of times with random chain IDs - numHeaders := datagen.RandomInt(100) - expectedChainIDs := []string{} + numHeaders := datagen.RandomInt(100) + 1 + allChainIDs := []string{} for i := uint64(0); i < numHeaders; i++ { var chainID string // simulate the scenario that some headers belong to the same chain if i > 0 && datagen.OneInN(2) { - chainID = expectedChainIDs[rand.Intn(len(expectedChainIDs))] + chainID = allChainIDs[rand.Intn(len(allChainIDs))] } else { chainID = datagen.GenRandomHexStr(30) - expectedChainIDs = append(expectedChainIDs, chainID) + allChainIDs = append(allChainIDs, chainID) } header := datagen.GenRandomIBCTMHeader(chainID, 0) hooks.AfterHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(32), header, false) } + limit := datagen.RandomInt(len(allChainIDs)) + 1 + // make query to get actual chain IDs - resp, err := zcKeeper.ChainList(ctx, &zctypes.QueryChainListRequest{}) + resp, err := zcKeeper.ChainList(ctx, &zctypes.QueryChainListRequest{ + Pagination: &query.PageRequest{ + Limit: limit, + }, + }) require.NoError(t, err) actualChainIDs := resp.ChainIds - // sort them and assert equality - sort.Strings(expectedChainIDs) - sort.Strings(actualChainIDs) - require.Equal(t, len(expectedChainIDs), len(actualChainIDs)) - for i := 0; i < len(expectedChainIDs); i++ { - require.Equal(t, expectedChainIDs[i], actualChainIDs[i]) + require.Equal(t, limit, uint64(len(actualChainIDs))) + allChainIDs = zcKeeper.GetAllChainIDs(ctx) + for i := uint64(0); i < limit; i++ { + require.Equal(t, allChainIDs[i], actualChainIDs[i]) } }) } @@ -65,7 +71,8 @@ func FuzzChainInfo(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, czChain, zcKeeper := SetupTest(t) + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper ctx := babylonChain.GetContext() hooks := 
zcKeeper.Hooks() @@ -73,7 +80,7 @@ func FuzzChainInfo(f *testing.F) { // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 - SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, numHeaders, numForkHeaders) + SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) // check if the chain info of is recorded or not resp, err := zcKeeper.ChainInfo(ctx, &zctypes.QueryChainInfoRequest{ChainId: czChain.ChainID}) @@ -84,6 +91,194 @@ func FuzzChainInfo(f *testing.F) { }) } +func FuzzHeader(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + // invoke the hook a random number of times to simulate a random number of blocks + numHeaders := datagen.RandomInt(100) + 2 + numForkHeaders := datagen.RandomInt(10) + 1 + headers, forkHeaders := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) + + // find header at a random height and assert correctness against the expected header + randomHeight := datagen.RandomInt(int(numHeaders - 1)) + resp, err := zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ChainId: czChain.ChainID, Height: randomHeight}) + require.NoError(t, err) + require.Equal(t, headers[randomHeight].Header.LastCommitHash, resp.Header.Hash) + require.Len(t, resp.ForkHeaders.Headers, 0) + + // find the last header and fork headers then assert correctness + resp, err = zcKeeper.Header(ctx, &zctypes.QueryHeaderRequest{ChainId: czChain.ChainID, Height: numHeaders - 1}) + require.NoError(t, err) + require.Equal(t, headers[numHeaders-1].Header.LastCommitHash, resp.Header.Hash) + require.Len(t, resp.ForkHeaders.Headers, int(numForkHeaders)) + for i := 0; 
i < int(numForkHeaders); i++ { + require.Equal(t, forkHeaders[i].Header.LastCommitHash, resp.ForkHeaders.Headers[i].Hash) + } + }) +} + +func FuzzEpochChainInfo(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + numReqs := datagen.RandomInt(5) + 1 + + epochNumList := []uint64{datagen.RandomInt(10) + 1} + nextHeightList := []uint64{0} + numHeadersList := []uint64{} + numForkHeadersList := []uint64{} + + // we test the scenario of ending an epoch for multiple times, in order to ensure that + // consecutive epoch infos do not affect each other. + for i := uint64(0); i < numReqs; i++ { + // generate a random number of headers and fork headers + numHeadersList = append(numHeadersList, datagen.RandomInt(100)+1) + numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(10)+1) + // trigger hooks to append these headers and fork headers + SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i]) + // prepare nextHeight for the next request + nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i]) + + // simulate the scenario that a random epoch has ended + hooks.AfterEpochEnds(ctx, epochNumList[i]) + // prepare epochNum for the next request + epochNumList = append(epochNumList, epochNumList[i]+datagen.RandomInt(10)+1) + } + + // attest the correctness of epoch info for each tested epoch + for i := uint64(0); i < numReqs; i++ { + resp, err := zcKeeper.EpochChainInfo(ctx, &zctypes.QueryEpochChainInfoRequest{EpochNum: epochNumList[i], ChainId: czChain.ChainID}) + require.NoError(t, err) + chainInfo := resp.ChainInfo + require.Equal(t, nextHeightList[i+1]-1, chainInfo.LatestHeader.Height) + require.Equal(t, numForkHeadersList[i], 
uint64(len(chainInfo.LatestForks.Headers))) + } + }) +} + +func FuzzListHeaders(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + // invoke the hook a random number of times to simulate a random number of blocks + numHeaders := datagen.RandomInt(100) + 1 + numForkHeaders := datagen.RandomInt(10) + 1 + headers, _ := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, 0, numHeaders, numForkHeaders) + + // a request with randomised pagination + limit := datagen.RandomInt(int(numHeaders)) + 1 + req := &zctypes.QueryListHeadersRequest{ + ChainId: czChain.ChainID, + Pagination: &query.PageRequest{ + Limit: limit, + }, + } + resp, err := zcKeeper.ListHeaders(ctx, req) + require.NoError(t, err) + require.Equal(t, int(limit), len(resp.Headers)) + for i := uint64(0); i < limit; i++ { + require.Equal(t, headers[i].Header.LastCommitHash, resp.Headers[i].Hash) + } + }) +} + +func FuzzListEpochHeaders(f *testing.F) { + datagen.AddRandomSeedsToFuzzer(f, 10) + + f.Fuzz(func(t *testing.T, seed int64) { + rand.Seed(seed) + + _, babylonChain, czChain, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + epochingKeeper := babylonApp.EpochingKeeper + + ctx := babylonChain.GetContext() + hooks := zcKeeper.Hooks() + + numReqs := datagen.RandomInt(5) + 1 + + epochNumList := []uint64{datagen.RandomInt(10) + 1} + nextHeightList := []uint64{0} + numHeadersList := []uint64{} + expectedHeadersMap := map[uint64][]*ibctmtypes.Header{} + numForkHeadersList := []uint64{} + + // we test the scenario of ending an epoch for multiple times, in order to ensure that + // consecutive epoch infos do not affect each other. 
+ for i := uint64(0); i < numReqs; i++ { + epochNum := epochNumList[i] + // enter a random epoch + if i == 0 { + for j := uint64(0); j < epochNum; j++ { + epochingKeeper.IncEpoch(ctx) + } + } else { + for j := uint64(0); j < epochNum-epochNumList[i-1]; j++ { + epochingKeeper.IncEpoch(ctx) + } + } + + // generate a random number of headers and fork headers + numHeadersList = append(numHeadersList, datagen.RandomInt(100)+1) + numForkHeadersList = append(numForkHeadersList, datagen.RandomInt(10)+1) + // trigger hooks to append these headers and fork headers + expectedHeaders, _ := SimulateHeadersAndForksViaHook(ctx, hooks, czChain.ChainID, nextHeightList[i], numHeadersList[i], numForkHeadersList[i]) + expectedHeadersMap[epochNum] = expectedHeaders + // prepare nextHeight for the next request + nextHeightList = append(nextHeightList, nextHeightList[i]+numHeadersList[i]) + + // simulate the scenario that a random epoch has ended + hooks.AfterEpochEnds(ctx, epochNum) + // prepare epochNum for the next request + epochNumList = append(epochNumList, epochNum+datagen.RandomInt(10)+1) + } + + // attest the correctness of epoch info for each tested epoch + for i := uint64(0); i < numReqs; i++ { + epochNum := epochNumList[i] + // make request + req := &zctypes.QueryListEpochHeadersRequest{ + ChainId: czChain.ChainID, + EpochNum: epochNum, + } + resp, err := zcKeeper.ListEpochHeaders(ctx, req) + require.NoError(t, err) + + // check if the headers are same as expected + headers := resp.Headers + require.Equal(t, len(expectedHeadersMap[epochNum]), len(headers)) + for j := 0; j < len(expectedHeadersMap[epochNum]); j++ { + require.Equal(t, expectedHeadersMap[epochNum][j].Header.LastCommitHash, headers[j].Hash) + } + } + }) +} + func FuzzFinalizedChainInfo(f *testing.F) { datagen.AddRandomSeedsToFuzzer(f, 10) @@ -105,15 +300,21 @@ func FuzzFinalizedChainInfo(f *testing.F) { checkpointingKeeper.EXPECT().GetBLSPubKeySet(gomock.Any(), 
gomock.Eq(epoch.EpochNumber)).Return([]*checkpointingtypes.ValidatorWithBlsKey{}, nil).AnyTimes() // mock btccheckpoint keeper // TODO: test with BTCSpvProofs + randomRawCkpt := datagen.GenRandomRawCheckpoint() + randomRawCkpt.EpochNum = epoch.EpochNumber btccKeeper := zctypes.NewMockBtcCheckpointKeeper(ctrl) - mockEpochData := &btcctypes.EpochData{ - Key: []*btcctypes.SubmissionKey{ - {Key: []*btcctypes.TransactionKey{}}, + checkpointingKeeper.EXPECT().GetRawCheckpoint(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return( + &checkpointingtypes.RawCheckpointWithMeta{ + Ckpt: randomRawCkpt, + }, nil, + ).AnyTimes() + btccKeeper.EXPECT().GetBestSubmission(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return( + btcctypes.Finalized, + &btcctypes.SubmissionKey{ + Key: []*btcctypes.TransactionKey{}, }, - Status: btcctypes.Finalized, - RawCheckpoint: datagen.RandomRawCheckpointDataForEpoch(epoch.EpochNumber).ExpectedOpReturn, - } - btccKeeper.EXPECT().GetEpochData(gomock.Any(), gomock.Eq(epoch.EpochNumber)).Return(mockEpochData).AnyTimes() + nil, + ).AnyTimes() mockSubmissionData := &btcctypes.SubmissionData{TxsInfo: []*btcctypes.TransactionInfo{}} btccKeeper.EXPECT().GetSubmissionData(gomock.Any(), gomock.Any()).Return(mockSubmissionData).AnyTimes() // mock epoching keeper @@ -136,7 +337,7 @@ func FuzzFinalizedChainInfo(f *testing.F) { // invoke the hook a random number of times to simulate a random number of blocks numHeaders := datagen.RandomInt(100) + 1 numForkHeaders := datagen.RandomInt(10) + 1 - SimulateHeadersAndForksViaHook(ctx, hooks, czChainID, numHeaders, numForkHeaders) + SimulateHeadersAndForksViaHook(ctx, hooks, czChainID, 0, numHeaders, numForkHeaders) hooks.AfterEpochEnds(ctx, epoch.EpochNumber) err := hooks.AfterRawCheckpointFinalized(ctx, epoch.EpochNumber) diff --git a/x/zoneconcierge/keeper/hooks.go b/x/zoneconcierge/keeper/hooks.go index af8dfa8cd..d3e931377 100644 --- a/x/zoneconcierge/keeper/hooks.go +++ b/x/zoneconcierge/keeper/hooks.go @@ -1,10 +1,13 
@@ package keeper import ( + "fmt" + checkpointingtypes "github.com/babylonchain/babylon/x/checkpointing/types" epochingtypes "github.com/babylonchain/babylon/x/epoching/types" "github.com/babylonchain/babylon/x/zoneconcierge/types" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" ibcclientkeeper "github.com/cosmos/ibc-go/v5/modules/core/02-client/keeper" ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" ) @@ -31,6 +34,21 @@ func (h Hooks) AfterHeaderWithValidCommit(ctx sdk.Context, txHash []byte, header BabylonEpoch: h.k.GetEpoch(ctx).EpochNumber, BabylonTxHash: txHash, } + + // initialise chain info if not exist + chainInfo, err := h.k.GetChainInfo(ctx, indexedHeader.ChainId) + if err != nil { + if sdkerrors.IsOf(err, types.ErrEpochChainInfoNotFound) { + // chain info does not exist yet, initialise chain info for this chain + chainInfo, err = h.k.InitChainInfo(ctx, indexedHeader.ChainId) + if err != nil { + panic(fmt.Errorf("failed to initialize chain info of %s: %w", indexedHeader.ChainId, err)) + } + } else { + panic(fmt.Errorf("failed to get chain info of %s: %w", indexedHeader.ChainId, err)) + } + } + if isOnFork { // insert header to fork index if err := h.k.insertForkHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil { @@ -41,12 +59,19 @@ func (h Hooks) AfterHeaderWithValidCommit(ctx sdk.Context, txHash []byte, header panic(err) } } else { + // ensure the header is the latest one, otherwise ignore it + // NOTE: while an old header is considered acceptable in IBC-Go (see Case_valid_past_update), but + // ZoneConcierge should not checkpoint it since Babylon requires monotonic checkpointing + if !chainInfo.IsLatestHeader(&indexedHeader) { + return + } + // insert header to canonical chain index if err := h.k.insertHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil { panic(err) } // update the latest canonical header in chain info - if err := 
h.k.tryToUpdateLatestHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil { + if err := h.k.updateLatestHeader(ctx, indexedHeader.ChainId, &indexedHeader); err != nil { panic(err) } } @@ -69,7 +94,14 @@ func (h Hooks) AfterRawCheckpointFinalized(ctx sdk.Context, epoch uint64) error // Other unused hooks -func (h Hooks) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error { return nil } -func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { return nil } +func (h Hooks) AfterBlsKeyRegistered(ctx sdk.Context, valAddr sdk.ValAddress) error { return nil } +func (h Hooks) AfterRawCheckpointConfirmed(ctx sdk.Context, epoch uint64) error { return nil } + +func (h Hooks) AfterRawCheckpointForgotten(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return nil +} +func (h Hooks) AfterRawCheckpointBlsSigVerified(ctx sdk.Context, ckpt *checkpointingtypes.RawCheckpoint) error { + return nil +} func (h Hooks) AfterEpochBegins(ctx sdk.Context, epoch uint64) {} func (h Hooks) BeforeSlashThreshold(ctx sdk.Context, valSet epochingtypes.ValidatorSet) {} diff --git a/x/zoneconcierge/keeper/keeper_test.go b/x/zoneconcierge/keeper/keeper_test.go index 937754427..12069d90f 100644 --- a/x/zoneconcierge/keeper/keeper_test.go +++ b/x/zoneconcierge/keeper/keeper_test.go @@ -13,13 +13,13 @@ import ( ) // SetupTest creates a coordinator with 2 test chains, and a ZoneConcierge keeper. 
-func SetupTest(t *testing.T) (*ibctesting.Coordinator, *ibctesting.TestChain, *ibctesting.TestChain, zckeeper.Keeper) { - var zcKeeper zckeeper.Keeper +func SetupTest(t *testing.T) (*ibctesting.Coordinator, *ibctesting.TestChain, *ibctesting.TestChain, *app.BabylonApp) { + var bbnApp *app.BabylonApp coordinator := ibctesting.NewCoordinator(t, 2) // replace the first test chain with a Babylon chain ibctesting.DefaultTestingAppInit = func() (ibctesting.TestingApp, map[string]json.RawMessage) { babylonApp := app.Setup(t, false) - zcKeeper = babylonApp.ZoneConciergeKeeper + bbnApp = babylonApp encCdc := app.MakeTestEncodingConfig() genesis := app.NewDefaultGenesisState(encCdc.Marshaler) return babylonApp, genesis @@ -30,15 +30,15 @@ func SetupTest(t *testing.T) (*ibctesting.Coordinator, *ibctesting.TestChain, *i babylonChain := coordinator.GetChain(ibctesting.GetChainID(1)) czChain := coordinator.GetChain(ibctesting.GetChainID(2)) - return coordinator, babylonChain, czChain, zcKeeper + return coordinator, babylonChain, czChain, bbnApp } // SimulateHeadersViaHook generates a non-zero number of canonical headers via the hook -func SimulateHeadersViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID string, numHeaders uint64) []*ibctmtypes.Header { +func SimulateHeadersViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID string, startHeight uint64, numHeaders uint64) []*ibctmtypes.Header { headers := []*ibctmtypes.Header{} // invoke the hook a number of times to simulate a number of blocks for i := uint64(0); i < numHeaders; i++ { - header := datagen.GenRandomIBCTMHeader(chainID, i) + header := datagen.GenRandomIBCTMHeader(chainID, startHeight+i) hooks.AfterHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(32), header, false) headers = append(headers, header) } @@ -46,11 +46,11 @@ func SimulateHeadersViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID strin } // SimulateHeadersViaHook generates a random non-zero number of canonical headers and fork headers via 
the hook -func SimulateHeadersAndForksViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID string, numHeaders uint64, numForkHeaders uint64) ([]*ibctmtypes.Header, []*ibctmtypes.Header) { +func SimulateHeadersAndForksViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chainID string, startHeight uint64, numHeaders uint64, numForkHeaders uint64) ([]*ibctmtypes.Header, []*ibctmtypes.Header) { headers := []*ibctmtypes.Header{} // invoke the hook a number of times to simulate a number of blocks for i := uint64(0); i < numHeaders; i++ { - header := datagen.GenRandomIBCTMHeader(chainID, i) + header := datagen.GenRandomIBCTMHeader(chainID, startHeight+i) hooks.AfterHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(32), header, false) headers = append(headers, header) } @@ -58,7 +58,7 @@ func SimulateHeadersAndForksViaHook(ctx sdk.Context, hooks zckeeper.Hooks, chain // generate a number of fork headers forkHeaders := []*ibctmtypes.Header{} for i := uint64(0); i < numForkHeaders; i++ { - header := datagen.GenRandomIBCTMHeader(chainID, numHeaders-1) + header := datagen.GenRandomIBCTMHeader(chainID, startHeight+numHeaders-1) hooks.AfterHeaderWithValidCommit(ctx, datagen.GenRandomByteArray(32), header, true) forkHeaders = append(forkHeaders, header) } diff --git a/x/zoneconcierge/keeper/proof_epoch_sealed_test.go b/x/zoneconcierge/keeper/proof_epoch_sealed_test.go index a3ef1038b..a357bff78 100644 --- a/x/zoneconcierge/keeper/proof_epoch_sealed_test.go +++ b/x/zoneconcierge/keeper/proof_epoch_sealed_test.go @@ -90,12 +90,12 @@ func FuzzProofEpochSealed_BLSSig(f *testing.F) { // verify err = zckeeper.VerifyEpochSealed(epoch, rawCkpt, proof) - if numSubSet <= numVals*1/3 { // BLS sig does not reach a quorum - require.LessOrEqual(t, subsetPower, uint64(numVals*1/3)) + if subsetPower <= valSet.GetTotalPower()*1/3 { // BLS sig does not reach a quorum + require.LessOrEqual(t, numSubSet, numVals*1/3) require.Error(t, err) require.NotErrorIs(t, err, zctypes.ErrInvalidMerkleProof) } 
else { // BLS sig has a valid quorum - require.Greater(t, subsetPower, valSet.GetTotalPower()*1/3) + require.Greater(t, numSubSet, numVals*1/3) require.Error(t, err) require.ErrorIs(t, err, zctypes.ErrInvalidMerkleProof) } diff --git a/x/zoneconcierge/keeper/proof_epoch_submitted.go b/x/zoneconcierge/keeper/proof_epoch_submitted.go index 54efdd81f..758f2fc3e 100644 --- a/x/zoneconcierge/keeper/proof_epoch_submitted.go +++ b/x/zoneconcierge/keeper/proof_epoch_submitted.go @@ -14,8 +14,8 @@ import ( // ProveEpochSubmitted generates proof that the epoch's checkpoint is submitted to BTC // i.e., the two `TransactionInfo`s for the checkpoint -func (k Keeper) ProveEpochSubmitted(ctx sdk.Context, sk btcctypes.SubmissionKey) ([]*btcctypes.TransactionInfo, error) { - bestSubmissionData := k.btccKeeper.GetSubmissionData(ctx, sk) +func (k Keeper) ProveEpochSubmitted(ctx sdk.Context, sk *btcctypes.SubmissionKey) ([]*btcctypes.TransactionInfo, error) { + bestSubmissionData := k.btccKeeper.GetSubmissionData(ctx, *sk) if bestSubmissionData == nil { return nil, fmt.Errorf("the best submission key for epoch %d has no submission data", bestSubmissionData.Epoch) } diff --git a/x/zoneconcierge/keeper/proof_finalized_chain_info.go b/x/zoneconcierge/keeper/proof_finalized_chain_info.go new file mode 100644 index 000000000..f8d99fc1c --- /dev/null +++ b/x/zoneconcierge/keeper/proof_finalized_chain_info.go @@ -0,0 +1,58 @@ +package keeper + +import ( + btcctypes "github.com/babylonchain/babylon/x/btccheckpoint/types" + epochingtypes "github.com/babylonchain/babylon/x/epoching/types" + "github.com/babylonchain/babylon/x/zoneconcierge/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// proveFinalizedChainInfo generates proofs that a chainInfo has been finalised by the given epoch with epochInfo +// It includes proofTxInBlock, proofHeaderInEpoch, proofEpochSealed and proofEpochSubmitted +// The proofs can be verified by a verifier with access to a BTC and Babylon light client +// 
CONTRACT: this is only a private helper function for simplifying the implementation of RPC calls +func (k Keeper) proveFinalizedChainInfo( + ctx sdk.Context, + chainInfo *types.ChainInfo, + epochInfo *epochingtypes.Epoch, + bestSubmissionKey *btcctypes.SubmissionKey, +) (*types.ProofFinalizedChainInfo, error) { + var ( + err error + proof = &types.ProofFinalizedChainInfo{} + ) + + // Proof that the Babylon tx is in block + proof.ProofTxInBlock, err = k.ProveTxInBlock(ctx, chainInfo.LatestHeader.BabylonTxHash) + if err != nil { + return nil, err + } + + // proof that the block is in this epoch + proof.ProofHeaderInEpoch, err = k.ProveHeaderInEpoch(ctx, chainInfo.LatestHeader.BabylonHeader, epochInfo) + if err != nil { + return nil, err + } + + // proof that the epoch is sealed + proof.ProofEpochSealed, err = k.ProveEpochSealed(ctx, epochInfo.EpochNumber) + if err != nil { + return nil, err + } + + // proof that the epoch's checkpoint is submitted to BTC + // i.e., the two `TransactionInfo`s for the checkpoint + proof.ProofEpochSubmitted, err = k.ProveEpochSubmitted(ctx, bestSubmissionKey) + if err != nil { + // The only error in ProveEpochSubmitted is the nil bestSubmission. + // Since the epoch w.r.t. the bestSubmissionKey is finalised, this + // can only be a programming error, so we should panic here. 
+ panic(err) + } + + return proof, nil +} + +// TODO: implement a standalone verifier VerifyFinalizedChainInfo that +// verifies whether a chainInfo is finalised or not, with access to +// Bitcoin and Babylon light clients diff --git a/x/zoneconcierge/keeper/proof_tx_in_block_test.go b/x/zoneconcierge/keeper/proof_tx_in_block_test.go index 92e91e7a1..11944081a 100644 --- a/x/zoneconcierge/keeper/proof_tx_in_block_test.go +++ b/x/zoneconcierge/keeper/proof_tx_in_block_test.go @@ -31,7 +31,9 @@ func TestProveTxInBlock(t *testing.T) { err = testNetwork.WaitForNextBlock() require.NoError(t, err) - _, babylonChain, _, zcKeeper := SetupTest(t) + _, babylonChain, _, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + ctx := babylonChain.GetContext() // construct client context diff --git a/x/zoneconcierge/keeper/query_kvstore_test.go b/x/zoneconcierge/keeper/query_kvstore_test.go index aaa730ebe..06f619831 100644 --- a/x/zoneconcierge/keeper/query_kvstore_test.go +++ b/x/zoneconcierge/keeper/query_kvstore_test.go @@ -17,7 +17,9 @@ func FuzzQueryStore(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { rand.Seed(seed) - _, babylonChain, _, zcKeeper := SetupTest(t) + _, babylonChain, _, babylonApp := SetupTest(t) + zcKeeper := babylonApp.ZoneConciergeKeeper + babylonChain.NextBlock() babylonChain.NextBlock() diff --git a/x/zoneconcierge/module_test.go b/x/zoneconcierge/module_test.go index fb0829f5f..8289001a0 100644 --- a/x/zoneconcierge/module_test.go +++ b/x/zoneconcierge/module_test.go @@ -331,18 +331,26 @@ func (suite *ZoneConciergeTestSuite) TestUpdateClientTendermint() { ctx := suite.babylonChain.GetContext() czChainID := suite.czChain.ChainID updateHeaderHeight := uint64(updateHeader.Header.Height) - // updateHeader should be correctly recorded in canonical chain indexer - expUpdateHeader, err := suite.zcKeeper.GetHeader(ctx, czChainID, updateHeaderHeight) - suite.Require().NoError(err) - suite.Require().Equal(expUpdateHeader.Hash, 
updateHeader.Header.LastCommitHash) - suite.Require().Equal(expUpdateHeader.Height, updateHeaderHeight) + // updateHeader should be correctly recorded in chain info indexer if tc.name != "valid past update" { // we exclude the case of past update since chain info indexer does not record past update - chainInfo := suite.zcKeeper.GetChainInfo(ctx, czChainID) + // updateHeader should be correctly recorded in canonical chain indexer + expUpdateHeader, err := suite.zcKeeper.GetHeader(ctx, czChainID, updateHeaderHeight) + suite.Require().NoError(err) + suite.Require().Equal(expUpdateHeader.Hash, updateHeader.Header.LastCommitHash) + suite.Require().Equal(expUpdateHeader.Height, updateHeaderHeight) + // updateHeader should be correctly recorded in chain info indexer + chainInfo, err := suite.zcKeeper.GetChainInfo(ctx, czChainID) + suite.Require().NoError(err) suite.Require().Equal(chainInfo.LatestHeader.Hash, updateHeader.Header.LastCommitHash) suite.Require().Equal(chainInfo.LatestHeader.Height, updateHeaderHeight) - } else { // in the test case where Babylon receives a past CZ header, the latest header should be the last header - chainInfo := suite.zcKeeper.GetChainInfo(ctx, czChainID) + } else { + // there should be no header in updateHeaderHeight + _, err := suite.zcKeeper.GetHeader(ctx, czChainID, updateHeaderHeight) + suite.Require().Error(err) + // the latest header in chain info indexer should be the last header + chainInfo, err := suite.zcKeeper.GetChainInfo(ctx, czChainID) + suite.Require().NoError(err) suite.Require().Equal(chainInfo.LatestHeader.Hash, suite.czChain.LastHeader.Header.LastCommitHash) suite.Require().Equal(chainInfo.LatestHeader.Height, uint64(suite.czChain.LastHeader.Header.Height)) } @@ -358,7 +366,8 @@ func (suite *ZoneConciergeTestSuite) TestUpdateClientTendermint() { suite.Require().Equal(expForks.Headers[0].Hash, updateHeader.Header.LastCommitHash) suite.Require().Equal(expForks.Headers[0].Height, updateHeaderHeight) // updateHeader should 
be correctly recorded in chain info indexer - chainInfo := suite.zcKeeper.GetChainInfo(ctx, czChainID) + chainInfo, err := suite.zcKeeper.GetChainInfo(ctx, czChainID) + suite.Require().NoError(err) suite.Require().Equal(1, len(chainInfo.LatestForks.Headers)) suite.Require().Equal(chainInfo.LatestForks.Headers[0].Hash, updateHeader.Header.LastCommitHash) suite.Require().Equal(chainInfo.LatestForks.Headers[0].Height, updateHeaderHeight) diff --git a/x/zoneconcierge/types/errors.go b/x/zoneconcierge/types/errors.go index 71cba5771..483ae213b 100644 --- a/x/zoneconcierge/types/errors.go +++ b/x/zoneconcierge/types/errors.go @@ -16,8 +16,11 @@ var ( ErrNoValidAncestorHeader = sdkerrors.Register(ModuleName, 1105, "no valid ancestor for this header") ErrForkNotFound = sdkerrors.Register(ModuleName, 1106, "cannot find fork") ErrInvalidForks = sdkerrors.Register(ModuleName, 1107, "input forks is invalid") - ErrEpochChainInfoNotFound = sdkerrors.Register(ModuleName, 1108, "no chain info exists at this epoch") - ErrFinalizedEpochNotFound = sdkerrors.Register(ModuleName, 1109, "cannot find a finalized epoch") - ErrInvalidProofEpochSealed = sdkerrors.Register(ModuleName, 1110, "invalid ProofEpochSealed") - ErrInvalidMerkleProof = sdkerrors.Register(ModuleName, 1111, "invalid Merkle inclusion proof") + ErrChainInfoNotFound = sdkerrors.Register(ModuleName, 1108, "no chain info exists") + ErrEpochChainInfoNotFound = sdkerrors.Register(ModuleName, 1109, "no chain info exists at this epoch") + ErrEpochHeadersNotFound = sdkerrors.Register(ModuleName, 1110, "no timestamped header exists at this epoch") + ErrFinalizedEpochNotFound = sdkerrors.Register(ModuleName, 1111, "cannot find a finalized epoch") + ErrInvalidProofEpochSealed = sdkerrors.Register(ModuleName, 1112, "invalid ProofEpochSealed") + ErrInvalidMerkleProof = sdkerrors.Register(ModuleName, 1113, "invalid Merkle inclusion proof") + ErrInvalidChainInfo = sdkerrors.Register(ModuleName, 1114, "invalid chain info") ) diff --git 
a/x/zoneconcierge/types/expected_keepers.go b/x/zoneconcierge/types/expected_keepers.go index 8317fefe7..190f1daaf 100644 --- a/x/zoneconcierge/types/expected_keepers.go +++ b/x/zoneconcierge/types/expected_keepers.go @@ -68,12 +68,13 @@ type ScopedKeeper interface { } type BtcCheckpointKeeper interface { - GetEpochData(ctx sdk.Context, e uint64) *btcctypes.EpochData + GetBestSubmission(ctx sdk.Context, e uint64) (btcctypes.BtcStatus, *btcctypes.SubmissionKey, error) GetSubmissionData(ctx sdk.Context, sk btcctypes.SubmissionKey) *btcctypes.SubmissionData } type CheckpointingKeeper interface { GetBLSPubKeySet(ctx sdk.Context, epochNumber uint64) ([]*checkpointingtypes.ValidatorWithBlsKey, error) + GetRawCheckpoint(ctx sdk.Context, epochNumber uint64) (*checkpointingtypes.RawCheckpointWithMeta, error) } type EpochingKeeper interface { diff --git a/x/zoneconcierge/types/mocked_keepers.go b/x/zoneconcierge/types/mocked_keepers.go index 8126d2ba7..d58c82a20 100644 --- a/x/zoneconcierge/types/mocked_keepers.go +++ b/x/zoneconcierge/types/mocked_keepers.go @@ -502,18 +502,20 @@ func (m *MockBtcCheckpointKeeper) EXPECT() *MockBtcCheckpointKeeperMockRecorder return m.recorder } -// GetEpochData mocks base method. -func (m *MockBtcCheckpointKeeper) GetEpochData(ctx types2.Context, e uint64) *types.EpochData { +// GetBestSubmission mocks base method. +func (m *MockBtcCheckpointKeeper) GetBestSubmission(ctx types2.Context, e uint64) (types.BtcStatus, *types.SubmissionKey, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetEpochData", ctx, e) - ret0, _ := ret[0].(*types.EpochData) - return ret0 + ret := m.ctrl.Call(m, "GetBestSubmission", ctx, e) + ret0, _ := ret[0].(types.BtcStatus) + ret1, _ := ret[1].(*types.SubmissionKey) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 } -// GetEpochData indicates an expected call of GetEpochData. 
-func (mr *MockBtcCheckpointKeeperMockRecorder) GetEpochData(ctx, e interface{}) *gomock.Call { +// GetBestSubmission indicates an expected call of GetBestSubmission. +func (mr *MockBtcCheckpointKeeperMockRecorder) GetBestSubmission(ctx, e interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEpochData", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetEpochData), ctx, e) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBestSubmission", reflect.TypeOf((*MockBtcCheckpointKeeper)(nil).GetBestSubmission), ctx, e) } // GetSubmissionData mocks base method. @@ -568,6 +570,21 @@ func (mr *MockCheckpointingKeeperMockRecorder) GetBLSPubKeySet(ctx, epochNumber return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBLSPubKeySet", reflect.TypeOf((*MockCheckpointingKeeper)(nil).GetBLSPubKeySet), ctx, epochNumber) } +// GetRawCheckpoint mocks base method. +func (m *MockCheckpointingKeeper) GetRawCheckpoint(ctx types2.Context, epochNumber uint64) (*types0.RawCheckpointWithMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRawCheckpoint", ctx, epochNumber) + ret0, _ := ret[0].(*types0.RawCheckpointWithMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRawCheckpoint indicates an expected call of GetRawCheckpoint. +func (mr *MockCheckpointingKeeperMockRecorder) GetRawCheckpoint(ctx, epochNumber interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRawCheckpoint", reflect.TypeOf((*MockCheckpointingKeeper)(nil).GetRawCheckpoint), ctx, epochNumber) +} + // MockEpochingKeeper is a mock of EpochingKeeper interface. 
type MockEpochingKeeper struct { ctrl *gomock.Controller diff --git a/x/zoneconcierge/types/query.pb.go b/x/zoneconcierge/types/query.pb.go index 8533b2361..aa2c0e391 100644 --- a/x/zoneconcierge/types/query.pb.go +++ b/x/zoneconcierge/types/query.pb.go @@ -9,12 +9,10 @@ import ( types2 "github.com/babylonchain/babylon/x/btccheckpoint/types" types1 "github.com/babylonchain/babylon/x/checkpointing/types" types "github.com/babylonchain/babylon/x/epoching/types" - _ "github.com/cosmos/cosmos-sdk/types/query" + query "github.com/cosmos/cosmos-sdk/types/query" _ "github.com/gogo/protobuf/gogoproto" grpc1 "github.com/gogo/protobuf/grpc" proto "github.com/gogo/protobuf/proto" - crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" - types3 "github.com/tendermint/tendermint/proto/tendermint/types" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -118,15 +116,123 @@ func (m *QueryParamsResponse) GetParams() Params { return Params{} } +// QueryHeaderRequest is request type for the Query/Header RPC method. 
+type QueryHeaderRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *QueryHeaderRequest) Reset() { *m = QueryHeaderRequest{} } +func (m *QueryHeaderRequest) String() string { return proto.CompactTextString(m) } +func (*QueryHeaderRequest) ProtoMessage() {} +func (*QueryHeaderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{2} +} +func (m *QueryHeaderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryHeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryHeaderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryHeaderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryHeaderRequest.Merge(m, src) +} +func (m *QueryHeaderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryHeaderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryHeaderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryHeaderRequest proto.InternalMessageInfo + +func (m *QueryHeaderRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *QueryHeaderRequest) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +// QueryParamsResponse is response type for the Query/Header RPC method. 
+type QueryHeaderResponse struct { + Header *IndexedHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + ForkHeaders *Forks `protobuf:"bytes,2,opt,name=fork_headers,json=forkHeaders,proto3" json:"fork_headers,omitempty"` +} + +func (m *QueryHeaderResponse) Reset() { *m = QueryHeaderResponse{} } +func (m *QueryHeaderResponse) String() string { return proto.CompactTextString(m) } +func (*QueryHeaderResponse) ProtoMessage() {} +func (*QueryHeaderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{3} +} +func (m *QueryHeaderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryHeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryHeaderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryHeaderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryHeaderResponse.Merge(m, src) +} +func (m *QueryHeaderResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryHeaderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryHeaderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryHeaderResponse proto.InternalMessageInfo + +func (m *QueryHeaderResponse) GetHeader() *IndexedHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *QueryHeaderResponse) GetForkHeaders() *Forks { + if m != nil { + return m.ForkHeaders + } + return nil +} + // QueryChainListRequest is request type for the Query/ChainList RPC method type QueryChainListRequest struct { + // pagination defines whether to have the pagination in the request + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` } func (m *QueryChainListRequest) Reset() { *m = QueryChainListRequest{} } func (m *QueryChainListRequest) String() string { 
return proto.CompactTextString(m) } func (*QueryChainListRequest) ProtoMessage() {} func (*QueryChainListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{2} + return fileDescriptor_2caab7ee15063236, []int{4} } func (m *QueryChainListRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -155,16 +261,26 @@ func (m *QueryChainListRequest) XXX_DiscardUnknown() { var xxx_messageInfo_QueryChainListRequest proto.InternalMessageInfo +func (m *QueryChainListRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + // QueryChainListResponse is response type for the Query/ChainList RPC method type QueryChainListResponse struct { + // chain_ids are IDs of the chains in ascending alphabetical order ChainIds []string `protobuf:"bytes,1,rep,name=chain_ids,json=chainIds,proto3" json:"chain_ids,omitempty"` + // pagination defines the pagination in the response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` } func (m *QueryChainListResponse) Reset() { *m = QueryChainListResponse{} } func (m *QueryChainListResponse) String() string { return proto.CompactTextString(m) } func (*QueryChainListResponse) ProtoMessage() {} func (*QueryChainListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{3} + return fileDescriptor_2caab7ee15063236, []int{5} } func (m *QueryChainListResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -200,6 +316,13 @@ func (m *QueryChainListResponse) GetChainIds() []string { return nil } +func (m *QueryChainListResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + // QueryChainInfoRequest is request type for the Query/ChainInfo RPC method. 
type QueryChainInfoRequest struct { ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` @@ -209,7 +332,7 @@ func (m *QueryChainInfoRequest) Reset() { *m = QueryChainInfoRequest{} } func (m *QueryChainInfoRequest) String() string { return proto.CompactTextString(m) } func (*QueryChainInfoRequest) ProtoMessage() {} func (*QueryChainInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{4} + return fileDescriptor_2caab7ee15063236, []int{6} } func (m *QueryChainInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -255,7 +378,7 @@ func (m *QueryChainInfoResponse) Reset() { *m = QueryChainInfoResponse{} func (m *QueryChainInfoResponse) String() string { return proto.CompactTextString(m) } func (*QueryChainInfoResponse) ProtoMessage() {} func (*QueryChainInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{5} + return fileDescriptor_2caab7ee15063236, []int{7} } func (m *QueryChainInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -291,26 +414,24 @@ func (m *QueryChainInfoResponse) GetChainInfo() *ChainInfo { return nil } -// QueryFinalizedChainInfoRequest is request type for the Query/FinalizedChainInfo RPC method. -type QueryFinalizedChainInfoRequest struct { - // chain_id is the ID of the CZ - ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - // prove indicates whether the querier wants to get proofs of this timestamp - Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` +// QueryEpochChainInfoRequest is request type for the Query/EpochChainInfo RPC method. 
+type QueryEpochChainInfoRequest struct { + EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` } -func (m *QueryFinalizedChainInfoRequest) Reset() { *m = QueryFinalizedChainInfoRequest{} } -func (m *QueryFinalizedChainInfoRequest) String() string { return proto.CompactTextString(m) } -func (*QueryFinalizedChainInfoRequest) ProtoMessage() {} -func (*QueryFinalizedChainInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{6} +func (m *QueryEpochChainInfoRequest) Reset() { *m = QueryEpochChainInfoRequest{} } +func (m *QueryEpochChainInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryEpochChainInfoRequest) ProtoMessage() {} +func (*QueryEpochChainInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{8} } -func (m *QueryFinalizedChainInfoRequest) XXX_Unmarshal(b []byte) error { +func (m *QueryEpochChainInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryFinalizedChainInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryEpochChainInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryFinalizedChainInfoRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryEpochChainInfoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -320,65 +441,50 @@ func (m *QueryFinalizedChainInfoRequest) XXX_Marshal(b []byte, deterministic boo return b[:n], nil } } -func (m *QueryFinalizedChainInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryFinalizedChainInfoRequest.Merge(m, src) +func (m *QueryEpochChainInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochChainInfoRequest.Merge(m, src) } -func (m 
*QueryFinalizedChainInfoRequest) XXX_Size() int { +func (m *QueryEpochChainInfoRequest) XXX_Size() int { return m.Size() } -func (m *QueryFinalizedChainInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryFinalizedChainInfoRequest.DiscardUnknown(m) +func (m *QueryEpochChainInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochChainInfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_QueryFinalizedChainInfoRequest proto.InternalMessageInfo +var xxx_messageInfo_QueryEpochChainInfoRequest proto.InternalMessageInfo -func (m *QueryFinalizedChainInfoRequest) GetChainId() string { +func (m *QueryEpochChainInfoRequest) GetEpochNum() uint64 { if m != nil { - return m.ChainId + return m.EpochNum } - return "" + return 0 } -func (m *QueryFinalizedChainInfoRequest) GetProve() bool { +func (m *QueryEpochChainInfoRequest) GetChainId() string { if m != nil { - return m.Prove + return m.ChainId } - return false + return "" } -// QueryFinalizedChainInfoResponse is response type for the Query/FinalizedChainInfo RPC method. 
-type QueryFinalizedChainInfoResponse struct { - // finalized_chain_info is the info of the CZ - FinalizedChainInfo *ChainInfo `protobuf:"bytes,1,opt,name=finalized_chain_info,json=finalizedChainInfo,proto3" json:"finalized_chain_info,omitempty"` - // epoch_info is the metadata of the last BTC-finalised epoch - EpochInfo *types.Epoch `protobuf:"bytes,2,opt,name=epoch_info,json=epochInfo,proto3" json:"epoch_info,omitempty"` - // raw_checkpoint is the raw checkpoint of this epoch - RawCheckpoint *types1.RawCheckpoint `protobuf:"bytes,3,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` - // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch - BtcSubmissionKey *types2.SubmissionKey `protobuf:"bytes,4,opt,name=btc_submission_key,json=btcSubmissionKey,proto3" json:"btc_submission_key,omitempty"` - // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block - ProofTxInBlock *types3.TxProof `protobuf:"bytes,5,opt,name=proof_tx_in_block,json=proofTxInBlock,proto3" json:"proof_tx_in_block,omitempty"` - // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch - ProofHeaderInEpoch *crypto.Proof `protobuf:"bytes,6,opt,name=proof_header_in_epoch,json=proofHeaderInEpoch,proto3" json:"proof_header_in_epoch,omitempty"` - // proof_epoch_sealed is the proof that the epoch is sealed - ProofEpochSealed *ProofEpochSealed `protobuf:"bytes,7,opt,name=proof_epoch_sealed,json=proofEpochSealed,proto3" json:"proof_epoch_sealed,omitempty"` - // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger - // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission - ProofEpochSubmitted []*types2.TransactionInfo `protobuf:"bytes,8,rep,name=proof_epoch_submitted,json=proofEpochSubmitted,proto3" json:"proof_epoch_submitted,omitempty"` +// QueryEpochChainInfoResponse is response type for the 
Query/EpochChainInfo RPC method. +type QueryEpochChainInfoResponse struct { + // chain_info is the info of the CZ + ChainInfo *ChainInfo `protobuf:"bytes,1,opt,name=chain_info,json=chainInfo,proto3" json:"chain_info,omitempty"` } -func (m *QueryFinalizedChainInfoResponse) Reset() { *m = QueryFinalizedChainInfoResponse{} } -func (m *QueryFinalizedChainInfoResponse) String() string { return proto.CompactTextString(m) } -func (*QueryFinalizedChainInfoResponse) ProtoMessage() {} -func (*QueryFinalizedChainInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2caab7ee15063236, []int{7} +func (m *QueryEpochChainInfoResponse) Reset() { *m = QueryEpochChainInfoResponse{} } +func (m *QueryEpochChainInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryEpochChainInfoResponse) ProtoMessage() {} +func (*QueryEpochChainInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{9} } -func (m *QueryFinalizedChainInfoResponse) XXX_Unmarshal(b []byte) error { +func (m *QueryEpochChainInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryFinalizedChainInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *QueryEpochChainInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryFinalizedChainInfoResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_QueryEpochChainInfoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -388,826 +494,2823 @@ func (m *QueryFinalizedChainInfoResponse) XXX_Marshal(b []byte, deterministic bo return b[:n], nil } } -func (m *QueryFinalizedChainInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryFinalizedChainInfoResponse.Merge(m, src) +func (m *QueryEpochChainInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochChainInfoResponse.Merge(m, src) } -func (m 
*QueryFinalizedChainInfoResponse) XXX_Size() int { +func (m *QueryEpochChainInfoResponse) XXX_Size() int { return m.Size() } -func (m *QueryFinalizedChainInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryFinalizedChainInfoResponse.DiscardUnknown(m) +func (m *QueryEpochChainInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochChainInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_QueryFinalizedChainInfoResponse proto.InternalMessageInfo +var xxx_messageInfo_QueryEpochChainInfoResponse proto.InternalMessageInfo -func (m *QueryFinalizedChainInfoResponse) GetFinalizedChainInfo() *ChainInfo { +func (m *QueryEpochChainInfoResponse) GetChainInfo() *ChainInfo { if m != nil { - return m.FinalizedChainInfo + return m.ChainInfo } return nil } -func (m *QueryFinalizedChainInfoResponse) GetEpochInfo() *types.Epoch { - if m != nil { - return m.EpochInfo - } - return nil +// QueryListHeadersRequest is request type for the Query/ListHeaders RPC method. +type QueryListHeadersRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // pagination defines whether to have the pagination in the request + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` } -func (m *QueryFinalizedChainInfoResponse) GetRawCheckpoint() *types1.RawCheckpoint { - if m != nil { - return m.RawCheckpoint +func (m *QueryListHeadersRequest) Reset() { *m = QueryListHeadersRequest{} } +func (m *QueryListHeadersRequest) String() string { return proto.CompactTextString(m) } +func (*QueryListHeadersRequest) ProtoMessage() {} +func (*QueryListHeadersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{10} +} +func (m *QueryListHeadersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryListHeadersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_QueryListHeadersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return nil +} +func (m *QueryListHeadersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryListHeadersRequest.Merge(m, src) +} +func (m *QueryListHeadersRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryListHeadersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryListHeadersRequest.DiscardUnknown(m) } -func (m *QueryFinalizedChainInfoResponse) GetBtcSubmissionKey() *types2.SubmissionKey { +var xxx_messageInfo_QueryListHeadersRequest proto.InternalMessageInfo + +func (m *QueryListHeadersRequest) GetChainId() string { if m != nil { - return m.BtcSubmissionKey + return m.ChainId } - return nil + return "" } -func (m *QueryFinalizedChainInfoResponse) GetProofTxInBlock() *types3.TxProof { +func (m *QueryListHeadersRequest) GetPagination() *query.PageRequest { if m != nil { - return m.ProofTxInBlock + return m.Pagination } return nil } -func (m *QueryFinalizedChainInfoResponse) GetProofHeaderInEpoch() *crypto.Proof { - if m != nil { - return m.ProofHeaderInEpoch +// QueryListHeadersResponse is response type for the Query/ListHeaders RPC method. 
+type QueryListHeadersResponse struct { + // headers is the list of headers + Headers []*IndexedHeader `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + // pagination defines the pagination in the response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryListHeadersResponse) Reset() { *m = QueryListHeadersResponse{} } +func (m *QueryListHeadersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryListHeadersResponse) ProtoMessage() {} +func (*QueryListHeadersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{11} +} +func (m *QueryListHeadersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryListHeadersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryListHeadersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return nil +} +func (m *QueryListHeadersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryListHeadersResponse.Merge(m, src) +} +func (m *QueryListHeadersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryListHeadersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryListHeadersResponse.DiscardUnknown(m) } -func (m *QueryFinalizedChainInfoResponse) GetProofEpochSealed() *ProofEpochSealed { +var xxx_messageInfo_QueryListHeadersResponse proto.InternalMessageInfo + +func (m *QueryListHeadersResponse) GetHeaders() []*IndexedHeader { if m != nil { - return m.ProofEpochSealed + return m.Headers } return nil } -func (m *QueryFinalizedChainInfoResponse) GetProofEpochSubmitted() []*types2.TransactionInfo { +func (m *QueryListHeadersResponse) GetPagination() *query.PageResponse { if m != nil { - return m.ProofEpochSubmitted + return m.Pagination } return nil } 
-func init() { - proto.RegisterType((*QueryParamsRequest)(nil), "babylon.zoneconcierge.v1.QueryParamsRequest") - proto.RegisterType((*QueryParamsResponse)(nil), "babylon.zoneconcierge.v1.QueryParamsResponse") - proto.RegisterType((*QueryChainListRequest)(nil), "babylon.zoneconcierge.v1.QueryChainListRequest") - proto.RegisterType((*QueryChainListResponse)(nil), "babylon.zoneconcierge.v1.QueryChainListResponse") - proto.RegisterType((*QueryChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryChainInfoRequest") - proto.RegisterType((*QueryChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryChainInfoResponse") - proto.RegisterType((*QueryFinalizedChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoRequest") - proto.RegisterType((*QueryFinalizedChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse") +// QueryListEpochHeadersRequest is request type for the Query/ListEpochHeaders RPC method. +type QueryListEpochHeadersRequest struct { + EpochNum uint64 `protobuf:"varint,1,opt,name=epoch_num,json=epochNum,proto3" json:"epoch_num,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` } -func init() { proto.RegisterFile("babylon/zoneconcierge/query.proto", fileDescriptor_2caab7ee15063236) } - -var fileDescriptor_2caab7ee15063236 = []byte{ - // 877 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0x5d, 0xef, 0x36, 0xdb, 0xcd, 0x54, 0x54, 0x65, 0xba, 0x05, 0x37, 0x80, 0x1b, 0x8c, 0x04, - 0x69, 0x05, 0x36, 0x5e, 0x54, 0xc1, 0x0a, 0x09, 0x89, 0x2d, 0x20, 0x56, 0x45, 0xd0, 0xba, 0xbb, - 0x12, 0x42, 0x20, 0x6b, 0xec, 0x4c, 0x1c, 0x6b, 0x93, 0x19, 0xd7, 0x33, 0x49, 0x93, 0x22, 0x6e, - 0x78, 0x01, 0x90, 0xb8, 0xe1, 0x09, 0xe0, 0x8a, 0xf7, 0xe8, 0x05, 0x17, 0x95, 0xb8, 0xe1, 0x0a, - 0xa1, 0x5d, 0x1e, 0x04, 0xf9, 0x9b, 0xb1, 0x63, 0xe7, 0x87, 0x0d, 0xbd, 0x89, 0xec, 0xf9, 
0xce, - 0x39, 0xdf, 0xf9, 0xc6, 0x33, 0x27, 0xe8, 0xd5, 0x90, 0x84, 0xd3, 0x01, 0x67, 0xee, 0x63, 0xce, - 0x68, 0xc4, 0x59, 0x94, 0xd0, 0x2c, 0xa6, 0xee, 0xc3, 0x11, 0xcd, 0xa6, 0x4e, 0x9a, 0x71, 0xc9, - 0xb1, 0xa9, 0x21, 0x4e, 0x0d, 0xe2, 0x8c, 0xbd, 0xd6, 0x6e, 0xcc, 0x63, 0x0e, 0x20, 0x37, 0x7f, - 0x52, 0xf8, 0xd6, 0xcb, 0x31, 0xe7, 0xf1, 0x80, 0xba, 0x24, 0x4d, 0x5c, 0xc2, 0x18, 0x97, 0x44, - 0x26, 0x9c, 0x89, 0xa2, 0x2a, 0x29, 0xeb, 0xd2, 0x6c, 0x98, 0x30, 0xe9, 0xca, 0x69, 0x4a, 0x85, - 0xfa, 0xd5, 0xd5, 0x57, 0x2a, 0xd5, 0x28, 0x9b, 0xa6, 0x92, 0xbb, 0x69, 0xc6, 0x79, 0x4f, 0x97, - 0x6f, 0x45, 0x5c, 0x0c, 0xb9, 0x70, 0x43, 0x22, 0xb4, 0x47, 0x77, 0xec, 0x85, 0x54, 0x12, 0xcf, - 0x4d, 0x49, 0x9c, 0x30, 0xe8, 0xa4, 0xb1, 0x56, 0x31, 0x59, 0x28, 0xa3, 0xa8, 0x4f, 0xa3, 0x93, - 0x94, 0x43, 0xcf, 0x89, 0xae, 0xdf, 0x5c, 0x5e, 0xaf, 0xbd, 0x69, 0x68, 0xb9, 0x49, 0xb3, 0x4a, - 0xc2, 0xe2, 0xea, 0x26, 0xb5, 0x5e, 0x5f, 0x0e, 0x59, 0x90, 0xb2, 0x0b, 0x1c, 0x4d, 0x79, 0xd4, - 0xcf, 0x21, 0x63, 0xaf, 0x7c, 0x9e, 0xc7, 0xd4, 0xbf, 0x49, 0x4a, 0x32, 0x32, 0x14, 0xf3, 0xee, - 0xeb, 0x98, 0xfa, 0x27, 0x02, 0xa8, 0xbd, 0x8b, 0xf0, 0xfd, 0xdc, 0xe9, 0x3d, 0xe0, 0xfb, 0xf4, - 0xe1, 0x88, 0x0a, 0x69, 0x1f, 0xa3, 0xab, 0xb5, 0x55, 0x91, 0x72, 0x26, 0x28, 0xfe, 0x00, 0x6d, - 0xab, 0x3e, 0xa6, 0xd1, 0x36, 0x3a, 0x97, 0xf6, 0xda, 0xce, 0xaa, 0xaf, 0xef, 0x28, 0xe6, 0xc1, - 0x85, 0x27, 0x7f, 0xdd, 0xd8, 0xf0, 0x35, 0xcb, 0x7e, 0x11, 0x5d, 0x03, 0xd9, 0x3b, 0x7d, 0x92, - 0xb0, 0xcf, 0x12, 0x21, 0x8b, 0x7e, 0xb7, 0xd1, 0x0b, 0xf3, 0x05, 0xdd, 0xf2, 0x25, 0xd4, 0x8c, - 0xf2, 0xc5, 0x20, 0xe9, 0xe6, 0x5d, 0xb7, 0x3a, 0x4d, 0x7f, 0x07, 0x16, 0x0e, 0xbb, 0xc2, 0xde, - 0xab, 0xea, 0x1d, 0xb2, 0x1e, 0xd7, 0x7a, 0xf8, 0x3a, 0xda, 0x29, 0x58, 0x60, 0xb5, 0xe9, 0x5f, - 0xd4, 0x24, 0xfb, 0xeb, 0x6a, 0x2b, 0xc5, 0xd1, 0xad, 0x0e, 0x10, 0xd2, 0x24, 0xd6, 0xe3, 0x7a, - 0xc2, 0xd7, 0x56, 0x4f, 0x38, 0x13, 0x50, 0x0e, 0xf3, 0x47, 0xfb, 0x3e, 0xb2, 0x40, 0xfd, 0x93, - 0x84, 0x91, 0x41, 0xf2, 0x98, 
0x76, 0xff, 0x87, 0x35, 0xbc, 0x8b, 0x1a, 0x69, 0xc6, 0xc7, 0xd4, - 0xdc, 0x6c, 0x1b, 0x9d, 0x1d, 0x5f, 0xbd, 0xd8, 0xbf, 0x34, 0xd0, 0x8d, 0x95, 0x9a, 0xda, 0xfa, - 0x31, 0xda, 0xed, 0x15, 0xd5, 0xe0, 0xd9, 0x86, 0xc0, 0xbd, 0x05, 0x79, 0xbc, 0x8f, 0x10, 0x9c, - 0x3e, 0x25, 0xb6, 0x09, 0x62, 0xad, 0x52, 0xac, 0x3c, 0x98, 0x63, 0xcf, 0xf9, 0x38, 0x7f, 0xf6, - 0x9b, 0xb0, 0x04, 0xd4, 0xcf, 0xd1, 0xe5, 0x8c, 0x3c, 0x0a, 0x66, 0x47, 0xdc, 0xdc, 0x02, 0xfa, - 0x1b, 0x25, 0xbd, 0x76, 0x17, 0x72, 0x0d, 0x9f, 0x3c, 0xba, 0x53, 0xae, 0xf9, 0xcf, 0x65, 0xd5, - 0x57, 0x7c, 0x8c, 0x70, 0x28, 0xa3, 0x40, 0x8c, 0xc2, 0x61, 0x22, 0x44, 0xc2, 0x59, 0x70, 0x42, - 0xa7, 0xe6, 0x85, 0x39, 0xcd, 0xfa, 0xfd, 0x1c, 0x7b, 0xce, 0x83, 0x12, 0x7f, 0x97, 0x4e, 0xfd, - 0x2b, 0xa1, 0x8c, 0x6a, 0x2b, 0xf8, 0x23, 0xf4, 0x3c, 0x44, 0x48, 0x20, 0x27, 0x41, 0xc2, 0x82, - 0x70, 0xc0, 0xa3, 0x13, 0xb3, 0x01, 0xaa, 0xd7, 0x9d, 0x59, 0xdc, 0x38, 0x2a, 0x86, 0x8e, 0x26, - 0xf7, 0x72, 0xb0, 0x7f, 0x19, 0x38, 0x47, 0x93, 0x43, 0x76, 0x90, 0x13, 0xf0, 0x5d, 0x74, 0x4d, - 0xa9, 0xf4, 0x29, 0xe9, 0xd2, 0x2c, 0x57, 0x82, 0x9d, 0x30, 0xb7, 0x41, 0xc9, 0xac, 0x2a, 0xa9, - 0xe0, 0x72, 0x94, 0x10, 0x06, 0xda, 0xa7, 0xc0, 0x3a, 0x64, 0xb0, 0x89, 0xf8, 0x4b, 0xa4, 0x56, - 0x95, 0x44, 0x20, 0x28, 0x19, 0xd0, 0xae, 0x79, 0x11, 0x94, 0x6e, 0xfd, 0xc7, 0x85, 0xcb, 0x39, - 0xa0, 0xf0, 0x00, 0x18, 0xfe, 0x95, 0x74, 0x6e, 0x05, 0x7f, 0x53, 0xd8, 0xd4, 0xca, 0xf9, 0x4e, - 0x48, 0x49, 0xbb, 0xe6, 0x4e, 0x7b, 0xab, 0x73, 0x69, 0xef, 0xe6, 0xea, 0x6d, 0x3c, 0xca, 0x08, - 0x13, 0x24, 0xca, 0x03, 0x14, 0x0e, 0xcb, 0xd5, 0x8a, 0x76, 0xa1, 0xb2, 0xf7, 0x5b, 0x03, 0x35, - 0xe0, 0xa0, 0xe2, 0x1f, 0x0c, 0xb4, 0xad, 0x02, 0x00, 0xbf, 0xb9, 0xda, 0xf1, 0x62, 0xee, 0xb4, - 0xde, 0x5a, 0x13, 0xad, 0x8e, 0xbd, 0xdd, 0xf9, 0xfe, 0x8f, 0x7f, 0x7e, 0xda, 0xb4, 0x71, 0xdb, - 0x5d, 0x1e, 0x78, 0x63, 0x4f, 0xe7, 0x22, 0xfe, 0xd9, 0x40, 0xcd, 0x32, 0x5c, 0xb0, 0x7b, 0x4e, - 0x9b, 0xf9, 0x7c, 0x6a, 0xbd, 0xbd, 0x3e, 0x61, 0x7d, 0x6b, 0x70, 
0x4f, 0x05, 0xfe, 0xb5, 0xb0, - 0x06, 0xf7, 0x66, 0x2d, 0x6b, 0x95, 0x3c, 0x59, 0xcf, 0x5a, 0x35, 0x2c, 0xec, 0x77, 0xc1, 0x9a, - 0x87, 0xdd, 0x73, 0xac, 0xc1, 0xad, 0x77, 0xbf, 0x2d, 0xd2, 0xea, 0x3b, 0xfc, 0xbb, 0x81, 0xf0, - 0x62, 0x08, 0xe1, 0xf7, 0xce, 0x71, 0xb0, 0x32, 0x0b, 0x5b, 0xfb, 0xcf, 0xc0, 0xd4, 0x43, 0x7c, - 0x08, 0x43, 0xbc, 0x8f, 0xf7, 0x57, 0x0f, 0xb1, 0x2c, 0x11, 0x2b, 0xe3, 0x1c, 0x7c, 0xf1, 0xe4, - 0xd4, 0x32, 0x9e, 0x9e, 0x5a, 0xc6, 0xdf, 0xa7, 0x96, 0xf1, 0xe3, 0x99, 0xb5, 0xf1, 0xf4, 0xcc, - 0xda, 0xf8, 0xf3, 0xcc, 0xda, 0xf8, 0xea, 0x76, 0x9c, 0xc8, 0xfe, 0x28, 0x74, 0x22, 0x3e, 0x2c, - 0xe4, 0x81, 0x56, 0xf6, 0x9a, 0xcc, 0x75, 0x83, 0x78, 0x08, 0xb7, 0xe1, 0x2f, 0xf5, 0x9d, 0x7f, - 0x03, 0x00, 0x00, 0xff, 0xff, 0x0f, 0x5d, 0x8e, 0xb7, 0x37, 0x09, 0x00, 0x00, +func (m *QueryListEpochHeadersRequest) Reset() { *m = QueryListEpochHeadersRequest{} } +func (m *QueryListEpochHeadersRequest) String() string { return proto.CompactTextString(m) } +func (*QueryListEpochHeadersRequest) ProtoMessage() {} +func (*QueryListEpochHeadersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{12} +} +func (m *QueryListEpochHeadersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryListEpochHeadersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryListEpochHeadersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryListEpochHeadersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryListEpochHeadersRequest.Merge(m, src) +} +func (m *QueryListEpochHeadersRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryListEpochHeadersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryListEpochHeadersRequest.DiscardUnknown(m) } -// Reference imports to suppress errors if they are not 
otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +var xxx_messageInfo_QueryListEpochHeadersRequest proto.InternalMessageInfo -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Parameters queries the parameters of the module. - Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) - // ChainList queries the list of chains that checkpoint to Babylon - ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) - // ChainInfo queries the latest info of a chain in Babylon's view - ChainInfo(ctx context.Context, in *QueryChainInfoRequest, opts ...grpc.CallOption) (*QueryChainInfoResponse, error) - // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs - FinalizedChainInfo(ctx context.Context, in *QueryFinalizedChainInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoResponse, error) +func (m *QueryListEpochHeadersRequest) GetEpochNum() uint64 { + if m != nil { + return m.EpochNum + } + return 0 } -type queryClient struct { - cc grpc1.ClientConn +func (m *QueryListEpochHeadersRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" } -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} +// QueryListEpochHeadersResponse is response type for the Query/ListEpochHeaders RPC method. 
+type QueryListEpochHeadersResponse struct { + // headers is the list of headers + Headers []*IndexedHeader `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` } -func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { - out := new(QueryParamsResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/Params", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (m *QueryListEpochHeadersResponse) Reset() { *m = QueryListEpochHeadersResponse{} } +func (m *QueryListEpochHeadersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryListEpochHeadersResponse) ProtoMessage() {} +func (*QueryListEpochHeadersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{13} } - -func (c *queryClient) ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) { - out := new(QueryChainListResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ChainList", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (m *QueryListEpochHeadersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (c *queryClient) ChainInfo(ctx context.Context, in *QueryChainInfoRequest, opts ...grpc.CallOption) (*QueryChainInfoResponse, error) { - out := new(QueryChainInfoResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ChainInfo", in, out, opts...) 
- if err != nil { - return nil, err +func (m *QueryListEpochHeadersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryListEpochHeadersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil +} +func (m *QueryListEpochHeadersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryListEpochHeadersResponse.Merge(m, src) +} +func (m *QueryListEpochHeadersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryListEpochHeadersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryListEpochHeadersResponse.DiscardUnknown(m) } -func (c *queryClient) FinalizedChainInfo(ctx context.Context, in *QueryFinalizedChainInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoResponse, error) { - out := new(QueryFinalizedChainInfoResponse) - err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/FinalizedChainInfo", in, out, opts...) - if err != nil { - return nil, err +var xxx_messageInfo_QueryListEpochHeadersResponse proto.InternalMessageInfo + +func (m *QueryListEpochHeadersResponse) GetHeaders() []*IndexedHeader { + if m != nil { + return m.Headers } - return out, nil + return nil } -// QueryServer is the server API for Query service. -type QueryServer interface { - // Parameters queries the parameters of the module. 
- Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) - // ChainList queries the list of chains that checkpoint to Babylon - ChainList(context.Context, *QueryChainListRequest) (*QueryChainListResponse, error) - // ChainInfo queries the latest info of a chain in Babylon's view - ChainInfo(context.Context, *QueryChainInfoRequest) (*QueryChainInfoResponse, error) - // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs - FinalizedChainInfo(context.Context, *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) +// QueryFinalizedChainInfoRequest is request type for the Query/FinalizedChainInfo RPC method. +type QueryFinalizedChainInfoRequest struct { + // chain_id is the ID of the CZ + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // prove indicates whether the querier wants to get proofs of this timestamp + Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` } -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
-type UnimplementedQueryServer struct { +func (m *QueryFinalizedChainInfoRequest) Reset() { *m = QueryFinalizedChainInfoRequest{} } +func (m *QueryFinalizedChainInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryFinalizedChainInfoRequest) ProtoMessage() {} +func (*QueryFinalizedChainInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{14} } - -func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +func (m *QueryFinalizedChainInfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (*UnimplementedQueryServer) ChainList(ctx context.Context, req *QueryChainListRequest) (*QueryChainListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ChainList not implemented") +func (m *QueryFinalizedChainInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedChainInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (*UnimplementedQueryServer) ChainInfo(ctx context.Context, req *QueryChainInfoRequest) (*QueryChainInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ChainInfo not implemented") +func (m *QueryFinalizedChainInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedChainInfoRequest.Merge(m, src) } -func (*UnimplementedQueryServer) FinalizedChainInfo(ctx context.Context, req *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizedChainInfo not implemented") +func (m *QueryFinalizedChainInfoRequest) XXX_Size() int { + return m.Size() } - -func RegisterQueryServer(s grpc1.Server, srv 
QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) +func (m *QueryFinalizedChainInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedChainInfoRequest.DiscardUnknown(m) } -func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryParamsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Params(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/Params", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) +var xxx_messageInfo_QueryFinalizedChainInfoRequest proto.InternalMessageInfo + +func (m *QueryFinalizedChainInfoRequest) GetChainId() string { + if m != nil { + return m.ChainId } - return interceptor(ctx, in, info, handler) + return "" } -func _Query_ChainList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryChainListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ChainList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/ChainList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ChainList(ctx, req.(*QueryChainListRequest)) +func (m *QueryFinalizedChainInfoRequest) GetProve() bool { + if m != nil { + return m.Prove } - return interceptor(ctx, in, info, handler) + return false } -func _Query_ChainInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryChainInfoRequest) - if err := dec(in); err != nil { - return 
nil, err - } - if interceptor == nil { - return srv.(QueryServer).ChainInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/ChainInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ChainInfo(ctx, req.(*QueryChainInfoRequest)) - } - return interceptor(ctx, in, info, handler) +// QueryFinalizedChainInfoResponse is response type for the Query/FinalizedChainInfo RPC method. +type QueryFinalizedChainInfoResponse struct { + // finalized_chain_info is the info of the CZ + FinalizedChainInfo *ChainInfo `protobuf:"bytes,1,opt,name=finalized_chain_info,json=finalizedChainInfo,proto3" json:"finalized_chain_info,omitempty"` + // epoch_info is the metadata of the last BTC-finalised epoch + EpochInfo *types.Epoch `protobuf:"bytes,2,opt,name=epoch_info,json=epochInfo,proto3" json:"epoch_info,omitempty"` + // raw_checkpoint is the raw checkpoint of this epoch + RawCheckpoint *types1.RawCheckpoint `protobuf:"bytes,3,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` + // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch + BtcSubmissionKey *types2.SubmissionKey `protobuf:"bytes,4,opt,name=btc_submission_key,json=btcSubmissionKey,proto3" json:"btc_submission_key,omitempty"` + // proof is the proof that the chain info is finalized + Proof *ProofFinalizedChainInfo `protobuf:"bytes,5,opt,name=proof,proto3" json:"proof,omitempty"` } -func _Query_FinalizedChainInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryFinalizedChainInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).FinalizedChainInfo(ctx, in) +func (m *QueryFinalizedChainInfoResponse) Reset() { *m = QueryFinalizedChainInfoResponse{} } +func (m 
*QueryFinalizedChainInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryFinalizedChainInfoResponse) ProtoMessage() {} +func (*QueryFinalizedChainInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{15} +} +func (m *QueryFinalizedChainInfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedChainInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedChainInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/babylon.zoneconcierge.v1.Query/FinalizedChainInfo", +} +func (m *QueryFinalizedChainInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedChainInfoResponse.Merge(m, src) +} +func (m *QueryFinalizedChainInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedChainInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedChainInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinalizedChainInfoResponse proto.InternalMessageInfo + +func (m *QueryFinalizedChainInfoResponse) GetFinalizedChainInfo() *ChainInfo { + if m != nil { + return m.FinalizedChainInfo } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).FinalizedChainInfo(ctx, req.(*QueryFinalizedChainInfoRequest)) + return nil +} + +func (m *QueryFinalizedChainInfoResponse) GetEpochInfo() *types.Epoch { + if m != nil { + return m.EpochInfo } - return interceptor(ctx, in, info, handler) + return nil } -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "babylon.zoneconcierge.v1.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Params", - Handler: _Query_Params_Handler, - }, - { - 
MethodName: "ChainList", - Handler: _Query_ChainList_Handler, - }, - { - MethodName: "ChainInfo", - Handler: _Query_ChainInfo_Handler, - }, - { - MethodName: "FinalizedChainInfo", - Handler: _Query_FinalizedChainInfo_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "babylon/zoneconcierge/query.proto", +func (m *QueryFinalizedChainInfoResponse) GetRawCheckpoint() *types1.RawCheckpoint { + if m != nil { + return m.RawCheckpoint + } + return nil } -func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *QueryFinalizedChainInfoResponse) GetBtcSubmissionKey() *types2.SubmissionKey { + if m != nil { + return m.BtcSubmissionKey } - return dAtA[:n], nil + return nil } -func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *QueryFinalizedChainInfoResponse) GetProof() *ProofFinalizedChainInfo { + if m != nil { + return m.Proof + } + return nil } -func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +// QueryFinalizedChainInfoUntilHeightRequest is request type for the Query/FinalizedChainInfoUntilHeight RPC method. 
+type QueryFinalizedChainInfoUntilHeightRequest struct { + // chain_id is the ID of the CZ + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // height is the height of the CZ chain + // such that the returned finalised chain info will be no later than this height + Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + // prove indicates whether the querier wants to get proofs of this timestamp + Prove bool `protobuf:"varint,3,opt,name=prove,proto3" json:"prove,omitempty"` } -func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *QueryFinalizedChainInfoUntilHeightRequest) Reset() { + *m = QueryFinalizedChainInfoUntilHeightRequest{} } - -func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *QueryFinalizedChainInfoUntilHeightRequest) String() string { + return proto.CompactTextString(m) } - -func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) +func (*QueryFinalizedChainInfoUntilHeightRequest) ProtoMessage() {} +func (*QueryFinalizedChainInfoUntilHeightRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{16} +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err 
} - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) + return b[:n], nil } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest.Merge(m, src) +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest.DiscardUnknown(m) } -func (m *QueryChainListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_QueryFinalizedChainInfoUntilHeightRequest proto.InternalMessageInfo + +func (m *QueryFinalizedChainInfoUntilHeightRequest) GetChainId() string { + if m != nil { + return m.ChainId } - return dAtA[:n], nil + return "" } -func (m *QueryChainListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *QueryFinalizedChainInfoUntilHeightRequest) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 } -func (m *QueryChainListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +func (m *QueryFinalizedChainInfoUntilHeightRequest) GetProve() bool { + if m != nil { + return m.Prove + } + return false } -func (m *QueryChainListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +// QueryFinalizedChainInfoUntilHeightResponse is response type for the Query/FinalizedChainInfoUntilHeight RPC method. 
+type QueryFinalizedChainInfoUntilHeightResponse struct { + // finalized_chain_info is the info of the CZ + FinalizedChainInfo *ChainInfo `protobuf:"bytes,1,opt,name=finalized_chain_info,json=finalizedChainInfo,proto3" json:"finalized_chain_info,omitempty"` + // epoch_info is the metadata of the last BTC-finalised epoch + EpochInfo *types.Epoch `protobuf:"bytes,2,opt,name=epoch_info,json=epochInfo,proto3" json:"epoch_info,omitempty"` + // raw_checkpoint is the raw checkpoint of this epoch + RawCheckpoint *types1.RawCheckpoint `protobuf:"bytes,3,opt,name=raw_checkpoint,json=rawCheckpoint,proto3" json:"raw_checkpoint,omitempty"` + // btc_submission_key is position of two BTC txs that include the raw checkpoint of this epoch + BtcSubmissionKey *types2.SubmissionKey `protobuf:"bytes,4,opt,name=btc_submission_key,json=btcSubmissionKey,proto3" json:"btc_submission_key,omitempty"` + // proof is the proof that the chain info is finalized + Proof *ProofFinalizedChainInfo `protobuf:"bytes,5,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) Reset() { + *m = QueryFinalizedChainInfoUntilHeightResponse{} +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) String() string { + return proto.CompactTextString(m) +} +func (*QueryFinalizedChainInfoUntilHeightResponse) ProtoMessage() {} +func (*QueryFinalizedChainInfoUntilHeightResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2caab7ee15063236, []int{17} +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil +} +func (m 
*QueryFinalizedChainInfoUntilHeightResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse.Merge(m, src) +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse.DiscardUnknown(m) } -func (m *QueryChainListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var xxx_messageInfo_QueryFinalizedChainInfoUntilHeightResponse proto.InternalMessageInfo + +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetFinalizedChainInfo() *ChainInfo { + if m != nil { + return m.FinalizedChainInfo + } + return nil } -func (m *QueryChainListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChainIds) > 0 { - for iNdEx := len(m.ChainIds) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ChainIds[iNdEx]) - copy(dAtA[i:], m.ChainIds[iNdEx]) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainIds[iNdEx]))) - i-- - dAtA[i] = 0xa - } +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetEpochInfo() *types.Epoch { + if m != nil { + return m.EpochInfo } - return len(dAtA) - i, nil + return nil } -func (m *QueryChainInfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetRawCheckpoint() *types1.RawCheckpoint { + if m != nil { + return m.RawCheckpoint } - return dAtA[:n], nil + return nil } -func (m *QueryChainInfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetBtcSubmissionKey() *types2.SubmissionKey { + if m != nil { + return m.BtcSubmissionKey + } + return nil 
} -func (m *QueryChainInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - copy(dAtA[i:], m.ChainId) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0xa +func (m *QueryFinalizedChainInfoUntilHeightResponse) GetProof() *ProofFinalizedChainInfo { + if m != nil { + return m.Proof } - return len(dAtA) - i, nil + return nil } -func (m *QueryChainInfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "babylon.zoneconcierge.v1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "babylon.zoneconcierge.v1.QueryParamsResponse") + proto.RegisterType((*QueryHeaderRequest)(nil), "babylon.zoneconcierge.v1.QueryHeaderRequest") + proto.RegisterType((*QueryHeaderResponse)(nil), "babylon.zoneconcierge.v1.QueryHeaderResponse") + proto.RegisterType((*QueryChainListRequest)(nil), "babylon.zoneconcierge.v1.QueryChainListRequest") + proto.RegisterType((*QueryChainListResponse)(nil), "babylon.zoneconcierge.v1.QueryChainListResponse") + proto.RegisterType((*QueryChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryChainInfoRequest") + proto.RegisterType((*QueryChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryChainInfoResponse") + proto.RegisterType((*QueryEpochChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryEpochChainInfoRequest") + proto.RegisterType((*QueryEpochChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryEpochChainInfoResponse") + proto.RegisterType((*QueryListHeadersRequest)(nil), "babylon.zoneconcierge.v1.QueryListHeadersRequest") + proto.RegisterType((*QueryListHeadersResponse)(nil), "babylon.zoneconcierge.v1.QueryListHeadersResponse") + proto.RegisterType((*QueryListEpochHeadersRequest)(nil), "babylon.zoneconcierge.v1.QueryListEpochHeadersRequest") + 
proto.RegisterType((*QueryListEpochHeadersResponse)(nil), "babylon.zoneconcierge.v1.QueryListEpochHeadersResponse") + proto.RegisterType((*QueryFinalizedChainInfoRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoRequest") + proto.RegisterType((*QueryFinalizedChainInfoResponse)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoResponse") + proto.RegisterType((*QueryFinalizedChainInfoUntilHeightRequest)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoUntilHeightRequest") + proto.RegisterType((*QueryFinalizedChainInfoUntilHeightResponse)(nil), "babylon.zoneconcierge.v1.QueryFinalizedChainInfoUntilHeightResponse") +} + +func init() { proto.RegisterFile("babylon/zoneconcierge/query.proto", fileDescriptor_2caab7ee15063236) } + +var fileDescriptor_2caab7ee15063236 = []byte{ + // 1155 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0x8f, 0xf3, 0xaf, 0xc9, 0x0b, 0xad, 0xaa, 0x21, 0x94, 0xc5, 0x69, 0x37, 0xc1, 0x48, 0x25, + 0xad, 0x8a, 0x8d, 0x97, 0x96, 0x12, 0x21, 0x51, 0x65, 0x53, 0x92, 0x86, 0xa2, 0xd2, 0x18, 0xc2, + 0x01, 0x21, 0xad, 0x6c, 0xef, 0xac, 0xd7, 0x4a, 0xd6, 0xb3, 0xf5, 0x78, 0xb7, 0xd9, 0x86, 0x70, + 0xe0, 0x0b, 0x80, 0xc4, 0x05, 0x71, 0x45, 0x2a, 0x12, 0x87, 0x7e, 0x8e, 0x22, 0xf5, 0x50, 0x89, + 0x0b, 0x27, 0x84, 0x12, 0xbe, 0x06, 0x12, 0xf2, 0xcc, 0xd8, 0x6b, 0xef, 0xda, 0xdd, 0x3f, 0xe4, + 0xc0, 0x81, 0xdb, 0x8e, 0xe7, 0xbd, 0xdf, 0xfb, 0xbd, 0x7f, 0xf3, 0x9e, 0x16, 0x5e, 0xb7, 0x4c, + 0xab, 0xb3, 0x4f, 0x3c, 0xed, 0x11, 0xf1, 0xb0, 0x4d, 0x3c, 0xdb, 0xc5, 0xbe, 0x83, 0xb5, 0x07, + 0x2d, 0xec, 0x77, 0xd4, 0xa6, 0x4f, 0x02, 0x82, 0x0a, 0x42, 0x44, 0x4d, 0x89, 0xa8, 0x6d, 0x5d, + 0x5e, 0x74, 0x88, 0x43, 0x98, 0x90, 0x16, 0xfe, 0xe2, 0xf2, 0xf2, 0x45, 0x87, 0x10, 0x67, 0x1f, + 0x6b, 0x66, 0xd3, 0xd5, 0x4c, 0xcf, 0x23, 0x81, 0x19, 0xb8, 0xc4, 0xa3, 0xe2, 0xf6, 0xaa, 0x4d, + 0x68, 0x83, 0x50, 0xcd, 0x32, 0xa9, 0x30, 0xa3, 0xb5, 0x75, 0x0b, 
0x07, 0xa6, 0xae, 0x35, 0x4d, + 0xc7, 0xf5, 0x98, 0xb0, 0x90, 0x2d, 0x46, 0xe4, 0xac, 0xc0, 0xb6, 0xeb, 0xd8, 0xde, 0x6b, 0x12, + 0xd7, 0x0b, 0xb4, 0xe0, 0x40, 0xdc, 0x5f, 0xc9, 0xbe, 0x4f, 0x9d, 0x84, 0x68, 0xec, 0x67, 0xf7, + 0xc6, 0xf5, 0x9c, 0xa4, 0x9f, 0xf2, 0xe5, 0x6c, 0x91, 0x3e, 0x28, 0x25, 0x92, 0xc3, 0x4d, 0x62, + 0xd7, 0x43, 0x91, 0xb6, 0x1e, 0xff, 0xee, 0x95, 0x49, 0x87, 0xb5, 0x69, 0xfa, 0x66, 0x83, 0xf6, + 0xb2, 0x4f, 0xcb, 0xa4, 0xa3, 0xcc, 0x44, 0x95, 0x45, 0x40, 0x3b, 0x21, 0xd3, 0xfb, 0x4c, 0xdf, + 0xc0, 0x0f, 0x5a, 0x98, 0x06, 0xca, 0x2e, 0xbc, 0x9c, 0xfa, 0x4a, 0x9b, 0xc4, 0xa3, 0x18, 0x7d, + 0x00, 0xb3, 0xdc, 0x4e, 0x41, 0x5a, 0x91, 0x56, 0x17, 0x4a, 0x2b, 0x6a, 0x5e, 0x02, 0x55, 0xae, + 0x59, 0x9e, 0x7e, 0xfa, 0xc7, 0xf2, 0x84, 0x21, 0xb4, 0x94, 0x2d, 0x61, 0xec, 0x0e, 0x36, 0xab, + 0xd8, 0x17, 0xc6, 0xd0, 0x6b, 0x30, 0x67, 0xd7, 0x4d, 0xd7, 0xab, 0xb8, 0x55, 0x86, 0x3b, 0x6f, + 0x9c, 0x61, 0xe7, 0xed, 0x2a, 0xba, 0x00, 0xb3, 0x75, 0xec, 0x3a, 0xf5, 0xa0, 0x30, 0xb9, 0x22, + 0xad, 0x4e, 0x1b, 0xe2, 0xa4, 0xfc, 0x28, 0x09, 0x82, 0x11, 0x92, 0x20, 0x78, 0x2b, 0x94, 0x0f, + 0xbf, 0x08, 0x82, 0x6f, 0xe6, 0x13, 0xdc, 0xf6, 0xaa, 0xf8, 0x00, 0x57, 0x05, 0x80, 0x50, 0x43, + 0x65, 0x78, 0xa9, 0x46, 0xfc, 0xbd, 0x0a, 0x3f, 0x52, 0x66, 0x76, 0xa1, 0xb4, 0x9c, 0x0f, 0xb3, + 0x49, 0xfc, 0x3d, 0x6a, 0x2c, 0x84, 0x4a, 0x1c, 0x8a, 0x2a, 0x15, 0x78, 0x85, 0x71, 0xdb, 0x08, + 0x9d, 0xf8, 0xd8, 0xa5, 0x41, 0xe4, 0xe8, 0x26, 0x40, 0xb7, 0x10, 0x05, 0xc3, 0xcb, 0x2a, 0xaf, + 0x5a, 0x35, 0xac, 0x5a, 0x95, 0x17, 0x8d, 0xa8, 0x5a, 0xf5, 0xbe, 0xe9, 0x60, 0xa1, 0x6b, 0x24, + 0x34, 0x95, 0xaf, 0xe1, 0x42, 0xaf, 0x01, 0xe1, 0xff, 0x12, 0xcc, 0x47, 0xa1, 0x0c, 0x73, 0x34, + 0xb5, 0x3a, 0x6f, 0xcc, 0x89, 0x58, 0x52, 0xb4, 0x95, 0x32, 0x3f, 0x29, 0x02, 0x34, 0xc8, 0x3c, + 0x47, 0x4e, 0xd9, 0x2f, 0x25, 0x1d, 0xdc, 0xf6, 0x6a, 0x64, 0x70, 0x26, 0x95, 0x2f, 0x93, 0x9c, + 0xb9, 0x8e, 0xe0, 0x5c, 0x06, 0x10, 0x4a, 0x5e, 0x8d, 0x88, 0xa8, 0xbc, 0x91, 0x1f, 0xf0, 0x2e, + 0x00, 
0x77, 0x35, 0xfc, 0xa9, 0x7c, 0x06, 0x32, 0x43, 0xff, 0x30, 0xec, 0x95, 0x3e, 0x5a, 0x4b, + 0x30, 0xcf, 0x9a, 0xa8, 0xe2, 0xb5, 0x1a, 0xcc, 0xc0, 0xb4, 0x31, 0xc7, 0x3e, 0xdc, 0x6b, 0x35, + 0x52, 0x9c, 0x27, 0xd3, 0x9c, 0x4d, 0x58, 0xca, 0x44, 0x3d, 0x45, 0xe2, 0x5f, 0xc1, 0xab, 0xcc, + 0x44, 0x98, 0x45, 0x51, 0x3f, 0x43, 0xb4, 0xc5, 0x66, 0x46, 0x26, 0xc7, 0x29, 0xa4, 0xc7, 0x12, + 0x14, 0xfa, 0xcd, 0x0b, 0xf7, 0xd6, 0xe1, 0x4c, 0xd4, 0x05, 0x61, 0x25, 0x8d, 0xd0, 0x4c, 0x91, + 0xde, 0xe9, 0x55, 0xdc, 0xe7, 0x70, 0x31, 0xe6, 0xc9, 0xb2, 0xd1, 0x13, 0xab, 0x71, 0x33, 0x6c, + 0xc1, 0xa5, 0x1c, 0xdc, 0x53, 0x0b, 0x82, 0xb2, 0x03, 0x45, 0x66, 0x63, 0xd3, 0xf5, 0xcc, 0x7d, + 0xf7, 0x11, 0xae, 0x8e, 0xd0, 0x36, 0x68, 0x11, 0x66, 0x9a, 0x3e, 0x69, 0x63, 0x46, 0x7c, 0xce, + 0xe0, 0x07, 0xe5, 0xa7, 0x29, 0x58, 0xce, 0xc5, 0x14, 0xcc, 0x77, 0x61, 0xb1, 0x16, 0xdd, 0x56, + 0xc6, 0xab, 0x53, 0x54, 0xeb, 0x83, 0x47, 0x6b, 0x00, 0x3c, 0xd2, 0x0c, 0x8c, 0xa7, 0x54, 0x8e, + 0xc1, 0xe2, 0x59, 0xd5, 0xd6, 0x55, 0x16, 0x4f, 0x83, 0xe7, 0x85, 0xa9, 0xde, 0x83, 0x73, 0xbe, + 0xf9, 0xb0, 0xd2, 0x9d, 0x7a, 0x85, 0xa9, 0x9e, 0x47, 0x3a, 0x35, 0x1e, 0x43, 0x0c, 0xc3, 0x7c, + 0xb8, 0x11, 0x7f, 0x33, 0xce, 0xfa, 0xc9, 0x23, 0xda, 0x05, 0x64, 0x05, 0x76, 0x85, 0xb6, 0xac, + 0x86, 0x4b, 0xa9, 0x4b, 0xbc, 0xca, 0x1e, 0xee, 0x14, 0xa6, 0x7b, 0x30, 0xd3, 0x23, 0xbb, 0xad, + 0xab, 0x9f, 0xc6, 0xf2, 0x77, 0x71, 0xc7, 0x38, 0x6f, 0x05, 0x76, 0xea, 0x0b, 0xda, 0x62, 0x21, + 0x27, 0xb5, 0xc2, 0x0c, 0x43, 0xd2, 0x5f, 0x30, 0xe3, 0x42, 0xb1, 0x8c, 0x14, 0x70, 0x7d, 0x25, + 0x80, 0x2b, 0x39, 0x49, 0xda, 0xf5, 0x02, 0x77, 0xff, 0x0e, 0x1b, 0x65, 0xe3, 0x0f, 0xc1, 0x6e, + 0x6d, 0x4c, 0x25, 0x6b, 0xe3, 0xc9, 0x14, 0x5c, 0x1d, 0xc6, 0xec, 0xff, 0x65, 0xf2, 0xdf, 0x28, + 0x93, 0xd2, 0xe3, 0xb3, 0x30, 0xc3, 0x12, 0x86, 0xbe, 0x95, 0x60, 0x96, 0xef, 0x4d, 0xe8, 0x5a, + 0x3e, 0x5c, 0xff, 0xba, 0x26, 0xbf, 0x35, 0xa4, 0x34, 0xcf, 0xb9, 0xb2, 0xfa, 0xcd, 0x6f, 0x7f, + 0x7d, 0x3f, 0xa9, 0xa0, 0x15, 0x2d, 0x7b, 
0x4f, 0x6c, 0xeb, 0x62, 0x9d, 0x44, 0x4f, 0x24, 0x98, + 0xe5, 0xef, 0xd9, 0x40, 0x46, 0xa9, 0x9d, 0x6e, 0x20, 0xa3, 0xf4, 0xde, 0xa6, 0x6c, 0x31, 0x46, + 0xeb, 0xe8, 0x56, 0x3e, 0xa3, 0x6e, 0x6d, 0x6a, 0x87, 0x51, 0xa7, 0x1c, 0x69, 0xfc, 0x91, 0xd5, + 0x0e, 0x79, 0x4b, 0x1c, 0xa1, 0x1f, 0x24, 0x98, 0x8f, 0xd7, 0x22, 0xa4, 0x0d, 0x60, 0xd1, 0xbb, + 0xa1, 0xc9, 0x6f, 0x0f, 0xaf, 0x30, 0x7c, 0x2c, 0x19, 0x5b, 0x8a, 0x7e, 0x8e, 0xa8, 0xb1, 0x2a, + 0x1f, 0x8a, 0x5a, 0x62, 0x48, 0x0c, 0x47, 0x2d, 0x39, 0x01, 0x94, 0x9b, 0x8c, 0x9a, 0x8e, 0xb4, + 0x11, 0x83, 0x8a, 0x7e, 0x95, 0xe0, 0x5c, 0x7a, 0xe7, 0x41, 0xd7, 0x07, 0x58, 0xcf, 0x5c, 0xbc, + 0xe4, 0x1b, 0x23, 0x6a, 0x09, 0xe2, 0x1f, 0x31, 0xe2, 0xb7, 0x51, 0x79, 0xd4, 0x6a, 0x60, 0x8f, + 0x08, 0xd5, 0x0e, 0xe3, 0x5d, 0xe0, 0x08, 0xfd, 0x22, 0xc1, 0x42, 0x62, 0xbb, 0x41, 0xfa, 0x00, + 0x4a, 0xfd, 0x8b, 0x98, 0x5c, 0x1a, 0x45, 0x45, 0xb8, 0x70, 0x9d, 0xb9, 0xa0, 0xa2, 0x6b, 0xf9, + 0x2e, 0x88, 0xfd, 0x20, 0x19, 0xf8, 0x67, 0x12, 0x9c, 0xef, 0x5d, 0x45, 0xd0, 0xbb, 0x43, 0x98, + 0xcf, 0xd8, 0x89, 0xe4, 0x9b, 0x23, 0xeb, 0x0d, 0xdf, 0x8c, 0xfd, 0xdc, 0xb3, 0x62, 0xff, 0x4c, + 0x02, 0xd4, 0xff, 0xee, 0xa1, 0xf7, 0x06, 0x10, 0xcb, 0x5d, 0x94, 0xe4, 0xb5, 0x31, 0x34, 0x85, + 0x53, 0xeb, 0xcc, 0xa9, 0xf7, 0xd1, 0x5a, 0xbe, 0x53, 0x59, 0x73, 0x30, 0x99, 0x9d, 0xbf, 0x25, + 0xb8, 0xf4, 0xc2, 0xa1, 0x8a, 0x36, 0x46, 0xe6, 0xd7, 0xbf, 0x09, 0xc8, 0xb7, 0xff, 0x1d, 0x88, + 0xf0, 0x77, 0x87, 0xf9, 0x7b, 0x17, 0x6d, 0x8f, 0xed, 0xaf, 0xc6, 0xdf, 0xd4, 0xf8, 0x6d, 0x2d, + 0x7f, 0xf2, 0xf4, 0xb8, 0x28, 0x3d, 0x3f, 0x2e, 0x4a, 0x7f, 0x1e, 0x17, 0xa5, 0xef, 0x4e, 0x8a, + 0x13, 0xcf, 0x4f, 0x8a, 0x13, 0xbf, 0x9f, 0x14, 0x27, 0xbe, 0xb8, 0xe1, 0xb8, 0x41, 0xbd, 0x65, + 0xa9, 0x36, 0x69, 0x44, 0xe6, 0x18, 0x4c, 0x6c, 0xfb, 0xa0, 0xc7, 0x7a, 0xd0, 0x69, 0x62, 0x6a, + 0xcd, 0xb2, 0xbf, 0x20, 0xde, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0x1f, 0xf5, 0xc1, 0xb9, 0x2a, + 0x12, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not 
otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Parameters queries the parameters of the module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) + // Header queries the CZ header and fork headers at a given height. + Header(ctx context.Context, in *QueryHeaderRequest, opts ...grpc.CallOption) (*QueryHeaderResponse, error) + // ChainList queries the list of chains that checkpoint to Babylon + ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) + // ChainInfo queries the latest info of a chain in Babylon's view + ChainInfo(ctx context.Context, in *QueryChainInfoRequest, opts ...grpc.CallOption) (*QueryChainInfoResponse, error) + // EpochChainInfo queries the latest info of a chain in a given epoch of Babylon's view + EpochChainInfo(ctx context.Context, in *QueryEpochChainInfoRequest, opts ...grpc.CallOption) (*QueryEpochChainInfoResponse, error) + // ListHeaders queries the headers of a chain in Babylon's view, with pagination support + ListHeaders(ctx context.Context, in *QueryListHeadersRequest, opts ...grpc.CallOption) (*QueryListHeadersResponse, error) + // ListEpochHeaders queries the headers of a chain timestamped in a given epoch of Babylon, with pagination support + ListEpochHeaders(ctx context.Context, in *QueryListEpochHeadersRequest, opts ...grpc.CallOption) (*QueryListEpochHeadersResponse, error) + // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs + FinalizedChainInfo(ctx 
context.Context, in *QueryFinalizedChainInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoResponse, error) + // FinalizedChainInfoUntilHeight queries the BTC-finalised info no later than the provided CZ height, with proofs + FinalizedChainInfoUntilHeight(ctx context.Context, in *QueryFinalizedChainInfoUntilHeightRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoUntilHeightResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/Params", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *QueryChainInfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *queryClient) Header(ctx context.Context, in *QueryHeaderRequest, opts ...grpc.CallOption) (*QueryHeaderResponse, error) { + out := new(QueryHeaderResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/Header", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *QueryChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ChainInfo != nil { - { - size, err := m.ChainInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (c *queryClient) ChainList(ctx context.Context, in *QueryChainListRequest, opts ...grpc.CallOption) (*QueryChainListResponse, error) { + out := new(QueryChainListResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ChainList", in, out, opts...) 
+ if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *QueryFinalizedChainInfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *queryClient) ChainInfo(ctx context.Context, in *QueryChainInfoRequest, opts ...grpc.CallOption) (*QueryChainInfoResponse, error) { + out := new(QueryChainInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ChainInfo", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *QueryFinalizedChainInfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *queryClient) EpochChainInfo(ctx context.Context, in *QueryEpochChainInfoRequest, opts ...grpc.CallOption) (*QueryEpochChainInfoResponse, error) { + out := new(QueryEpochChainInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/EpochChainInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *QueryFinalizedChainInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Prove { - i-- - if m.Prove { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - copy(dAtA[i:], m.ChainId) - i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0xa +func (c *queryClient) ListHeaders(ctx context.Context, in *QueryListHeadersRequest, opts ...grpc.CallOption) (*QueryListHeadersResponse, error) { + out := new(QueryListHeadersResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ListHeaders", in, out, opts...) 
+ if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *QueryFinalizedChainInfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *queryClient) ListEpochHeaders(ctx context.Context, in *QueryListEpochHeadersRequest, opts ...grpc.CallOption) (*QueryListEpochHeadersResponse, error) { + out := new(QueryListEpochHeadersResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/ListEpochHeaders", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *QueryFinalizedChainInfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *queryClient) FinalizedChainInfo(ctx context.Context, in *QueryFinalizedChainInfoRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoResponse, error) { + out := new(QueryFinalizedChainInfoResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/FinalizedChainInfo", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil } -func (m *QueryFinalizedChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ProofEpochSubmitted) > 0 { - for iNdEx := len(m.ProofEpochSubmitted) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProofEpochSubmitted[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.ProofEpochSealed != nil { - { - size, err := m.ProofEpochSealed.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.ProofHeaderInEpoch != nil { - { - size, err := m.ProofHeaderInEpoch.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.ProofTxInBlock != nil { - { - size, err := m.ProofTxInBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.BtcSubmissionKey != nil { - { - size, err := m.BtcSubmissionKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.RawCheckpoint != nil { - { - size, err := m.RawCheckpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.EpochInfo != nil { - { - size, err := m.EpochInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.FinalizedChainInfo != nil { - { - size, err := m.FinalizedChainInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { 
- return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (c *queryClient) FinalizedChainInfoUntilHeight(ctx context.Context, in *QueryFinalizedChainInfoUntilHeightRequest, opts ...grpc.CallOption) (*QueryFinalizedChainInfoUntilHeightResponse, error) { + out := new(QueryFinalizedChainInfoUntilHeightResponse) + err := c.cc.Invoke(ctx, "/babylon.zoneconcierge.v1.Query/FinalizedChainInfoUntilHeight", in, out, opts...) + if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base +// QueryServer is the server API for Query service. +type QueryServer interface { + // Parameters queries the parameters of the module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) + // Header queries the CZ header and fork headers at a given height. 
+ Header(context.Context, *QueryHeaderRequest) (*QueryHeaderResponse, error) + // ChainList queries the list of chains that checkpoint to Babylon + ChainList(context.Context, *QueryChainListRequest) (*QueryChainListResponse, error) + // ChainInfo queries the latest info of a chain in Babylon's view + ChainInfo(context.Context, *QueryChainInfoRequest) (*QueryChainInfoResponse, error) + // EpochChainInfo queries the latest info of a chain in a given epoch of Babylon's view + EpochChainInfo(context.Context, *QueryEpochChainInfoRequest) (*QueryEpochChainInfoResponse, error) + // ListHeaders queries the headers of a chain in Babylon's view, with pagination support + ListHeaders(context.Context, *QueryListHeadersRequest) (*QueryListHeadersResponse, error) + // ListEpochHeaders queries the headers of a chain timestamped in a given epoch of Babylon, with pagination support + ListEpochHeaders(context.Context, *QueryListEpochHeadersRequest) (*QueryListEpochHeadersResponse, error) + // FinalizedChainInfo queries the BTC-finalised info of a chain, with proofs + FinalizedChainInfo(context.Context, *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) + // FinalizedChainInfoUntilHeight queries the BTC-finalised info no later than the provided CZ height, with proofs + FinalizedChainInfoUntilHeight(context.Context, *QueryFinalizedChainInfoUntilHeightRequest) (*QueryFinalizedChainInfoUntilHeightResponse, error) } -func (m *QueryParamsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { } -func (m *QueryParamsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Params.Size() - n += 1 + l + sovQuery(uint64(l)) - return n +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} +func (*UnimplementedQueryServer) Header(ctx context.Context, req *QueryHeaderRequest) (*QueryHeaderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Header not implemented") +} +func (*UnimplementedQueryServer) ChainList(ctx context.Context, req *QueryChainListRequest) (*QueryChainListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChainList not implemented") +} +func (*UnimplementedQueryServer) ChainInfo(ctx context.Context, req *QueryChainInfoRequest) (*QueryChainInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChainInfo not implemented") +} +func (*UnimplementedQueryServer) EpochChainInfo(ctx context.Context, req *QueryEpochChainInfoRequest) (*QueryEpochChainInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EpochChainInfo not implemented") +} +func (*UnimplementedQueryServer) ListHeaders(ctx context.Context, req *QueryListHeadersRequest) (*QueryListHeadersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListHeaders not implemented") +} +func (*UnimplementedQueryServer) ListEpochHeaders(ctx context.Context, req *QueryListEpochHeadersRequest) (*QueryListEpochHeadersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListEpochHeaders not implemented") +} +func (*UnimplementedQueryServer) FinalizedChainInfo(ctx context.Context, req *QueryFinalizedChainInfoRequest) (*QueryFinalizedChainInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizedChainInfo not 
implemented") +} +func (*UnimplementedQueryServer) FinalizedChainInfoUntilHeight(ctx context.Context, req *QueryFinalizedChainInfoUntilHeightRequest) (*QueryFinalizedChainInfoUntilHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizedChainInfoUntilHeight not implemented") } -func (m *QueryChainListRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) } -func (m *QueryChainListResponse) Size() (n int) { - if m == nil { - return 0 +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err } - var l int - _ = l - if len(m.ChainIds) > 0 { - for _, s := range m.ChainIds { - l = len(s) - n += 1 + l + sovQuery(uint64(l)) - } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) } - return n + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *QueryChainInfoRequest) Size() (n int) { - if m == nil { - return 0 +func _Query_Header_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryHeaderRequest) + if err := dec(in); err != nil { + return nil, err } - var l int - _ = l - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) + if interceptor == nil { + return srv.(QueryServer).Header(ctx, in) } - return n + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/Header", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Header(ctx, req.(*QueryHeaderRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *QueryChainInfoResponse) Size() (n int) { - if m == nil { - return 0 +func _Query_ChainList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryChainListRequest) + if err := dec(in); err != nil { + return nil, err } - var l int - _ = l - if m.ChainInfo != nil { - l = m.ChainInfo.Size() - n += 1 + l + sovQuery(uint64(l)) + if interceptor == nil { + return srv.(QueryServer).ChainList(ctx, in) } - return n + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/ChainList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ChainList(ctx, req.(*QueryChainListRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *QueryFinalizedChainInfoRequest) Size() (n int) { - if m == nil { - return 0 +func _Query_ChainInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryChainInfoRequest) + if err := dec(in); err != nil { + return nil, err } - var l int - _ = l - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) + if interceptor == nil { + return srv.(QueryServer).ChainInfo(ctx, in) } - if m.Prove { - n += 2 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/ChainInfo", } - return n + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ChainInfo(ctx, req.(*QueryChainInfoRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *QueryFinalizedChainInfoResponse) Size() (n int) { - if m == nil { - return 0 +func _Query_EpochChainInfo_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryEpochChainInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).EpochChainInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/EpochChainInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).EpochChainInfo(ctx, req.(*QueryEpochChainInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ListHeaders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryListHeadersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ListHeaders(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/ListHeaders", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ListHeaders(ctx, req.(*QueryListHeadersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ListEpochHeaders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryListEpochHeadersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ListEpochHeaders(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/ListEpochHeaders", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ListEpochHeaders(ctx, req.(*QueryListEpochHeadersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Query_FinalizedChainInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryFinalizedChainInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).FinalizedChainInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/FinalizedChainInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).FinalizedChainInfo(ctx, req.(*QueryFinalizedChainInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_FinalizedChainInfoUntilHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryFinalizedChainInfoUntilHeightRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).FinalizedChainInfoUntilHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/babylon.zoneconcierge.v1.Query/FinalizedChainInfoUntilHeight", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).FinalizedChainInfoUntilHeight(ctx, req.(*QueryFinalizedChainInfoUntilHeightRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "babylon.zoneconcierge.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + { + MethodName: "Header", + Handler: _Query_Header_Handler, + }, + { + MethodName: "ChainList", + Handler: _Query_ChainList_Handler, + }, + { + MethodName: "ChainInfo", + Handler: _Query_ChainInfo_Handler, + }, + { + MethodName: "EpochChainInfo", + Handler: _Query_EpochChainInfo_Handler, + }, + { + MethodName: 
"ListHeaders", + Handler: _Query_ListHeaders_Handler, + }, + { + MethodName: "ListEpochHeaders", + Handler: _Query_ListEpochHeaders_Handler, + }, + { + MethodName: "FinalizedChainInfo", + Handler: _Query_FinalizedChainInfo_Handler, + }, + { + MethodName: "FinalizedChainInfoUntilHeight", + Handler: _Query_FinalizedChainInfoUntilHeight_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "babylon/zoneconcierge/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.FinalizedChainInfo != nil { - l = m.FinalizedChainInfo.Size() - n += 1 + l + sovQuery(uint64(l)) - } - if m.EpochInfo != nil { - l = m.EpochInfo.Size() - n += 1 + l + sovQuery(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.RawCheckpoint != nil { - l = m.RawCheckpoint.Size() - n += 1 + l + sovQuery(uint64(l)) + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) } - if m.BtcSubmissionKey != nil { - l = m.BtcSubmissionKey.Size() - n += 1 + l + sovQuery(uint64(l)) + 
i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryHeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.ProofTxInBlock != nil { - l = m.ProofTxInBlock.Size() - n += 1 + l + sovQuery(uint64(l)) + return dAtA[:n], nil +} + +func (m *QueryHeaderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryHeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 } - if m.ProofHeaderInEpoch != nil { - l = m.ProofHeaderInEpoch.Size() - n += 1 + l + sovQuery(uint64(l)) + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa } - if m.ProofEpochSealed != nil { - l = m.ProofEpochSealed.Size() - n += 1 + l + sovQuery(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *QueryHeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.ProofEpochSubmitted) > 0 { - for _, e := range m.ProofEpochSubmitted { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) + return dAtA[:n], nil +} + +func (m *QueryHeaderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryHeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ForkHeaders != nil { + { + size, err := m.ForkHeaders.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { 
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryChainListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChainListRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChainListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryChainListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChainListResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChainListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ChainIds) > 0 { + for iNdEx := len(m.ChainIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ChainIds[iNdEx]) + copy(dAtA[i:], m.ChainIds[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainIds[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryChainInfoRequest) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChainInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChainInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryChainInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChainInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ChainInfo != nil { + { + size, err := m.ChainInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryEpochChainInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochChainInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochChainInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], 
m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + if m.EpochNum != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EpochNum)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryEpochChainInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochChainInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ChainInfo != nil { + { + size, err := m.ChainInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryListHeadersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryListHeadersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryListHeadersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryListHeadersResponse) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryListHeadersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryListHeadersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryListEpochHeadersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryListEpochHeadersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryListEpochHeadersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + if m.EpochNum != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.EpochNum)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryListEpochHeadersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *QueryListEpochHeadersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryListEpochHeadersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryFinalizedChainInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedChainInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedChainInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Prove { + i-- + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryFinalizedChainInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedChainInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedChainInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + 
size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.BtcSubmissionKey != nil { + { + size, err := m.BtcSubmissionKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RawCheckpoint != nil { + { + size, err := m.RawCheckpoint.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EpochInfo != nil { + { + size, err := m.EpochInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.FinalizedChainInfo != nil { + { + size, err := m.FinalizedChainInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryFinalizedChainInfoUntilHeightRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedChainInfoUntilHeightRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedChainInfoUntilHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Prove { + i-- + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Height != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, 
uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.BtcSubmissionKey != nil { + { + size, err := m.BtcSubmissionKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RawCheckpoint != nil { + { + size, err := m.RawCheckpoint.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EpochInfo != nil { + { + size, err := m.EpochInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.FinalizedChainInfo != nil { + { + size, err := m.FinalizedChainInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = 
uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryHeaderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovQuery(uint64(m.Height)) + } + return n +} + +func (m *QueryHeaderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.ForkHeaders != nil { + l = m.ForkHeaders.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryChainListRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryChainListResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChainIds) > 0 { + for _, s := range m.ChainIds { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryChainInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryChainInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChainInfo != nil { + l = m.ChainInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryEpochChainInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) + } + l = len(m.ChainId) + if l > 0 { + n 
+= 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryEpochChainInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChainInfo != nil { + l = m.ChainInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryListHeadersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryListHeadersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryListEpochHeadersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNum != 0 { + n += 1 + sovQuery(uint64(m.EpochNum)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryListEpochHeadersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryFinalizedChainInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *QueryFinalizedChainInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FinalizedChainInfo != nil { + l = m.FinalizedChainInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.EpochInfo != nil { + l = m.EpochInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.RawCheckpoint != nil { + l = m.RawCheckpoint.Size() + n += 1 + l + 
sovQuery(uint64(l)) + } + if m.BtcSubmissionKey != nil { + l = m.BtcSubmissionKey.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryFinalizedChainInfoUntilHeightRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovQuery(uint64(m.Height)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *QueryFinalizedChainInfoUntilHeightResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FinalizedChainInfo != nil { + l = m.FinalizedChainInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.EpochInfo != nil { + l = m.EpochInfo.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.RawCheckpoint != nil { + l = m.RawCheckpoint.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.BtcSubmissionKey != nil { + l = m.BtcSubmissionKey.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryHeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryHeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryHeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryHeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryHeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryHeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &IndexedHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForkHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ForkHeaders == nil { + m.ForkHeaders = &Forks{} + } + if err := m.ForkHeaders.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChainListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChainListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChainListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := 
m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChainListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChainListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChainListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainIds = append(m.ChainIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChainInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChainInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChainInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChainInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChainInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChainInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ChainInfo == nil { + m.ChainInfo = &ChainInfo{} + } + if err := 
m.ChainInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - return n -} -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { +func (m *QueryEpochChainInfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1230,12 +3333,63 @@ func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryEpochChainInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryEpochChainInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + } + m.EpochNum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) @@ -1257,7 +3411,7 @@ func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { +func (m *QueryEpochChainInfoResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1280,15 +3434,15 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryEpochChainInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryEpochChainInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ChainInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1315,7 +3469,10 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ChainInfo == nil { + m.ChainInfo = &ChainInfo{} + } + if err := m.ChainInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1340,7 +3497,7 @@ func (m *QueryParamsResponse) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryChainListRequest) Unmarshal(dAtA []byte) error { +func (m *QueryListHeadersRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1363,12 +3520,80 @@ func (m *QueryChainListRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryChainListRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryListHeadersRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryChainListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryListHeadersRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) @@ -1390,7 +3615,7 @@ func (m *QueryChainListRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryChainListResponse) Unmarshal(dAtA []byte) error { +func (m *QueryListHeadersResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1413,17 +3638,17 @@ func (m *QueryChainListResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryChainListResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryListHeadersResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryChainListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryListHeadersResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainIds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -1433,23 +3658,61 @@ func (m *QueryChainListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthQuery } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthQuery } if postIndex > l { return io.ErrUnexpectedEOF } - m.ChainIds = append(m.ChainIds, string(dAtA[iNdEx:postIndex])) + 
m.Headers = append(m.Headers, &IndexedHeader{}) + if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -1472,7 +3735,7 @@ func (m *QueryChainListResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryChainInfoRequest) Unmarshal(dAtA []byte) error { +func (m *QueryListEpochHeadersRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1495,13 +3758,32 @@ func (m *QueryChainInfoRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryChainInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryListEpochHeadersRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryChainInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryListEpochHeadersRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNum", wireType) + } + m.EpochNum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) } @@ -1554,7 +3836,7 @@ func (m *QueryChainInfoRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryChainInfoResponse) Unmarshal(dAtA []byte) error { +func (m *QueryListEpochHeadersResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1577,15 +3859,15 @@ func (m *QueryChainInfoResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryChainInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryListEpochHeadersResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryChainInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryListEpochHeadersResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1612,10 +3894,8 @@ func (m *QueryChainInfoResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ChainInfo == nil { - m.ChainInfo = &ChainInfo{} - } - if err := m.ChainInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Headers = append(m.Headers, &IndexedHeader{}) + if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1917,7 +4197,250 @@ func (m *QueryFinalizedChainInfoResponse) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ProofTxInBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &ProofFinalizedChainInfo{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinalizedChainInfoUntilHeightRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Prove = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinalizedChainInfoUntilHeightResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinalizedChainInfoUntilHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizedChainInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FinalizedChainInfo == nil { + m.FinalizedChainInfo = &ChainInfo{} + } + if err := m.FinalizedChainInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1944,16 +4467,16 @@ func (m *QueryFinalizedChainInfoResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProofTxInBlock == nil { - m.ProofTxInBlock = &types3.TxProof{} + if m.EpochInfo == nil { + m.EpochInfo = &types.Epoch{} } - if err := m.ProofTxInBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EpochInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofHeaderInEpoch", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RawCheckpoint", wireType) } var msglen int 
for shift := uint(0); ; shift += 7 { @@ -1980,16 +4503,16 @@ func (m *QueryFinalizedChainInfoResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProofHeaderInEpoch == nil { - m.ProofHeaderInEpoch = &crypto.Proof{} + if m.RawCheckpoint == nil { + m.RawCheckpoint = &types1.RawCheckpoint{} } - if err := m.ProofHeaderInEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RawCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSealed", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BtcSubmissionKey", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2016,16 +4539,16 @@ func (m *QueryFinalizedChainInfoResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProofEpochSealed == nil { - m.ProofEpochSealed = &ProofEpochSealed{} + if m.BtcSubmissionKey == nil { + m.BtcSubmissionKey = &types2.SubmissionKey{} } - if err := m.ProofEpochSealed.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.BtcSubmissionKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSubmitted", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2052,8 +4575,10 @@ func (m *QueryFinalizedChainInfoResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ProofEpochSubmitted = append(m.ProofEpochSubmitted, &types2.TransactionInfo{}) - if err := m.ProofEpochSubmitted[len(m.ProofEpochSubmitted)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Proof == nil { + m.Proof = &ProofFinalizedChainInfo{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { return err } iNdEx = postIndex diff --git a/x/zoneconcierge/types/query.pb.gw.go b/x/zoneconcierge/types/query.pb.gw.go index 3dac1f83e..4a9974981 100644 --- a/x/zoneconcierge/types/query.pb.gw.go +++ b/x/zoneconcierge/types/query.pb.gw.go @@ -51,26 +51,402 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal } +func request_Query_Header_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryHeaderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + + msg, err := client.Header(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Header_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryHeaderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + 
protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + + msg, err := server.Header(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_ChainList_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + func request_Query_ChainList_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq QueryChainListRequest var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ChainList_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.ChainList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err -} +} + +func local_request_Query_ChainList_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChainListRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ChainList_0); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ChainList(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChainInfoRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := client.ChainInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChainInfoRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := server.ChainInfo(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_EpochChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochChainInfoRequest + var metadata 
runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := client.EpochChainInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_EpochChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryEpochChainInfoRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := 
server.EpochChainInfo(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_ListHeaders_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_ListHeaders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryListHeadersRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ListHeaders_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListHeaders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ListHeaders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryListHeadersRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, 
parameter: %s, error: %v", "chain_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ListHeaders_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListHeaders(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ListEpochHeaders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryListEpochHeadersRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } + + msg, err := client.ListEpochHeaders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ListEpochHeaders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryListEpochHeadersRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = 
pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + val, ok = pathParams["epoch_num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "epoch_num") + } + + protoReq.EpochNum, err = runtime.Uint64(val) -func local_request_Query_ChainList_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryChainListRequest - var metadata runtime.ServerMetadata + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "epoch_num", err) + } - msg, err := server.ChainList(ctx, &protoReq) + msg, err := server.ListEpochHeaders(ctx, &protoReq) return msg, metadata, err } -func request_Query_ChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryChainInfoRequest +var ( + filter_Query_FinalizedChainInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_FinalizedChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedChainInfoRequest var metadata runtime.ServerMetadata var ( @@ -91,13 +467,20 @@ func request_Query_ChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) } - 
msg, err := client.ChainInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfo_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.FinalizedChainInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -func local_request_Query_ChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryChainInfoRequest +func local_request_Query_FinalizedChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedChainInfoRequest var metadata runtime.ServerMetadata var ( @@ -118,17 +501,24 @@ func local_request_Query_ChainInfo_0(ctx context.Context, marshaler runtime.Mars return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) } - msg, err := server.ChainInfo(ctx, &protoReq) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfo_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.FinalizedChainInfo(ctx, &protoReq) return msg, metadata, err } var ( - filter_Query_FinalizedChainInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} + filter_Query_FinalizedChainInfoUntilHeight_0 = 
&utilities.DoubleArray{Encoding: map[string]int{"chain_id": 0, "height": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} ) -func request_Query_FinalizedChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryFinalizedChainInfoRequest +func request_Query_FinalizedChainInfoUntilHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedChainInfoUntilHeightRequest var metadata runtime.ServerMetadata var ( @@ -149,20 +539,31 @@ func request_Query_FinalizedChainInfo_0(ctx context.Context, marshaler runtime.M return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) } + val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfo_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfoUntilHeight_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.FinalizedChainInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.FinalizedChainInfoUntilHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -func 
local_request_Query_FinalizedChainInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryFinalizedChainInfoRequest +func local_request_Query_FinalizedChainInfoUntilHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedChainInfoUntilHeightRequest var metadata runtime.ServerMetadata var ( @@ -183,14 +584,25 @@ func local_request_Query_FinalizedChainInfo_0(ctx context.Context, marshaler run return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) } + val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfo_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_FinalizedChainInfoUntilHeight_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.FinalizedChainInfo(ctx, &protoReq) + msg, err := server.FinalizedChainInfoUntilHeight(ctx, &protoReq) return msg, metadata, err } @@ -224,6 +636,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_Header_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer 
cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Header_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Header_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_ChainList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -270,6 +705,75 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_EpochChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_EpochChainInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, 
w, req, err) + return + } + + forward_Query_EpochChainInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ListHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ListHeaders_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ListHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ListEpochHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ListEpochHeaders_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ListEpochHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_Query_FinalizedChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -293,6 +797,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv }) + mux.Handle("GET", pattern_Query_FinalizedChainInfoUntilHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_FinalizedChainInfoUntilHeight_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_FinalizedChainInfoUntilHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -354,6 +881,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_Header_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Header_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Header_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_Query_ChainList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -394,6 +941,66 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_EpochChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_EpochChainInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_EpochChainInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ListHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ListHeaders_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ListHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ListEpochHeaders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ListEpochHeaders_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ListEpochHeaders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_Query_FinalizedChainInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -414,25 +1021,65 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("GET", pattern_Query_FinalizedChainInfoUntilHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_FinalizedChainInfoUntilHeight_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_FinalizedChainInfoUntilHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } var ( pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_Header_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "chain_id", "header", "height"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_ChainList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"babylon", "zoneconcierge", "v1", "chains"}, "", runtime.AssumeColonVerbOpt(false))) pattern_Query_ChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_EpochChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "chain_info", "chain_id", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_ListHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "headers", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_ListEpochHeaders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"babylon", "zoneconcierge", "v1", "headers", "chain_id", "epochs", "epoch_num"}, "", runtime.AssumeColonVerbOpt(false))) + pattern_Query_FinalizedChainInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + + 
pattern_Query_FinalizedChainInfoUntilHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 5}, []string{"babylon", "zoneconcierge", "v1", "finalized_chain_info", "chain_id", "height"}, "", runtime.AssumeColonVerbOpt(false))) ) var ( forward_Query_Params_0 = runtime.ForwardResponseMessage + forward_Query_Header_0 = runtime.ForwardResponseMessage + forward_Query_ChainList_0 = runtime.ForwardResponseMessage forward_Query_ChainInfo_0 = runtime.ForwardResponseMessage + forward_Query_EpochChainInfo_0 = runtime.ForwardResponseMessage + + forward_Query_ListHeaders_0 = runtime.ForwardResponseMessage + + forward_Query_ListEpochHeaders_0 = runtime.ForwardResponseMessage + forward_Query_FinalizedChainInfo_0 = runtime.ForwardResponseMessage + + forward_Query_FinalizedChainInfoUntilHeight_0 = runtime.ForwardResponseMessage ) diff --git a/x/zoneconcierge/types/types.go b/x/zoneconcierge/types/types.go index ab1254f4c..6e95fb34e 100644 --- a/x/zoneconcierge/types/types.go +++ b/x/zoneconcierge/types/types.go @@ -1 +1,9 @@ package types + +// IsLatestHeader checks if a given header is higher than the latest header in chain info +func (ci *ChainInfo) IsLatestHeader(header *IndexedHeader) bool { + if ci.LatestHeader != nil && ci.LatestHeader.Height > header.Height { + return false + } + return true +} diff --git a/x/zoneconcierge/types/zoneconcierge.go b/x/zoneconcierge/types/zoneconcierge.go index 7a50dfff6..be4071344 100644 --- a/x/zoneconcierge/types/zoneconcierge.go +++ b/x/zoneconcierge/types/zoneconcierge.go @@ -1,5 +1,10 @@ package types +import ( + "bytes" + "fmt" +) + func (p *ProofEpochSealed) ValidateBasic() error { if p.ValidatorSet == nil { return ErrInvalidProofEpochSealed.Wrap("ValidatorSet is nil") @@ -12,3 +17,77 @@ func (p *ProofEpochSealed) ValidateBasic() error { } return nil } + +func (ih *IndexedHeader) ValidateBasic() error { + if len(ih.ChainId) == 0 { + return fmt.Errorf("empty ChainID") + } 
else if len(ih.Hash) == 0 { + return fmt.Errorf("empty Hash") + } else if ih.BabylonHeader == nil { + return fmt.Errorf("nil BabylonHeader") + } else if len(ih.BabylonTxHash) == 0 { + return fmt.Errorf("empty BabylonTxHash") + } + return nil +} + +func (ih *IndexedHeader) Equal(ih2 *IndexedHeader) bool { + if ih.ValidateBasic() != nil || ih2.ValidateBasic() != nil { + return false + } + + if ih.ChainId != ih2.ChainId { + return false + } else if !bytes.Equal(ih.Hash, ih2.Hash) { + return false + } else if ih.Height != ih2.Height { + return false + } else if !bytes.Equal(ih.BabylonHeader.LastCommitHash, ih2.BabylonHeader.LastCommitHash) { + return false + } else if ih.BabylonEpoch != ih2.BabylonEpoch { + return false + } + return bytes.Equal(ih.BabylonTxHash, ih2.BabylonTxHash) +} + +func (ci *ChainInfo) Equal(ci2 *ChainInfo) bool { + if ci.ValidateBasic() != nil || ci2.ValidateBasic() != nil { + return false + } + + if ci.ChainId != ci2.ChainId { + return false + } + if !ci.LatestHeader.Equal(ci2.LatestHeader) { + return false + } + if len(ci.LatestForks.Headers) != len(ci2.LatestForks.Headers) { + return false + } + for i := 0; i < len(ci.LatestForks.Headers); i++ { + if !ci.LatestForks.Headers[i].Equal(ci2.LatestForks.Headers[i]) { + return false + } + } + return ci.TimestampedHeadersCount == ci2.TimestampedHeadersCount +} + +func (ci *ChainInfo) ValidateBasic() error { + if len(ci.ChainId) == 0 { + return ErrInvalidChainInfo.Wrap("ChainID is empty") + } else if ci.LatestHeader == nil { + return ErrInvalidChainInfo.Wrap("LatestHeader is nil") + } else if ci.LatestForks == nil { + return ErrInvalidChainInfo.Wrap("LatestForks is nil") + } + if err := ci.LatestHeader.ValidateBasic(); err != nil { + return err + } + for _, forkHeader := range ci.LatestForks.Headers { + if err := forkHeader.ValidateBasic(); err != nil { + return err + } + } + + return nil +} diff --git a/x/zoneconcierge/types/zoneconcierge.pb.go b/x/zoneconcierge/types/zoneconcierge.pb.go index 
3931f274c..bfb3066fc 100644 --- a/x/zoneconcierge/types/zoneconcierge.pb.go +++ b/x/zoneconcierge/types/zoneconcierge.pb.go @@ -5,6 +5,7 @@ package types import ( fmt "fmt" + types2 "github.com/babylonchain/babylon/x/btccheckpoint/types" types1 "github.com/babylonchain/babylon/x/checkpointing/types" proto "github.com/gogo/protobuf/proto" crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" @@ -180,10 +181,12 @@ func (m *Forks) GetHeaders() []*IndexedHeader { type ChainInfo struct { // chain_id is the ID of the chain ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - // latest_header is the latest header in the canonical chain of CZ + // latest_header is the latest header in CZ's canonical chain LatestHeader *IndexedHeader `protobuf:"bytes,2,opt,name=latest_header,json=latestHeader,proto3" json:"latest_header,omitempty"` // latest_forks is the latest forks, formed as a series of IndexedHeader (from low to high) LatestForks *Forks `protobuf:"bytes,3,opt,name=latest_forks,json=latestForks,proto3" json:"latest_forks,omitempty"` + // timestamped_headers_count is the number of timestamped headers in CZ's canonical chain + TimestampedHeadersCount uint64 `protobuf:"varint,4,opt,name=timestamped_headers_count,json=timestampedHeadersCount,proto3" json:"timestamped_headers_count,omitempty"` } func (m *ChainInfo) Reset() { *m = ChainInfo{} } @@ -240,6 +243,13 @@ func (m *ChainInfo) GetLatestForks() *Forks { return nil } +func (m *ChainInfo) GetTimestampedHeadersCount() uint64 { + if m != nil { + return m.TimestampedHeadersCount + } + return 0 +} + // ProofEpochSealed is the proof that an epoch is sealed by the sealer header, i.e., the 2nd header of the next epoch // With the access of metadata // - Metadata of this epoch, which includes the sealer header @@ -313,11 +323,86 @@ func (m *ProofEpochSealed) GetProofEpochValSet() *crypto.ProofOps { return nil } +// ProofFinalizedChainInfo is a set of proofs that attest 
a chain info is BTC-finalised +type ProofFinalizedChainInfo struct { + // proof_tx_in_block is the proof that tx that carries the header is included in a certain Babylon block + ProofTxInBlock *types.TxProof `protobuf:"bytes,4,opt,name=proof_tx_in_block,json=proofTxInBlock,proto3" json:"proof_tx_in_block,omitempty"` + // proof_header_in_epoch is the proof that the Babylon header is in a certain epoch + ProofHeaderInEpoch *crypto.Proof `protobuf:"bytes,5,opt,name=proof_header_in_epoch,json=proofHeaderInEpoch,proto3" json:"proof_header_in_epoch,omitempty"` + // proof_epoch_sealed is the proof that the epoch is sealed + ProofEpochSealed *ProofEpochSealed `protobuf:"bytes,6,opt,name=proof_epoch_sealed,json=proofEpochSealed,proto3" json:"proof_epoch_sealed,omitempty"` + // proof_epoch_submitted is the proof that the epoch's checkpoint is included in BTC ledger + // It is the two TransactionInfo in the best (i.e., earliest) checkpoint submission + ProofEpochSubmitted []*types2.TransactionInfo `protobuf:"bytes,7,rep,name=proof_epoch_submitted,json=proofEpochSubmitted,proto3" json:"proof_epoch_submitted,omitempty"` +} + +func (m *ProofFinalizedChainInfo) Reset() { *m = ProofFinalizedChainInfo{} } +func (m *ProofFinalizedChainInfo) String() string { return proto.CompactTextString(m) } +func (*ProofFinalizedChainInfo) ProtoMessage() {} +func (*ProofFinalizedChainInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c76d28ce8dde4532, []int{4} +} +func (m *ProofFinalizedChainInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProofFinalizedChainInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofFinalizedChainInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProofFinalizedChainInfo) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ProofFinalizedChainInfo.Merge(m, src) +} +func (m *ProofFinalizedChainInfo) XXX_Size() int { + return m.Size() +} +func (m *ProofFinalizedChainInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ProofFinalizedChainInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofFinalizedChainInfo proto.InternalMessageInfo + +func (m *ProofFinalizedChainInfo) GetProofTxInBlock() *types.TxProof { + if m != nil { + return m.ProofTxInBlock + } + return nil +} + +func (m *ProofFinalizedChainInfo) GetProofHeaderInEpoch() *crypto.Proof { + if m != nil { + return m.ProofHeaderInEpoch + } + return nil +} + +func (m *ProofFinalizedChainInfo) GetProofEpochSealed() *ProofEpochSealed { + if m != nil { + return m.ProofEpochSealed + } + return nil +} + +func (m *ProofFinalizedChainInfo) GetProofEpochSubmitted() []*types2.TransactionInfo { + if m != nil { + return m.ProofEpochSubmitted + } + return nil +} + func init() { proto.RegisterType((*IndexedHeader)(nil), "babylon.zoneconcierge.v1.IndexedHeader") proto.RegisterType((*Forks)(nil), "babylon.zoneconcierge.v1.Forks") proto.RegisterType((*ChainInfo)(nil), "babylon.zoneconcierge.v1.ChainInfo") proto.RegisterType((*ProofEpochSealed)(nil), "babylon.zoneconcierge.v1.ProofEpochSealed") + proto.RegisterType((*ProofFinalizedChainInfo)(nil), "babylon.zoneconcierge.v1.ProofFinalizedChainInfo") } func init() { @@ -325,41 +410,52 @@ func init() { } var fileDescriptor_c76d28ce8dde4532 = []byte{ - // 537 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xee, 0x36, 0x69, 0x4a, 0x37, 0x49, 0x89, 0x8c, 0x84, 0x4c, 0x01, 0x13, 0xa5, 0x52, 0x09, - 0x07, 0x6c, 0x11, 0xc4, 0x19, 0x11, 0x54, 0xd4, 0x16, 0xa4, 0x22, 0x07, 0x15, 0x89, 0x8b, 0xb5, - 0xb1, 0x27, 0xd9, 0x55, 0xdc, 0x5d, 0xcb, 0x5e, 0xa2, 0x84, 0xa7, 0xe0, 0x71, 0x78, 0x04, 0x8e, - 0x3d, 0x72, 0x44, 0x09, 0x2f, 0xc1, 0x0d, 0x65, 0xbc, 0xce, 0x0f, 0xa2, 0xc0, 0xc5, 0xf2, 0xb7, - 0x33, 0xf3, 
0xed, 0x37, 0xdf, 0xcc, 0xd2, 0x47, 0x7d, 0xd6, 0x9f, 0xc6, 0x4a, 0x7a, 0x9f, 0x94, - 0x84, 0x50, 0xc9, 0x50, 0x40, 0x3a, 0x84, 0x4d, 0xe4, 0x26, 0xa9, 0xd2, 0xca, 0xb2, 0x4d, 0xaa, - 0xbb, 0x19, 0x1c, 0x3f, 0x39, 0x38, 0x2c, 0x48, 0x42, 0x0e, 0xe1, 0x28, 0x51, 0x42, 0x6a, 0x21, - 0x87, 0x5e, 0x3f, 0xce, 0x82, 0x11, 0x4c, 0xf3, 0xf2, 0x83, 0xa3, 0x3f, 0x27, 0xad, 0x90, 0xc9, - 0xbb, 0xa7, 0x41, 0x46, 0x90, 0x5e, 0x0a, 0xa9, 0x3d, 0x3d, 0x4d, 0x20, 0xcb, 0xbf, 0x26, 0x7a, - 0x7f, 0x2d, 0x1a, 0xa6, 0xd3, 0x44, 0x2b, 0x2f, 0x49, 0x95, 0x1a, 0xe4, 0xe1, 0xd6, 0x0f, 0x42, - 0xeb, 0xa7, 0x32, 0x82, 0x09, 0x44, 0x27, 0xc0, 0x22, 0x48, 0xad, 0x3b, 0xf4, 0x46, 0xc8, 0x99, - 0x90, 0x81, 0x88, 0x6c, 0xd2, 0x24, 0xed, 0x3d, 0x7f, 0x17, 0xf1, 0x69, 0x64, 0x59, 0xb4, 0xcc, - 0x59, 0xc6, 0xed, 0xed, 0x26, 0x69, 0xd7, 0x7c, 0xfc, 0xb7, 0x6e, 0xd3, 0x0a, 0x07, 0x31, 0xe4, - 0xda, 0x2e, 0x35, 0x49, 0xbb, 0xec, 0x1b, 0x64, 0x3d, 0xa7, 0xfb, 0x46, 0x7f, 0xc0, 0x91, 0xd8, - 0x2e, 0x37, 0x49, 0xbb, 0xda, 0xb1, 0xdd, 0x95, 0x20, 0x37, 0x17, 0x9a, 0x5f, 0xec, 0xd7, 0x4d, - 0xbe, 0xd1, 0x71, 0x48, 0x8b, 0x83, 0x00, 0x12, 0x15, 0x72, 0x7b, 0x07, 0xf9, 0x6b, 0xe6, 0xf0, - 0x78, 0x71, 0x66, 0x1d, 0xd1, 0x9b, 0x45, 0x92, 0x9e, 0x04, 0x28, 0xae, 0x82, 0xe2, 0x8a, 0xda, - 0x77, 0x93, 0x13, 0x96, 0xf1, 0xd6, 0x19, 0xdd, 0x79, 0xa5, 0xd2, 0x51, 0x66, 0xbd, 0xa0, 0xbb, - 0xb9, 0x9c, 0xcc, 0x2e, 0x35, 0x4b, 0xed, 0x6a, 0xe7, 0xa1, 0x7b, 0xdd, 0x94, 0xdc, 0x0d, 0x5f, - 0xfc, 0xa2, 0xae, 0xf5, 0x85, 0xd0, 0xbd, 0x97, 0xe8, 0x88, 0x1c, 0xa8, 0xbf, 0xd9, 0xf5, 0x86, - 0xd6, 0x63, 0xa6, 0x21, 0xd3, 0x85, 0x03, 0xdb, 0xe8, 0xc0, 0x7f, 0xdf, 0x58, 0xcb, 0xab, 0x8d, - 0x1f, 0x5d, 0x6a, 0x70, 0x30, 0x58, 0x74, 0x82, 0x76, 0x57, 0x3b, 0x0f, 0xae, 0x27, 0xc3, 0x86, - 0xfd, 0x6a, 0x5e, 0x84, 0xa0, 0xf5, 0x93, 0xd0, 0xc6, 0xdb, 0xc5, 0xf4, 0xd1, 0xbd, 0x1e, 0xb0, - 0x18, 0x22, 0xcb, 0xa7, 0xf5, 0x31, 0x8b, 0x45, 0xc4, 0xb4, 0x4a, 0x83, 0x0c, 0xb4, 0x4d, 0xd0, - 0x98, 0xc7, 0x4b, 0xe6, 0x8d, 0xfd, 0x5b, 0x30, 
0x5f, 0x14, 0xe9, 0xef, 0x85, 0xe6, 0xdd, 0x38, - 0x7b, 0x0d, 0x53, 0xbf, 0xb6, 0xe4, 0xe8, 0x81, 0xb6, 0x8e, 0x69, 0x03, 0xb7, 0x2c, 0x1f, 0x5d, - 0x20, 0xe4, 0x40, 0x99, 0xee, 0xef, 0xae, 0xcf, 0x3f, 0x5f, 0x48, 0x17, 0x25, 0x9d, 0x27, 0x99, - 0xbf, 0x9f, 0x2c, 0xc5, 0xa1, 0xb9, 0x67, 0xf4, 0xd6, 0x3a, 0xcd, 0x98, 0xc5, 0x28, 0xb0, 0xf4, - 0x6f, 0xa6, 0xc6, 0x8a, 0xe9, 0x82, 0xc5, 0x3d, 0xd0, 0xdd, 0xf3, 0xaf, 0x33, 0x87, 0x5c, 0xcd, - 0x1c, 0xf2, 0x7d, 0xe6, 0x90, 0xcf, 0x73, 0x67, 0xeb, 0x6a, 0xee, 0x6c, 0x7d, 0x9b, 0x3b, 0x5b, - 0x1f, 0x9e, 0x0d, 0x85, 0xe6, 0x1f, 0xfb, 0x6e, 0xa8, 0x2e, 0x3d, 0xd3, 0x33, 0x8e, 0xb0, 0x00, - 0xde, 0xe4, 0xb7, 0xc7, 0x8e, 0x6b, 0xdb, 0xaf, 0xe0, 0x0b, 0x7a, 0xfa, 0x2b, 0x00, 0x00, 0xff, - 0xff, 0x62, 0xcd, 0x4c, 0xba, 0x12, 0x04, 0x00, 0x00, + // 719 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x51, 0x6f, 0xd3, 0x3c, + 0x14, 0x5d, 0xd6, 0x6e, 0xfb, 0xe6, 0xb6, 0xfb, 0x46, 0x26, 0x58, 0x36, 0xa0, 0x94, 0x4e, 0x1a, + 0x1d, 0x12, 0xa9, 0x56, 0xc4, 0x0b, 0x2f, 0x88, 0x8e, 0x4d, 0xeb, 0x86, 0x34, 0x94, 0x55, 0x03, + 0x21, 0xa1, 0xc8, 0x49, 0xdc, 0xc6, 0x6a, 0x6a, 0x87, 0xd8, 0xad, 0xd2, 0xfd, 0x0a, 0x7e, 0x16, + 0x8f, 0x7b, 0xe4, 0x11, 0x6d, 0xf0, 0x0f, 0x78, 0xe1, 0x0d, 0xc5, 0x76, 0xda, 0xb4, 0x6c, 0xc0, + 0x4b, 0x55, 0xe7, 0x9e, 0x7b, 0xee, 0xf1, 0xb9, 0xf7, 0x1a, 0xec, 0x38, 0xd0, 0x19, 0x05, 0x94, + 0xd4, 0xcf, 0x29, 0x41, 0x2e, 0x25, 0x2e, 0x46, 0x51, 0x17, 0x4d, 0x9f, 0xcc, 0x30, 0xa2, 0x9c, + 0xea, 0x86, 0x82, 0x9a, 0xd3, 0xc1, 0xe1, 0xee, 0xe6, 0x3d, 0x8e, 0x88, 0x87, 0xa2, 0x3e, 0x26, + 0xbc, 0xce, 0x47, 0x21, 0x62, 0xf2, 0x57, 0xe6, 0x6d, 0xde, 0xcf, 0x44, 0xdd, 0x68, 0x14, 0x72, + 0x5a, 0x0f, 0x23, 0x4a, 0x3b, 0x2a, 0x3c, 0x56, 0xe0, 0x70, 0xd7, 0xf5, 0x91, 0xdb, 0x0b, 0x69, + 0x82, 0x9c, 0x3a, 0x29, 0xe8, 0x56, 0x0a, 0x9d, 0x44, 0x30, 0xe9, 0xd6, 0x9d, 0x80, 0xd9, 0x3d, + 0x34, 0x52, 0xa0, 0x87, 0xd7, 0x83, 0x3e, 0x0e, 0x50, 0x94, 0x42, 0xb6, 0xaf, 
0x87, 0xcc, 0xd6, + 0xab, 0x7e, 0xd3, 0x40, 0xa9, 0x45, 0x3c, 0x14, 0x23, 0xef, 0x10, 0x41, 0x0f, 0x45, 0xfa, 0x06, + 0xf8, 0xcf, 0xf5, 0x21, 0x26, 0x36, 0xf6, 0x0c, 0xad, 0xa2, 0xd5, 0x96, 0xad, 0x25, 0x71, 0x6e, + 0x79, 0xba, 0x0e, 0xf2, 0x3e, 0x64, 0xbe, 0x31, 0x5f, 0xd1, 0x6a, 0x45, 0x4b, 0xfc, 0xd7, 0xef, + 0x80, 0x45, 0x1f, 0xe1, 0xae, 0xcf, 0x8d, 0x5c, 0x45, 0xab, 0xe5, 0x2d, 0x75, 0xd2, 0x5f, 0x80, + 0x15, 0x25, 0xc1, 0xf6, 0x05, 0xb1, 0x91, 0xaf, 0x68, 0xb5, 0x42, 0xc3, 0x30, 0x27, 0x5e, 0x99, + 0xd2, 0x43, 0x59, 0xd8, 0x2a, 0x29, 0xbc, 0xd2, 0xb1, 0x05, 0xd2, 0x0f, 0x36, 0x0a, 0xa9, 0xeb, + 0x1b, 0x0b, 0x82, 0xbf, 0xa8, 0x3e, 0xee, 0x27, 0xdf, 0xf4, 0x6d, 0xf0, 0x7f, 0x0a, 0xe2, 0xb1, + 0x2d, 0xc4, 0x2d, 0x0a, 0x71, 0x69, 0x6e, 0x3b, 0x3e, 0x84, 0xcc, 0xaf, 0x1e, 0x81, 0x85, 0x03, + 0x1a, 0xf5, 0x98, 0xfe, 0x12, 0x2c, 0x49, 0x39, 0xcc, 0xc8, 0x55, 0x72, 0xb5, 0x42, 0xe3, 0x91, + 0x79, 0x53, 0xcf, 0xcd, 0x29, 0x5f, 0xac, 0x34, 0xaf, 0xfa, 0x43, 0x03, 0xcb, 0x7b, 0xc2, 0x11, + 0xd2, 0xa1, 0x7f, 0xb2, 0xeb, 0x35, 0x28, 0x05, 0x90, 0x23, 0xc6, 0x53, 0x07, 0xe6, 0x85, 0x03, + 0xff, 0x5c, 0xb1, 0x28, 0xb3, 0x95, 0x1f, 0x4d, 0xa0, 0xce, 0x76, 0x27, 0xb9, 0x89, 0xb0, 0xbb, + 0xd0, 0x78, 0x70, 0x33, 0x99, 0xb8, 0xb0, 0x55, 0x90, 0x49, 0xf2, 0xf6, 0xcf, 0xc1, 0x06, 0xc7, + 0x7d, 0xc4, 0x38, 0xec, 0x87, 0xc8, 0x53, 0xb2, 0x98, 0xed, 0xd2, 0x01, 0xe1, 0xa2, 0x3f, 0x79, + 0x6b, 0x3d, 0x03, 0x90, 0x95, 0xd9, 0x5e, 0x12, 0xae, 0xfe, 0xd4, 0xc0, 0xea, 0x9b, 0x64, 0xa8, + 0x85, 0xf3, 0xa7, 0x08, 0x06, 0xc8, 0xd3, 0x2d, 0x50, 0x1a, 0xc2, 0x00, 0x7b, 0x90, 0xd3, 0xc8, + 0x66, 0x88, 0x1b, 0x9a, 0x30, 0xf5, 0xc9, 0x58, 0xd5, 0xd4, 0xf8, 0x25, 0xaa, 0xce, 0x52, 0xf8, + 0x5b, 0xcc, 0xfd, 0x66, 0xc0, 0x8e, 0xd1, 0xc8, 0x2a, 0x8e, 0x39, 0x4e, 0x11, 0xd7, 0xf7, 0xc1, + 0xaa, 0x58, 0x1e, 0xd9, 0x76, 0x1b, 0x93, 0x0e, 0x55, 0xce, 0xdd, 0xcd, 0xce, 0x8e, 0xdc, 0x33, + 0x53, 0x48, 0x3a, 0x09, 0x99, 0xb5, 0x12, 0x8e, 0xc5, 0x89, 0xc6, 0x1c, 0x81, 0xb5, 0x2c, 0xcd, + 0x10, 0x06, 0x42, 
0x60, 0xee, 0xef, 0x4c, 0xab, 0x13, 0xa6, 0x33, 0x18, 0x9c, 0x22, 0x5e, 0xfd, + 0x3e, 0x0f, 0xd6, 0x45, 0xf8, 0x00, 0x13, 0x18, 0xe0, 0x73, 0xe4, 0x4d, 0x06, 0xe0, 0x15, 0xb8, + 0x25, 0xeb, 0xf0, 0xd8, 0xc6, 0xc4, 0x76, 0x02, 0xea, 0xf6, 0xd4, 0xac, 0x6f, 0xfc, 0x3e, 0xeb, + 0xed, 0x58, 0xf0, 0x28, 0xb5, 0xed, 0xb8, 0x45, 0x9a, 0x49, 0x82, 0x7e, 0x0c, 0x6e, 0x4b, 0x16, + 0xd9, 0x93, 0x84, 0x69, 0x32, 0xf5, 0x33, 0x5b, 0x93, 0xd5, 0x6b, 0xe9, 0x22, 0x4d, 0x76, 0xaa, + 0xa5, 0xb6, 0xe2, 0x1d, 0xd0, 0xb3, 0x57, 0x67, 0xa2, 0x57, 0x62, 0x31, 0x0a, 0x8d, 0xc7, 0x37, + 0x0f, 0xcc, 0x6c, 0x77, 0xb3, 0x46, 0xa8, 0x7e, 0x7f, 0x48, 0x65, 0x2a, 0xe6, 0x81, 0xd3, 0xc7, + 0x9c, 0x23, 0xcf, 0x58, 0x12, 0x7d, 0xdf, 0x19, 0x93, 0x4f, 0xbf, 0x6d, 0xc3, 0x5d, 0xb3, 0x1d, + 0x41, 0xc2, 0xa0, 0xcb, 0x31, 0x15, 0xb6, 0x59, 0x6b, 0x19, 0xee, 0x94, 0xa5, 0x79, 0xf2, 0xf9, + 0xb2, 0xac, 0x5d, 0x5c, 0x96, 0xb5, 0xaf, 0x97, 0x65, 0xed, 0xd3, 0x55, 0x79, 0xee, 0xe2, 0xaa, + 0x3c, 0xf7, 0xe5, 0xaa, 0x3c, 0xf7, 0xfe, 0x59, 0x17, 0x73, 0x7f, 0xe0, 0x98, 0x2e, 0xed, 0xd7, + 0x55, 0x0d, 0xb1, 0x66, 0xe9, 0xa1, 0x1e, 0xcf, 0x3c, 0xef, 0xc2, 0x6e, 0x67, 0x51, 0xbc, 0x72, + 0x4f, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x91, 0x2a, 0x68, 0xc0, 0x04, 0x06, 0x00, 0x00, } func (m *IndexedHeader) Marshal() (dAtA []byte, err error) { @@ -485,6 +581,11 @@ func (m *ChainInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TimestampedHeadersCount != 0 { + i = encodeVarintZoneconcierge(dAtA, i, uint64(m.TimestampedHeadersCount)) + i-- + dAtA[i] = 0x20 + } if m.LatestForks != nil { { size, err := m.LatestForks.MarshalToSizedBuffer(dAtA[:i]) @@ -580,6 +681,79 @@ func (m *ProofEpochSealed) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ProofFinalizedChainInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *ProofFinalizedChainInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProofFinalizedChainInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProofEpochSubmitted) > 0 { + for iNdEx := len(m.ProofEpochSubmitted) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ProofEpochSubmitted[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.ProofEpochSealed != nil { + { + size, err := m.ProofEpochSealed.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.ProofHeaderInEpoch != nil { + { + size, err := m.ProofHeaderInEpoch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ProofTxInBlock != nil { + { + size, err := m.ProofTxInBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintZoneconcierge(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} + func encodeVarintZoneconcierge(dAtA []byte, offset int, v uint64) int { offset -= sovZoneconcierge(v) base := offset @@ -655,6 +829,9 @@ func (m *ChainInfo) Size() (n int) { l = m.LatestForks.Size() n += 1 + l + sovZoneconcierge(uint64(l)) } + if m.TimestampedHeadersCount != 0 { + n += 1 + sovZoneconcierge(uint64(m.TimestampedHeadersCount)) + } return n } @@ -681,6 +858,33 @@ func (m *ProofEpochSealed) Size() (n int) { return n } +func (m *ProofFinalizedChainInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProofTxInBlock != nil { + l = m.ProofTxInBlock.Size() + n += 1 + l + 
sovZoneconcierge(uint64(l)) + } + if m.ProofHeaderInEpoch != nil { + l = m.ProofHeaderInEpoch.Size() + n += 1 + l + sovZoneconcierge(uint64(l)) + } + if m.ProofEpochSealed != nil { + l = m.ProofEpochSealed.Size() + n += 1 + l + sovZoneconcierge(uint64(l)) + } + if len(m.ProofEpochSubmitted) > 0 { + for _, e := range m.ProofEpochSubmitted { + l = e.Size() + n += 1 + l + sovZoneconcierge(uint64(l)) + } + } + return n +} + func sovZoneconcierge(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1128,6 +1332,25 @@ func (m *ChainInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampedHeadersCount", wireType) + } + m.TimestampedHeadersCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampedHeadersCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipZoneconcierge(dAtA[iNdEx:]) @@ -1305,6 +1528,198 @@ func (m *ProofEpochSealed) Unmarshal(dAtA []byte) error { } return nil } +func (m *ProofFinalizedChainInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofFinalizedChainInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofFinalizedChainInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field ProofTxInBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthZoneconcierge + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthZoneconcierge + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofTxInBlock == nil { + m.ProofTxInBlock = &types.TxProof{} + } + if err := m.ProofTxInBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeaderInEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthZoneconcierge + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthZoneconcierge + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofHeaderInEpoch == nil { + m.ProofHeaderInEpoch = &crypto.Proof{} + } + if err := m.ProofHeaderInEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSealed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthZoneconcierge + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthZoneconcierge + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProofEpochSealed == nil { + m.ProofEpochSealed = &ProofEpochSealed{} + } + if err := m.ProofEpochSealed.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofEpochSubmitted", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZoneconcierge + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthZoneconcierge + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthZoneconcierge + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofEpochSubmitted = append(m.ProofEpochSubmitted, &types2.TransactionInfo{}) + if err := m.ProofEpochSubmitted[len(m.ProofEpochSubmitted)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipZoneconcierge(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthZoneconcierge + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipZoneconcierge(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0