diff --git a/app/app.go b/app/app.go index 701c4bd14..95d17eb27 100644 --- a/app/app.go +++ b/app/app.go @@ -9,6 +9,8 @@ import ( "path/filepath" "strings" + "github.com/neutron-org/neutron/x/cron" + "github.com/CosmWasm/wasmd/x/wasm" wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" "github.com/cosmos/cosmos-sdk/baseapp" @@ -78,6 +80,8 @@ import ( ibckeeper "github.com/cosmos/ibc-go/v4/modules/core/keeper" "github.com/cosmos/interchain-security/legacy_ibc_testing/core" ibctesting "github.com/cosmos/interchain-security/legacy_ibc_testing/testing" + cronkeeper "github.com/neutron-org/neutron/x/cron/keeper" + crontypes "github.com/neutron-org/neutron/x/cron/types" "github.com/spf13/cast" abci "github.com/tendermint/tendermint/abci/types" tmjson "github.com/tendermint/tendermint/libs/json" @@ -194,6 +198,7 @@ var ( feerefunder.AppModuleBasic{}, feeburner.AppModuleBasic{}, contractmanager.AppModuleBasic{}, + cron.AppModuleBasic{}, adminmodulemodule.NewAppModuleBasic( govclient.NewProposalHandler( adminmodulecli.NewSubmitParamChangeProposalTxCmd, @@ -224,6 +229,7 @@ var ( ccvconsumertypes.ConsumerRedistributeName: {authtypes.Burner}, ccvconsumertypes.ConsumerToSendToProviderName: nil, tokenfactorytypes.ModuleName: {authtypes.Minter, authtypes.Burner}, + crontypes.ModuleName: nil, } ) @@ -281,6 +287,7 @@ type App struct { FeeBurnerKeeper *feeburnerkeeper.Keeper ConsumerKeeper ccvconsumerkeeper.Keeper TokenFactoryKeeper *tokenfactorykeeper.Keeper + CronKeeper cronkeeper.Keeper RouterKeeper *routerkeeper.Keeper RouterModule router.AppModule @@ -338,7 +345,8 @@ func New( evidencetypes.StoreKey, ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, capabilitytypes.StoreKey, interchainqueriesmoduletypes.StoreKey, contractmanagermoduletypes.StoreKey, interchaintxstypes.StoreKey, wasm.StoreKey, feetypes.StoreKey, - feeburnertypes.StoreKey, adminmodulemoduletypes.StoreKey, ccvconsumertypes.StoreKey, tokenfactorytypes.StoreKey, routertypes.StoreKey, 
ibchookstypes.StoreKey, + feeburnertypes.StoreKey, adminmodulemoduletypes.StoreKey, ccvconsumertypes.StoreKey, tokenfactorytypes.StoreKey, routertypes.StoreKey, + crontypes.StoreKey, ibchookstypes.StoreKey, ) tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey) memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey, feetypes.MemStoreKey) @@ -557,7 +565,8 @@ func New( app.FeeKeeper, ) - wasmOpts = append(wasmbinding.RegisterCustomPlugins(&app.InterchainTxsKeeper, &app.InterchainQueriesKeeper, app.TransferKeeper, &app.AdminmoduleKeeper, app.FeeBurnerKeeper, app.FeeKeeper, &app.BankKeeper, app.TokenFactoryKeeper), wasmOpts...) + app.CronKeeper = *cronkeeper.NewKeeper(appCodec, keys[crontypes.StoreKey], keys[crontypes.MemStoreKey], app.GetSubspace(crontypes.ModuleName), app.AccountKeeper) + wasmOpts = append(wasmbinding.RegisterCustomPlugins(&app.InterchainTxsKeeper, &app.InterchainQueriesKeeper, app.TransferKeeper, &app.AdminmoduleKeeper, app.FeeBurnerKeeper, app.FeeKeeper, &app.BankKeeper, app.TokenFactoryKeeper, &app.CronKeeper), wasmOpts...) 
app.WasmKeeper = wasm.NewKeeper( appCodec, @@ -580,6 +589,9 @@ func New( ) wasmHooks.ContractKeeper = wasmkeeper.NewDefaultPermissionKeeper(app.WasmKeeper) + app.CronKeeper.WasmMsgServer = wasmkeeper.NewMsgServerImpl(wasmkeeper.NewDefaultPermissionKeeper(app.WasmKeeper)) + cronModule := cron.NewAppModule(appCodec, &app.CronKeeper) + if len(enabledProposals) != 0 { app.AdminmoduleKeeper.Router().AddRoute(wasm.RouterKey, wasm.NewWasmProposalHandler(app.WasmKeeper, enabledProposals)) } @@ -658,6 +670,7 @@ func New( adminModule, ibcHooksModule, tokenfactory.NewAppModule(appCodec, *app.TokenFactoryKeeper, app.AccountKeeper, app.BankKeeper), + cronModule, ) // During begin block slashing happens after distr.BeginBlocker so that @@ -690,6 +703,7 @@ func New( adminmodulemoduletypes.ModuleName, ibchookstypes.ModuleName, routertypes.ModuleName, + crontypes.ModuleName, ) app.mm.SetOrderEndBlockers( @@ -718,6 +732,7 @@ func New( adminmodulemoduletypes.ModuleName, ibchookstypes.ModuleName, routertypes.ModuleName, + crontypes.ModuleName, ) // NOTE: The genutils module must occur after staking so that pools are @@ -751,6 +766,7 @@ func New( adminmodulemoduletypes.ModuleName, ibchookstypes.ModuleName, // after auth keeper routertypes.ModuleName, + crontypes.ModuleName, ) app.mm.RegisterInvariants(&app.CrisisKeeper) @@ -772,6 +788,7 @@ func New( transferModule, interchainQueriesModule, interchainTxsModule, + cronModule, ) app.sm.RegisterStoreDecoders() @@ -1008,6 +1025,7 @@ func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino paramsKeeper.Subspace(wasm.ModuleName) paramsKeeper.Subspace(feetypes.ModuleName) paramsKeeper.Subspace(feeburnertypes.ModuleName) + paramsKeeper.Subspace(crontypes.ModuleName) return paramsKeeper } diff --git a/docs/static/openapi.yml b/docs/static/openapi.yml index 2510e7795..721021746 100644 --- a/docs/static/openapi.yml +++ b/docs/static/openapi.yml @@ -1123,7 +1123,6 @@ paths: type: object properties: account: - description: 
account defines the account of the corresponding address. type: object properties: '@type': @@ -1184,6 +1183,114 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryAccountResponse is the response type for the Query/Account RPC method. @@ -3544,13 +3651,20 @@ paths: type: object properties: balance: - description: balance is the balance of the coin. type: object properties: denom: type: string amount: type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. description: >- QueryBalanceResponse is the response type for the Query/Balance RPC method. @@ -3784,9 +3898,6 @@ paths: type: object properties: metadata: - description: >- - metadata describes and provides all the client information for - the requested token. type: object properties: description: @@ -3854,6 +3965,9 @@ paths: Since: cosmos-sdk 0.43 + description: |- + Metadata represents a struct that describes + a basic token. description: >- QueryDenomMetadataResponse is the response type for the Query/DenomMetadata RPC @@ -4215,13 +4329,20 @@ paths: type: object properties: amount: - description: amount is the supply of the coin. type: object properties: denom: type: string amount: type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. 
description: >- QuerySupplyOfResponse is the response type for the Query/SupplyOf RPC method. @@ -4330,44 +4451,33 @@ paths: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs from the - previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. data: type: object @@ -4439,7 +4549,6 @@ paths: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -4497,7 +4606,6 @@ paths: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. 
timestamp: type: string format: date-time @@ -4586,48 +4694,35 @@ paths: last_commit_hash: type: string format: byte - description: >- - commit from validators from the last - block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: >- hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs - from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: >- Header defines the structure of a Tendermint block header. 
@@ -5110,44 +5205,33 @@ paths: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs from the - previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. data: type: object @@ -5219,7 +5303,6 @@ paths: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -5277,7 +5360,6 @@ paths: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. 
timestamp: type: string format: date-time @@ -5366,48 +5448,35 @@ paths: last_commit_hash: type: string format: byte - description: >- - commit from validators from the last - block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: >- hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs - from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: >- Header defines the structure of a Tendermint block header. @@ -7691,7 +7760,6 @@ paths: type: object properties: evidence: - description: evidence returns the requested evidence. type: object properties: '@type': @@ -7752,6 +7820,114 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. 
+ + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryEvidenceResponse is the response type for the Query/Evidence RPC method. @@ -9264,9 +9440,6 @@ paths: type: object properties: val_signing_info: - title: >- - val_signing_info is the signing info of requested val cons - address type: object properties: address: @@ -9316,6 +9489,9 @@ paths: monitoring their liveness activity. 
+ title: >- + val_signing_info is the signing info of requested val cons + address title: >- QuerySigningInfoResponse is the response type for the Query/SigningInfo RPC @@ -9409,7 +9585,6 @@ paths: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -9616,9 +9791,6 @@ paths: } parameters: - name: body - description: |- - SimulateRequest is the request type for the Service.Simulate - RPC method. in: body required: true schema: @@ -9909,7 +10081,6 @@ paths: type: object properties: tx_response: - description: tx_response is the queried TxResponses. type: object properties: height: @@ -9996,7 +10167,6 @@ paths: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -10058,6 +10228,117 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -10088,7 +10369,6 @@ paths: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -10115,6 +10395,11 @@ paths: Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The + + tags are stringified and the log is JSON decoded. description: |- BroadcastTxResponse is the response type for the Service.BroadcastTx method. @@ -10305,11 +10590,6 @@ paths: } parameters: - name: body - description: >- - BroadcastTxRequest is the request type for the - Service.BroadcastTxRequest - - RPC method. 
in: body required: true schema: @@ -11080,13 +11360,6 @@ paths: such as a git commit that validators could automatically upgrade to upgraded_client_state: - description: >- - Deprecated: UpgradedClientState field has been deprecated. - IBC upgrade logic has been - - moved to the IBC module in the sub module 02-client. - - If this field is not empty, an error will be thrown. type: object properties: '@type': @@ -11148,6 +11421,117 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". 
+ + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryCurrentPlanResponse is the response type for the Query/CurrentPlan RPC @@ -11818,7 +12202,6 @@ paths: code_id: type: string format: uint64 - title: id for legacy support creator: type: string data_hash: @@ -12109,6 +12492,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query /cosmwasm/wasm/v1/code/{code_id}: @@ -12127,7 +12520,6 @@ paths: code_id: type: string format: uint64 - title: id for legacy support creator: type: string data_hash: @@ -12358,7 +12750,6 @@ paths: } parameters: - name: code_id - description: grpc-gateway_out does not support Go style CodID in: path required: true type: string @@ -12588,7 +12979,6 @@ paths: } parameters: - name: code_id - description: grpc-gateway_out does not support Go style CodID in: path required: true type: string @@ -12639,6 +13029,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query /cosmwasm/wasm/v1/codes/params: @@ -13162,6 +13562,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query /cosmwasm/wasm/v1/contract/{address}: @@ -13214,11 +13624,6 @@ paths: ibc_port_id: type: string extension: - description: >- - Extension is an extension point to store custom metadata - within the - - persistence model. type: object properties: '@type': @@ -13280,6 +13685,117 @@ paths: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } title: ContractInfo stores a WASM contract instance title: >- QueryContractInfoResponse is the response type for the @@ -13794,6 +14310,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query /cosmwasm/wasm/v1/contract/{address}/raw/{query_data}: @@ -14509,6 +15035,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query /cosmwasm/wasm/v1/contracts/creator/{creator_address}: @@ -14784,6 +15320,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query /ibc/apps/interchain_accounts/controller/v1/owners/{owner}/connections/{connection_id}: @@ -15650,7 +16196,6 @@ paths: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -15823,6 +16368,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: >- IdentifiedClientState defines a client state with an additional client @@ -16075,7 +16621,6 @@ paths: type: object properties: consensus_state: - title: consensus state associated with the channel type: object properties: '@type': @@ -16244,6 +16789,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state associated with the channel client_id: type: string title: client ID associated with the consensus state @@ -19366,7 +19912,6 @@ paths: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -19541,6 +20086,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: >- IdentifiedClientState defines a client state with an additional client @@ -19825,7 +20371,6 @@ paths: type: object properties: client_state: - title: client state associated with the request identifier type: object properties: '@type': @@ -19994,6 +20539,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state associated with the request identifier proof: type: string format: byte 
@@ -20485,7 +21031,6 @@ paths: gets reset consensus_state: - title: consensus state type: object properties: '@type': @@ -20660,6 +21205,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state description: >- ConsensusStateWithHeight defines a consensus state with an additional height @@ -21271,9 +21817,6 @@ paths: type: object properties: consensus_state: - title: >- - consensus state associated with the client identifier at the - given height type: object properties: '@type': @@ -21442,12 +21985,14 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: >- + consensus state associated with the client identifier at the + given height proof: type: string format: byte title: merkle proof of existence proof_height: - title: height at which the proof was retrieved type: object properties: revision_number: @@ -21475,6 +22020,13 @@ paths: RevisionHeight gets reset + title: >- + Height is a monotonically increasing data type + + that can be compared against another Height for the purposes + of updating and + + freezing clients title: >- QueryConsensusStateResponse is the response type for the Query/ConsensusState @@ -21705,7 +22257,6 @@ paths: type: object properties: upgraded_client_state: - title: client state associated with the request identifier type: object properties: '@type': @@ -21874,6 +22425,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state associated with the request identifier description: |- QueryUpgradedClientStateResponse is the response type for the Query/UpgradedClientState RPC method. 
@@ -22075,7 +22627,6 @@ paths: type: object properties: upgraded_consensus_state: - title: Consensus state associated with the request identifier type: object properties: '@type': @@ -22244,6 +22795,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: Consensus state associated with the request identifier description: |- QueryUpgradedConsensusStateResponse is the response type for the Query/UpgradedConsensusState RPC method. @@ -23430,7 +23982,6 @@ paths: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -23603,6 +24154,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: >- IdentifiedClientState defines a client state with an additional client @@ -23850,7 +24402,6 @@ paths: type: object properties: consensus_state: - title: consensus state associated with the channel type: object properties: '@type': @@ -24019,6 +24570,7 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state associated with the channel client_id: type: string title: client ID associated with the consensus state @@ -25372,13 +25924,10 @@ paths: title: Params defines the parameters for CCV consumer module provider_client_id: type: string - description: empty for a new chain, filled in on restart. provider_channel_id: type: string - description: empty for a new chain, filled in on restart. new_chain: type: boolean - description: true for new chain GenesisState, false for chain restart. provider_client_state: description: >- ProviderClientState filled in on new chain, nil on @@ -25501,11 +26050,7 @@ paths: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data - passed. Note this is an illegal argument - some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. 
Note this is an illegal argument some places.' prehash_key: type: string enum: @@ -25517,11 +26062,7 @@ paths: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data - passed. Note this is an illegal argument - some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_value: type: string enum: @@ -25533,11 +26074,7 @@ paths: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data - passed. Note this is an illegal argument - some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' length: type: string enum: @@ -25660,11 +26197,10 @@ paths: - BITCOIN - SHA512_256 default: NO_HASH + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' title: >- - - NO_HASH: NO_HASH is the default if no data - passed. Note this is an illegal argument - some places. 
- - BITCOIN: ripemd160(sha256(x)) + hash is the algorithm that must be used for + each InnerOp description: >- InnerSpec contains all store-specific structure info to determine if two proofs from a @@ -25893,11 +26429,9 @@ paths: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: >- PubKey pub_key = 2 [(gogoproto.nullable)=false]; @@ -26191,11 +26725,9 @@ paths: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -26518,11 +27050,9 @@ paths: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -27304,45 +27834,280 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /neutron/contractmanager/failures/{address}: + get: + summary: Queries a Failure by address. + operationId: NeutronContractmanagerAddressFailures + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + failures: + type: array + items: + type: object + properties: + channel_id: + type: string + title: ChannelId + address: + type: string + title: Address of the failed contract + id: + type: string + format: uint64 + title: id of the failure under specific address + ack_id: + type: string + format: uint64 + title: ACK id to restore + ack_type: + type: string + title: Acknowledgement type + description: >- + Failure message contains information about ACK failures and + can be used to + + replay ACK in case of requirement. + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + parameters: + - name: address + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query - /neutron/contractmanager/failures/{address}: + /neutron/contractmanager/params: get: - summary: Queries a Failure by address. - operationId: NeutronContractmanagerAddressFailures + summary: Parameters queries the parameters of the module. + operationId: NeutronContractmanagerParams responses: '200': description: A successful response. schema: type: object properties: - failures: + params: + description: params holds all the parameters of this module. + type: object + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: type: array items: type: object properties: - channel_id: + '@type': type: string - title: ChannelId - address: + additionalProperties: {} + tags: + - Query + /neutron/cron/params: + get: + summary: Queries the parameters of the module. + operationId: NeutronCronParams + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + properties: + security_address: + type: string + title: Security address that can remove schedules + limit: + type: string + format: uint64 + title: Limit of schedules executed in one block + default: + description: An unexpected error response. + schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': type: string - title: Address of the failed contract - id: + additionalProperties: {} + tags: + - Query + /neutron/cron/schedule: + get: + summary: Queries a list of Schedule items. + operationId: NeutronCronSchedules + responses: + '200': + description: A successful response. + schema: + type: object + properties: + schedules: + type: array + items: + type: object + properties: + name: type: string - format: uint64 - title: id of the failure under specific address - ack_id: + title: Name of schedule + period: type: string format: uint64 - title: ACK id to restore - ack_type: + title: Period in blocks + msgs: + type: array + items: + type: object + properties: + contract: + type: string + title: Contract is the address of the smart contract + msg: + type: string + title: >- + Msg is json encoded message to be passed to the + contract + title: Msgs that will be executed every period amount of time + last_execute_height: type: string - title: Acknowledgement type - description: >- - Failure message contains information about ACK failures and - can be used to - - replay ACK in case of requirement. 
+ format: uint64 + title: Last execution's block height pagination: type: object properties: @@ -27389,10 +28154,6 @@ paths: type: string additionalProperties: {} parameters: - - name: address - in: path - required: true - type: string - name: pagination.key description: |- key is a value returned in PageResponse.next_key to begin @@ -27441,22 +28202,44 @@ paths: type: boolean tags: - Query - /neutron/contractmanager/params: + /neutron/cron/schedule/{name}: get: - summary: Parameters queries the parameters of the module. - operationId: NeutronContractmanagerParams + summary: Queries a Schedule by name. + operationId: NeutronCronSchedule responses: '200': description: A successful response. schema: type: object properties: - params: - description: params holds all the parameters of this module. + schedule: type: object - description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. + properties: + name: + type: string + title: Name of schedule + period: + type: string + format: uint64 + title: Period in blocks + msgs: + type: array + items: + type: object + properties: + contract: + type: string + title: Contract is the address of the smart contract + msg: + type: string + title: >- + Msg is json encoded message to be passed to the + contract + title: Msgs that will be executed every period amount of time + last_execute_height: + type: string + format: uint64 + title: Last execution's block height default: description: An unexpected error response. 
schema: @@ -27475,6 +28258,11 @@ paths: '@type': type: string additionalProperties: {} + parameters: + - name: name + in: path + required: true + type: string tags: - Query /neutron/feeburner/params: @@ -28097,14 +28885,6 @@ paths: type: object properties: next_block_header: - title: >- - We need to know block X+1 to verify response of - transaction for block X - - since LastResultsHash is root hash of all results from - the txs from the - - previous block type: object properties: '@type': @@ -28279,10 +29059,15 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - header: title: >- - We need to know block X to verify inclusion of + We need to know block X+1 to verify response of transaction for block X + + since LastResultsHash is root hash of all results from + the txs from the + + previous block + header: type: object properties: '@type': @@ -28457,6 +29242,9 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: >- + We need to know block X to verify inclusion of + transaction for block X tx: type: object properties: @@ -28471,10 +29259,8 @@ paths: format: byte log: type: string - title: nondeterministic info: type: string - title: nondeterministic gas_wanted: type: string format: int64 @@ -28501,7 +29287,6 @@ paths: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -28514,7 +29299,6 @@ paths: Later, transactions may be queried using these events. - title: nondeterministic codespace: type: string delivery_proof: @@ -29141,6 +29925,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query /neutron/interchainqueries/registered_query: @@ -30148,6 +30942,16 @@ paths: in: query required: false type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query /ibc/apps/transfer/v1/denom_traces/{hash}: @@ -30161,9 +30965,6 @@ paths: type: object properties: denom_trace: - description: >- - denom_trace returns the requested denomination trace - information. type: object properties: path: @@ -30176,6 +30977,11 @@ paths: base_denom: type: string description: base denomination of the relayed fungible token. + description: >- + DenomTrace contains the base denomination for ICS20 fungible + tokens and the + + source tracing information path. description: >- QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC @@ -30721,6 +31527,45 @@ paths: additionalProperties: {} tags: - Query + /ibc/apps/router/v1/params: + get: + summary: Params queries all parameters of the router module. + operationId: RouterV1Params + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + fee_percentage: + type: string + description: >- + QueryParamsResponse is the response type for the Query/Params RPC + method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + '@type': + type: string + additionalProperties: {} + tags: + - Query definitions: cosmos.adminmodule.adminmodule.MsgAddAdminResponse: type: object @@ -31604,7 +32449,6 @@ definitions: type: object properties: account: - description: account defines the account of the corresponding address. type: object properties: '@type': @@ -31660,6 +32504,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". 
+ + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryAccountResponse is the response type for the Query/Account RPC method. @@ -33297,13 +34242,17 @@ definitions: type: object properties: balance: - description: balance is the balance of the coin. type: object properties: denom: type: string amount: type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. description: >- QueryBalanceResponse is the response type for the Query/Balance RPC method. @@ -33311,9 +34260,6 @@ definitions: type: object properties: metadata: - description: >- - metadata describes and provides all the client information for the - requested token. type: object properties: description: @@ -33376,6 +34322,9 @@ definitions: Since: cosmos-sdk 0.43 + description: |- + Metadata represents a struct that describes + a basic token. description: >- QueryDenomMetadataResponse is the response type for the Query/DenomMetadata RPC @@ -33549,13 +34498,17 @@ definitions: type: object properties: amount: - description: amount is the supply of the coin. 
type: object properties: denom: type: string amount: type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. description: >- QuerySupplyOfResponse is the response type for the Query/SupplyOf RPC method. @@ -33685,42 +34638,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: root hash of all results from the txs from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. data: type: object @@ -33789,7 +34733,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -33847,7 +34790,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. 
timestamp: type: string format: date-time @@ -33936,48 +34878,35 @@ definitions: last_commit_hash: type: string format: byte - description: >- - commit from validators from the last - block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: >- hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs - from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: >- Header defines the structure of a Tendermint block header. 
@@ -34260,42 +35189,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: root hash of all results from the txs from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. data: type: object @@ -34364,7 +35284,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -34422,7 +35341,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. 
timestamp: type: string format: date-time @@ -34511,48 +35429,35 @@ definitions: last_commit_hash: type: string format: byte - description: >- - commit from validators from the last - block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: >- hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs - from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: >- Header defines the structure of a Tendermint block header. 
@@ -35594,42 +36499,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: root hash of all results from the txs from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. data: type: object @@ -35698,7 +36594,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -35756,7 +36651,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. 
timestamp: type: string format: date-time @@ -35844,46 +36738,35 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: >- hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs from - the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: >- Header defines the structure of a Tendermint block header. @@ -36250,7 +37133,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -36307,7 +37189,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -36382,7 +37263,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -36439,7 +37319,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. 
timestamp: type: string format: date-time @@ -36527,44 +37406,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs from the - previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. commit: type: object @@ -36768,7 +37636,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -36826,7 +37693,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. 
timestamp: type: string format: date-time @@ -36914,44 +37780,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs from the - previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: >- Header defines the structure of a Tendermint block header. 
@@ -37155,42 +38010,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: root hash of all results from the txs from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. 
tendermint.types.LightBlock: type: object @@ -37247,42 +38093,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: root hash of all results from the txs from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. 
commit: type: object @@ -37451,44 +38288,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs from the previous - block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. 
commit: type: object @@ -37701,42 +38527,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: root hash of all results from the txs from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. commit: type: object @@ -37923,7 +38740,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -38160,7 +38976,6 @@ definitions: type: object properties: evidence: - description: evidence returns the requested evidence. type: object properties: '@type': @@ -38216,6 +39031,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. 
+ + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryEvidenceResponse is the response type for the Query/Evidence RPC method. @@ -38660,7 +39576,6 @@ definitions: type: object properties: val_signing_info: - title: val_signing_info is the signing info of requested val cons address type: object properties: address: @@ -38707,6 +39622,7 @@ definitions: their liveness activity. 
+ title: val_signing_info is the signing info of requested val cons address title: >- QuerySigningInfoResponse is the response type for the Query/SigningInfo RPC @@ -38937,7 +39853,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -39059,7 +39974,6 @@ definitions: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -39115,6 +40029,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". 
+ + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -39145,7 +40160,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -39226,7 +40240,6 @@ definitions: signer_infos: type: array items: - type: object $ref: '#/definitions/cosmos.tx.v1beta1.SignerInfo' description: >- signer_infos defines the signing modes for the required signers. The @@ -39354,7 +40367,6 @@ definitions: type: object properties: tx_response: - description: tx_response is the queried TxResponses. type: object properties: height: @@ -39439,7 +40451,6 @@ definitions: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -39498,6 +40509,110 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. 
+ + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -39528,7 +40643,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. 
@@ -39554,6 +40668,11 @@ definitions: Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The + + tags are stringified and the log is JSON decoded. description: |- BroadcastTxResponse is the response type for the Service.BroadcastTx method. @@ -39617,7 +40736,6 @@ definitions: txs: type: array items: - type: object $ref: '#/definitions/cosmos.tx.v1beta1.Tx' description: txs are the transactions in the block. block_id: @@ -39689,42 +40807,33 @@ definitions: last_commit_hash: type: string format: byte - description: commit from validators from the last block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: root hash of all results from the txs from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: Header defines the structure of a Tendermint block header. data: type: object @@ -39793,7 +40902,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. timestamp: type: string format: date-time @@ -39851,7 +40959,6 @@ definitions: format: byte title: PartsetHeader title: BlockID - description: zero if vote is nil. 
timestamp: type: string format: date-time @@ -39940,48 +41047,35 @@ definitions: last_commit_hash: type: string format: byte - description: >- - commit from validators from the last - block title: hashes of block data data_hash: type: string format: byte - title: transactions validators_hash: type: string format: byte - description: validators for the current block title: >- hashes from the app output from the prev block next_validators_hash: type: string format: byte - title: validators for the next block consensus_hash: type: string format: byte - title: consensus params for current block app_hash: type: string format: byte - title: state after txs from the previous block last_results_hash: type: string format: byte - title: >- - root hash of all results from the txs - from the previous block evidence_hash: type: string format: byte - description: evidence included in the block title: consensus info proposer_address: type: string format: byte - title: original proposer of the block description: >- Header defines the structure of a Tendermint block header. @@ -40220,7 +41314,6 @@ definitions: $ref: '#/definitions/cosmos.tx.v1beta1.Tx' description: tx is the queried transaction. tx_response: - description: tx_response is the queried TxResponses. type: object properties: height: @@ -40305,7 +41398,6 @@ definitions: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -40364,6 +41456,110 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. 
+ + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -40394,7 +41590,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. 
@@ -40420,6 +41615,11 @@ definitions: Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The + + tags are stringified and the log is JSON decoded. description: GetTxResponse is the response type for the Service.GetTx method. cosmos.tx.v1beta1.GetTxsEventResponse: type: object @@ -40427,7 +41627,6 @@ definitions: txs: type: array items: - type: object $ref: '#/definitions/cosmos.tx.v1beta1.Tx' description: txs is the list of queried transactions. tx_responses: @@ -40517,7 +41716,6 @@ definitions: format: int64 description: Amount of gas consumed by transaction. tx: - description: The request transaction bytes. type: object properties: '@type': @@ -40578,6 +41776,112 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } timestamp: type: string description: >- @@ -40608,7 +41912,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -40740,7 +42043,6 @@ definitions: mode_infos: type: array items: - type: object $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo' title: |- mode_infos is the corresponding modes of the signers of the multisig @@ -40811,14 +42113,6 @@ definitions: type: object properties: public_key: - description: >- - public_key is the public key of the signer. It is optional for - accounts - - that already exist in state. If unset, the verifier can use the - required \ - - signer address for this position and lookup the public key. type: object properties: '@type': @@ -40874,6 +42168,107 @@ definitions: used with implementation specific semantics. 
additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } mode_info: $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo' title: |- @@ -40965,7 +42360,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -42124,7 +43518,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: EventAttribute is a single key-value pair, associated with an event. description: >- Event allows application developers to attach additional information to @@ -42144,7 +43537,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: EventAttribute is a single key-value pair, associated with an event. cosmos.upgrade.v1beta1.ModuleVersion: type: object @@ -42205,13 +43597,6 @@ definitions: Any application specific upgrade info to be included on-chain such as a git commit that validators could automatically upgrade to upgraded_client_state: - description: >- - Deprecated: UpgradedClientState field has been deprecated. IBC upgrade - logic has been - - moved to the IBC module in the sub module 02-client. - - If this field is not empty, an error will be thrown. type: object properties: '@type': @@ -42267,6 +43652,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. 
+ + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- Plan specifies information about a planned upgrade and when it should occur. @@ -42335,13 +43821,6 @@ definitions: such as a git commit that validators could automatically upgrade to upgraded_client_state: - description: >- - Deprecated: UpgradedClientState field has been deprecated. IBC - upgrade logic has been - - moved to the IBC module in the sub module 02-client. - - If this field is not empty, an error will be thrown. 
type: object properties: '@type': @@ -42400,6 +43879,110 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } description: >- QueryCurrentPlanResponse is the response type for the Query/CurrentPlan RPC @@ -42525,7 +44108,6 @@ definitions: code_id: type: string format: uint64 - title: id for legacy support creator: type: string data_hash: @@ -42656,9 +44238,6 @@ definitions: ibc_port_id: type: string extension: - description: |- - Extension is an extension point to store custom metadata within the - persistence model. type: object properties: '@type': @@ -42714,6 +44293,107 @@ definitions: used with implementation specific semantics. additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
+ + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } title: ContractInfo stores a WASM contract instance cosmwasm.wasm.v1.Model: type: object @@ -42885,7 +44565,6 @@ definitions: code_id: type: string format: uint64 - title: id for legacy support creator: type: string data_hash: @@ -42938,7 +44617,6 @@ definitions: code_id: type: string format: uint64 - title: id for legacy support creator: type: string data_hash: @@ -43107,11 +44785,6 @@ definitions: ibc_port_id: type: string extension: - description: >- - Extension is an extension point to store custom metadata within - the - - persistence model. type: object properties: '@type': @@ -43170,6 +44843,110 @@ definitions: used with implementation specific semantics. 
additionalProperties: {} + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } title: ContractInfo stores a WASM contract instance title: >- QueryContractInfoResponse is the response type for the Query/ContractInfo @@ -43761,7 +45538,6 @@ definitions: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -43924,6 +45700,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: |- IdentifiedClientState defines a client state with an additional client identifier field. @@ -43966,7 +45743,6 @@ definitions: type: object properties: consensus_state: - title: consensus state associated with the channel type: object properties: '@type': @@ -44123,6 +45899,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state associated with the channel client_id: type: string title: client ID associated with the consensus state @@ -45040,7 +46817,6 @@ definitions: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -45197,6 +46973,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: |- IdentifiedClientState defines a client state with an additional client identifier field. 
@@ -45232,7 +47009,6 @@ definitions: gets reset consensus_state: - title: consensus state type: object properties: '@type': @@ -45389,6 +47165,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state description: >- ConsensusStateWithHeight defines a consensus state with an additional height @@ -45438,7 +47215,6 @@ definitions: type: object properties: client_state: - title: client state associated with the request identifier type: object properties: '@type': @@ -45595,6 +47371,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state associated with the request identifier proof: type: string format: byte @@ -45646,7 +47423,6 @@ definitions: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -45813,6 +47589,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: >- IdentifiedClientState defines a client state with an additional client @@ -45934,9 +47711,6 @@ definitions: type: object properties: consensus_state: - title: >- - consensus state associated with the client identifier at the given - height type: object properties: '@type': @@ -46093,12 +47867,14 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: >- + consensus state associated with the client identifier at the given + height proof: type: string format: byte title: merkle proof of existence proof_height: - title: height at which the proof was retrieved type: object properties: revision_number: @@ -46125,6 +47901,13 @@ definitions: RevisionHeight gets reset + title: >- + Height is a monotonically increasing data type + + that can be compared against another Height for the purposes of + updating and + + freezing clients title: >- QueryConsensusStateResponse is the response type for the Query/ConsensusState 
@@ -46168,7 +47951,6 @@ definitions: gets reset consensus_state: - title: consensus state type: object properties: '@type': @@ -46335,6 +48117,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state description: >- ConsensusStateWithHeight defines a consensus state with an additional height @@ -46374,7 +48157,6 @@ definitions: type: object properties: upgraded_client_state: - title: client state associated with the request identifier type: object properties: '@type': @@ -46531,6 +48313,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state associated with the request identifier description: |- QueryUpgradedClientStateResponse is the response type for the Query/UpgradedClientState RPC method. @@ -46538,7 +48321,6 @@ definitions: type: object properties: upgraded_consensus_state: - title: Consensus state associated with the request identifier type: object properties: '@type': @@ -46695,6 +48477,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: Consensus state associated with the request identifier description: |- QueryUpgradedConsensusStateResponse is the response type for the Query/UpgradedConsensusState RPC method. @@ -46981,7 +48764,6 @@ definitions: type: string title: client identifier client_state: - title: client state type: object properties: '@type': @@ -47144,6 +48926,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: client state description: |- IdentifiedClientState defines a client state with an additional client identifier field. 
@@ -47186,7 +48969,6 @@ definitions: type: object properties: consensus_state: - title: consensus state associated with the channel type: object properties: '@type': @@ -47343,6 +49125,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: consensus state associated with the channel client_id: type: string title: client ID associated with the consensus state @@ -47886,10 +49669,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note - this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_key: type: string enum: @@ -47901,10 +49681,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note - this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_value: type: string enum: @@ -47916,10 +49693,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note - this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' length: type: string enum: @@ -48036,10 +49810,8 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note - this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' 
+ title: hash is the algorithm that must be used for each InnerOp description: >- InnerSpec contains all store-specific structure info to determine if two proofs from a @@ -48180,10 +49952,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note this is an - illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' ics23.InnerSpec: type: object properties: @@ -48222,10 +49991,8 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note this is an - illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' + title: hash is the algorithm that must be used for each InnerOp description: >- InnerSpec contains all store-specific structure info to determine if two proofs from a @@ -48255,10 +50022,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note this is an - illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_key: type: string enum: @@ -48270,10 +50034,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note this is an - illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_value: type: string enum: @@ -48285,10 +50046,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. 
Note this is an - illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' length: type: string enum: @@ -48410,10 +50168,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note this is - an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_key: type: string enum: @@ -48425,10 +50180,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note this is - an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_value: type: string enum: @@ -48440,10 +50192,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note this is - an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' length: type: string enum: @@ -48558,10 +50307,8 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. Note this is - an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' 
+ title: hash is the algorithm that must be used for each InnerOp description: >- InnerSpec contains all store-specific structure info to determine if two proofs from a @@ -48687,13 +50434,10 @@ definitions: title: Params defines the parameters for CCV consumer module provider_client_id: type: string - description: empty for a new chain, filled in on restart. provider_channel_id: type: string - description: empty for a new chain, filled in on restart. new_chain: type: boolean - description: true for new chain GenesisState, false for chain restart. provider_client_state: description: ProviderClientState filled in on new chain, nil on restart. type: object @@ -48810,10 +50554,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. - Note this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_key: type: string enum: @@ -48825,10 +50566,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. - Note this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_value: type: string enum: @@ -48840,10 +50578,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. - Note this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' length: type: string enum: @@ -48962,10 +50697,8 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. - Note this is an illegal argument some places. 
- - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' + title: hash is the algorithm that must be used for each InnerOp description: >- InnerSpec contains all store-specific structure info to determine if two proofs from a @@ -49180,11 +50913,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -49967,13 +51698,10 @@ definitions: title: Params defines the parameters for CCV consumer module provider_client_id: type: string - description: empty for a new chain, filled in on restart. provider_channel_id: type: string - description: empty for a new chain, filled in on restart. new_chain: type: boolean - description: true for new chain GenesisState, false for chain restart. provider_client_state: description: ProviderClientState filled in on new chain, nil on restart. type: object @@ -50092,10 +51820,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. - Note this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_key: type: string enum: @@ -50107,10 +51832,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. - Note this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' prehash_value: type: string enum: @@ -50122,10 +51844,7 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH - title: >- - - NO_HASH: NO_HASH is the default if no data passed. 
- Note this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' length: type: string enum: @@ -50244,10 +51963,10 @@ definitions: - BITCOIN - SHA512_256 default: NO_HASH + description: ' - NO_HASH: NO_HASH is the default if no data passed. Note this is an illegal argument some places.' title: >- - - NO_HASH: NO_HASH is the default if no data passed. - Note this is an illegal argument some places. - - BITCOIN: ripemd160(sha256(x)) + hash is the algorithm that must be used for each + InnerOp description: >- InnerSpec contains all store-specific structure info to determine if two proofs from a @@ -50466,11 +52185,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -50601,11 +52318,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -50663,11 +52378,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -50739,11 +52452,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -50831,11 +52542,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string 
format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -50891,11 +52600,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -50964,11 +52671,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -51037,11 +52742,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator valset_update_id: @@ -51082,11 +52785,9 @@ definitions: address: type: string format: byte - title: The first 20 bytes of SHA256(public key) power: type: string format: int64 - description: The voting power title: PubKey pub_key = 2 [(gogoproto.nullable)=false]; title: Validator tendermint.abci.ValidatorUpdate: @@ -51196,6 +52897,151 @@ definitions: description: params holds all the parameters of this module. type: object description: QueryParamsResponse is response type for the Query/Params RPC method. + neutron.cron.MsgExecuteContract: + type: object + properties: + contract: + type: string + title: Contract is the address of the smart contract + msg: + type: string + title: Msg is json encoded message to be passed to the contract + neutron.cron.Params: + type: object + properties: + security_address: + type: string + title: Security address that can remove schedules + limit: + type: string + format: uint64 + title: Limit of schedules executed in one block + description: Params defines the parameters for the module. 
+ neutron.cron.QueryGetScheduleResponse: + type: object + properties: + schedule: + type: object + properties: + name: + type: string + title: Name of schedule + period: + type: string + format: uint64 + title: Period in blocks + msgs: + type: array + items: + type: object + properties: + contract: + type: string + title: Contract is the address of the smart contract + msg: + type: string + title: Msg is json encoded message to be passed to the contract + title: Msgs that will be executed every period amount of time + last_execute_height: + type: string + format: uint64 + title: Last execution's block height + neutron.cron.QueryParamsResponse: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + properties: + security_address: + type: string + title: Security address that can remove schedules + limit: + type: string + format: uint64 + title: Limit of schedules executed in one block + neutron.cron.QuerySchedulesResponse: + type: object + properties: + schedules: + type: array + items: + type: object + properties: + name: + type: string + title: Name of schedule + period: + type: string + format: uint64 + title: Period in blocks + msgs: + type: array + items: + type: object + properties: + contract: + type: string + title: Contract is the address of the smart contract + msg: + type: string + title: Msg is json encoded message to be passed to the contract + title: Msgs that will be executed every period amount of time + last_execute_height: + type: string + format: uint64 + title: Last execution's block height + pagination: + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse 
is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + neutron.cron.Schedule: + type: object + properties: + name: + type: string + title: Name of schedule + period: + type: string + format: uint64 + title: Period in blocks + msgs: + type: array + items: + type: object + properties: + contract: + type: string + title: Contract is the address of the smart contract + msg: + type: string + title: Msg is json encoded message to be passed to the contract + title: Msgs that will be executed every period amount of time + last_execute_height: + type: string + format: uint64 + title: Last execution's block height neutron.feeburner.Params: type: object properties: @@ -51605,14 +53451,6 @@ definitions: type: object properties: next_block_header: - title: >- - We need to know block X+1 to verify response of transaction for block - X - - since LastResultsHash is root hash of all results from the txs from - the - - previous block type: object properties: '@type': @@ -51769,8 +53607,15 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: >- + We need to know block X+1 to verify response of transaction for block + X + + since LastResultsHash is root hash of all results from the txs from + the + + previous block header: - title: We need to know block X to verify inclusion of transaction for block X type: object properties: '@type': @@ -51927,6 +53772,7 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: We need to know block X to verify inclusion of transaction for block X tx: type: object properties: @@ -51941,10 +53787,8 @@ definitions: format: byte log: type: string - title: nondeterministic info: type: string - title: nondeterministic gas_wanted: type: string format: int64 @@ -51971,7 +53815,6 @@ definitions: format: byte index: type: boolean - 
title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -51983,7 +53826,6 @@ definitions: ResponseDeliverTx. Later, transactions may be queried using these events. - title: nondeterministic codespace: type: string delivery_proof: @@ -52417,14 +54259,6 @@ definitions: type: object properties: next_block_header: - title: >- - We need to know block X+1 to verify response of transaction - for block X - - since LastResultsHash is root hash of all results from the txs - from the - - previous block type: object properties: '@type': @@ -52593,10 +54427,15 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - header: title: >- - We need to know block X to verify inclusion of transaction for - block X + We need to know block X+1 to verify response of transaction + for block X + + since LastResultsHash is root hash of all results from the txs + from the + + previous block + header: type: object properties: '@type': @@ -52765,6 +54604,9 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: >- + We need to know block X to verify inclusion of transaction for + block X tx: type: object properties: @@ -52779,10 +54621,8 @@ definitions: format: byte log: type: string - title: nondeterministic info: type: string - title: nondeterministic gas_wanted: type: string format: int64 @@ -52809,7 +54649,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -52822,7 +54661,6 @@ definitions: Later, transactions may be queried using these events. 
- title: nondeterministic codespace: type: string delivery_proof: @@ -52933,14 +54771,6 @@ definitions: type: object properties: next_block_header: - title: >- - We need to know block X+1 to verify response of transaction for - block X - - since LastResultsHash is root hash of all results from the txs - from the - - previous block type: object properties: '@type': @@ -53103,10 +54933,15 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - header: title: >- - We need to know block X to verify inclusion of transaction for + We need to know block X+1 to verify response of transaction for block X + + since LastResultsHash is root hash of all results from the txs + from the + + previous block + header: type: object properties: '@type': @@ -53269,6 +55104,9 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: >- + We need to know block X to verify inclusion of transaction for + block X tx: type: object properties: @@ -53283,10 +55121,8 @@ definitions: format: byte log: type: string - title: nondeterministic info: type: string - title: nondeterministic gas_wanted: type: string format: int64 @@ -53313,7 +55149,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -53325,7 +55160,6 @@ definitions: and ResponseDeliverTx. Later, transactions may be queried using these events. - title: nondeterministic codespace: type: string delivery_proof: @@ -53521,10 +55355,8 @@ definitions: format: byte log: type: string - title: nondeterministic info: type: string - title: nondeterministic gas_wanted: type: string format: int64 @@ -53551,7 +55383,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -53563,7 +55394,6 @@ definitions: ResponseDeliverTx. 
Later, transactions may be queried using these events. - title: nondeterministic codespace: type: string delivery_proof: @@ -53625,10 +55455,8 @@ definitions: format: byte log: type: string - title: nondeterministic info: type: string - title: nondeterministic gas_wanted: type: string format: int64 @@ -53655,7 +55483,6 @@ definitions: format: byte index: type: boolean - title: nondeterministic description: >- EventAttribute is a single key-value pair, associated with an event. @@ -53667,7 +55494,6 @@ definitions: ResponseDeliverTx. Later, transactions may be queried using these events. - title: nondeterministic codespace: type: string tendermint.crypto.Proof: @@ -53823,7 +55649,6 @@ definitions: type: object properties: denom_trace: - description: denom_trace returns the requested denomination trace information. type: object properties: path: @@ -53836,6 +55661,11 @@ definitions: base_denom: type: string description: base denomination of the relayed fungible token. + description: >- + DenomTrace contains the base denomination for ICS20 fungible tokens + and the + + source tracing information path. description: |- QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC method. @@ -53980,3 +55810,19 @@ definitions: type: object title: Params holds parameters for the tokenfactory module description: QueryParamsResponse is the response type for the Query/Params RPC method. + router.v1.Params: + type: object + properties: + fee_percentage: + type: string + description: Params defines the set of IBC router parameters. + router.v1.QueryParamsResponse: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + fee_percentage: + type: string + description: QueryParamsResponse is the response type for the Query/Params RPC method. 
diff --git a/network/init-neutrond.sh b/network/init-neutrond.sh index 5c295fcf7..82ea7fe6f 100755 --- a/network/init-neutrond.sh +++ b/network/init-neutrond.sh @@ -516,3 +516,5 @@ $BINARY add-wasm-message execute "$DAO_CONTRACT_ADDRESS" "$ADD_SUBDAOS_MSG" --ru sed -i -e 's/\"admins\":.*/\"admins\": [\"'"$DAO_CONTRACT_ADDRESS"'\"]/g' "$CHAIN_DIR/config/genesis.json" sed -i -e 's/\"treasury_address\":.*/\"treasury_address\":\"'"$TREASURY_CONTRACT_ADDRESS"'\"/g' "$CHAIN_DIR/config/genesis.json" +sed -i -e 's/\"security_address\":.*/\"security_address\":\"'"$DAO_CONTRACT_ADDRESS"'\",/g' "$CHAIN_DIR/config/genesis.json" +sed -i -e 's/\"limit\":.*/\"limit\":5/g' "$CHAIN_DIR/config/genesis.json" diff --git a/proto/cron/genesis.proto b/proto/cron/genesis.proto new file mode 100644 index 000000000..f887c5b90 --- /dev/null +++ b/proto/cron/genesis.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package neutron.cron; + +import "gogoproto/gogo.proto"; +import "cron/params.proto"; +import "cron/schedule.proto"; +// this line is used by starport scaffolding # genesis/proto/import + +option go_package = "github.com/neutron-org/neutron/x/cron/types"; + +// GenesisState defines the cron module's genesis state. +message GenesisState { + repeated Schedule scheduleList = 2 [(gogoproto.nullable) = false]; + Params params = 1 [ (gogoproto.nullable) = false ]; + // this line is used by starport scaffolding # genesis/proto/state +} diff --git a/proto/cron/params.proto b/proto/cron/params.proto new file mode 100644 index 000000000..7dfad3181 --- /dev/null +++ b/proto/cron/params.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package neutron.cron; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/neutron-org/neutron/x/cron/types"; + +// Params defines the parameters for the module. 
+message Params { + option (gogoproto.goproto_stringer) = false; + // Security address that can remove schedules + string security_address = 1; + // Limit of schedules executed in one block + uint64 limit = 2; +} diff --git a/proto/cron/query.proto b/proto/cron/query.proto new file mode 100644 index 000000000..ae39bcd04 --- /dev/null +++ b/proto/cron/query.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; +package neutron.cron; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "cron/params.proto"; +import "cron/schedule.proto"; +// this line is used by starport scaffolding # 1 + +option go_package = "github.com/neutron-org/neutron/x/cron/types"; + +// Query defines the gRPC querier service. +service Query { + // Queries the parameters of the module. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/neutron/cron/params"; + } + + // Queries a Schedule by name. + rpc Schedule(QueryGetScheduleRequest) returns (QueryGetScheduleResponse) { + option (google.api.http).get = "/neutron/cron/schedule/{name}"; + } + + // Queries a list of Schedule items. + rpc Schedules(QuerySchedulesRequest) returns (QuerySchedulesResponse) { + option (google.api.http).get = "/neutron/cron/schedule"; + } + +// this line is used by starport scaffolding # 2 +} + +message QueryParamsRequest {} + +message QueryParamsResponse { + // params holds all the parameters of this module. 
+ Params params = 1 [(gogoproto.nullable) = false]; +} + +message QueryGetScheduleRequest { + string name = 1; +} + +message QueryGetScheduleResponse { + Schedule schedule = 1 [(gogoproto.nullable) = false]; +} + +message QuerySchedulesRequest { + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +message QuerySchedulesResponse { + repeated Schedule schedules = 1 [(gogoproto.nullable) = false]; + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// this line is used by starport scaffolding # 3 diff --git a/proto/cron/schedule.proto b/proto/cron/schedule.proto new file mode 100644 index 000000000..d4fbd337e --- /dev/null +++ b/proto/cron/schedule.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; +package neutron.cron; + +option go_package = "github.com/neutron-org/neutron/x/cron/types"; + +import "gogoproto/gogo.proto"; + +message Schedule { + // Name of schedule + string name = 1; + // Period in blocks + uint64 period = 2; + // Msgs that will be executed every period amount of time + repeated MsgExecuteContract msgs = 3 [ (gogoproto.nullable) = false ]; + // Last execution's block height + uint64 last_execute_height = 4; +} + +message MsgExecuteContract { + // Contract is the address of the smart contract + string contract = 1; + // Msg is json encoded message to be passed to the contract + string msg = 2; +} + +message ScheduleCount { + // Count is the number of current schedules + int32 count = 1; +} diff --git a/proto/cron/tx.proto b/proto/cron/tx.proto new file mode 100644 index 000000000..79cf8b235 --- /dev/null +++ b/proto/cron/tx.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; +package neutron.cron; + +// this line is used by starport scaffolding # proto/tx/import + +option go_package = "github.com/neutron-org/neutron/x/cron/types"; + +// Msg defines the Msg service. 
+service Msg { +// this line is used by starport scaffolding # proto/tx/rpc +} + + +// this line is used by starport scaffolding # proto/tx/message diff --git a/testutil/contractmanager/network/network.go b/testutil/contractmanager/network/network.go index 9f223d25b..0643f8e0a 100644 --- a/testutil/contractmanager/network/network.go +++ b/testutil/contractmanager/network/network.go @@ -51,7 +51,7 @@ func New(t *testing.T, configs ...network.Config) *network.Network { // DefaultConfig will initialize config for the network with custom application, // genesis and single validator. All other parameters are inherited from cosmos-sdk/testutil/network.DefaultConfig func DefaultConfig() network.Config { - // app doesn't have this modules anymore, but we need them for test setup, which uses gentx and MsgCreateValidator + // app doesn't have these modules anymore, but we need them for test setup, which uses gentx and MsgCreateValidator app.ModuleBasics[genutiltypes.ModuleName] = genutil.AppModuleBasic{} app.ModuleBasics[stakingtypes.ModuleName] = staking.AppModuleBasic{} diff --git a/testutil/cron/keeper/cron.go b/testutil/cron/keeper/cron.go new file mode 100644 index 000000000..7206a15c7 --- /dev/null +++ b/testutil/cron/keeper/cron.go @@ -0,0 +1,54 @@ +package keeper + +import ( + "testing" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/store" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + typesparams "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/neutron-org/neutron/x/cron/keeper" + "github.com/neutron-org/neutron/x/cron/types" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmdb "github.com/tendermint/tm-db" +) + +func CronKeeper(t testing.TB, wasmMsgServer types.WasmMsgServer, accountKeeper types.AccountKeeper) 
(*keeper.Keeper, sdk.Context) { + storeKey := sdk.NewKVStoreKey(types.StoreKey) + memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) + + db := tmdb.NewMemDB() + stateStore := store.NewCommitMultiStore(db) + stateStore.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(memStoreKey, storetypes.StoreTypeMemory, nil) + require.NoError(t, stateStore.LoadLatestVersion()) + + registry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(registry) + + paramsSubspace := typesparams.NewSubspace(cdc, + types.Amino, + storeKey, + memStoreKey, + "CronParams", + ) + k := keeper.NewKeeper( + cdc, + storeKey, + memStoreKey, + paramsSubspace, + accountKeeper, + ) + k.WasmMsgServer = wasmMsgServer + + ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, log.NewNopLogger()) + + // Initialize params + k.SetParams(ctx, types.DefaultParams()) + + return k, ctx +} diff --git a/testutil/cron/network/network.go b/testutil/cron/network/network.go new file mode 100644 index 000000000..98b7cd5bf --- /dev/null +++ b/testutil/cron/network/network.go @@ -0,0 +1,99 @@ +package network + +import ( + "fmt" + "testing" + "time" + + "github.com/cosmos/cosmos-sdk/x/genutil" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + staking "github.com/cosmos/cosmos-sdk/x/staking" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/neutron-org/neutron/testutil/consumer" + + storetypes "github.com/cosmos/cosmos-sdk/store/types" + "github.com/neutron-org/neutron/app/params" + + "github.com/neutron-org/neutron/app" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + servertypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/cosmos/cosmos-sdk/testutil/network" + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + tmrand 
"github.com/tendermint/tendermint/libs/rand" + tmdb "github.com/tendermint/tm-db" +) + +type ( + Network = network.Network + Config = network.Config +) + +// New creates instance with fully configured cosmos network. +// Accepts optional config, that will be used in place of the DefaultConfig() if provided. +func New(t *testing.T, configs ...network.Config) *network.Network { + if len(configs) > 1 { + panic("at most one config should be provided") + } + var cfg network.Config + if len(configs) == 0 { + cfg = DefaultConfig() + } else { + cfg = configs[0] + } + net := network.New(t, cfg) + t.Cleanup(net.Cleanup) + return net +} + +// DefaultConfig will initialize config for the network with custom application, +// genesis and single validator. All other parameters are inherited from cosmos-sdk/testutil/network.DefaultConfig +func DefaultConfig() network.Config { + // app doesn't have this modules anymore, but we need them for test setup, which uses gentx and MsgCreateValidator + app.ModuleBasics[genutiltypes.ModuleName] = genutil.AppModuleBasic{} + app.ModuleBasics[stakingtypes.ModuleName] = staking.AppModuleBasic{} + + encoding := app.MakeEncodingConfig() + return network.Config{ + Codec: encoding.Marshaler, + TxConfig: encoding.TxConfig, + LegacyAmino: encoding.Amino, + InterfaceRegistry: encoding.InterfaceRegistry, + AccountRetriever: authtypes.AccountRetriever{}, + AppConstructor: func(val network.Validator) servertypes.Application { + err := consumer.ModifyConsumerGenesis(val) + if err != nil { + panic(err) + } + + return app.New( + val.Ctx.Logger, tmdb.NewMemDB(), nil, true, map[int64]bool{}, val.Ctx.Config.RootDir, 0, + encoding, + app.GetEnabledProposals(), + simapp.EmptyAppOptions{}, + nil, + baseapp.SetPruning(storetypes.NewPruningOptionsFromString(val.AppConfig.Pruning)), + baseapp.SetMinGasPrices(val.AppConfig.MinGasPrices), + ) + }, + GenesisState: app.ModuleBasics.DefaultGenesis(encoding.Marshaler), + TimeoutCommit: 2 * time.Second, + ChainID: "chain-" 
+ tmrand.NewRand().Str(6), + // Some changes are introduced to make the tests run as if neutron is a standalone chain. + // This will only work if NumValidators is set to 1. + NumValidators: 1, + BondDenom: params.DefaultDenom, + MinGasPrices: fmt.Sprintf("0.000006%s", params.DefaultDenom), + AccountTokens: sdk.TokensFromConsensusPower(1000, sdk.DefaultPowerReduction), + StakingTokens: sdk.TokensFromConsensusPower(500, sdk.DefaultPowerReduction), + BondedTokens: sdk.TokensFromConsensusPower(100, sdk.DefaultPowerReduction), + PruningStrategy: storetypes.PruningOptionNothing, + CleanupDir: true, + SigningAlgo: string(hd.Secp256k1Type), + KeyringOptions: []keyring.Option{}, + } +} diff --git a/testutil/cron/nullify/nullify.go b/testutil/cron/nullify/nullify.go new file mode 100644 index 000000000..3b968c09c --- /dev/null +++ b/testutil/cron/nullify/nullify.go @@ -0,0 +1,57 @@ +// Package nullify provides methods to init nil values structs for test assertion. +package nullify + +import ( + "reflect" + "unsafe" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var ( + coinType = reflect.TypeOf(sdk.Coin{}) + coinsType = reflect.TypeOf(sdk.Coins{}) +) + +// Fill analyze all struct fields and slices with +// reflection and initialize the nil and empty slices, +// structs, and pointers. 
+func Fill(x interface{}) interface{} { + v := reflect.Indirect(reflect.ValueOf(x)) + switch v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + obj := v.Index(i) + objPt := reflect.NewAt(obj.Type(), unsafe.Pointer(obj.UnsafeAddr())).Interface() + objPt = Fill(objPt) + obj.Set(reflect.ValueOf(objPt)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + f := reflect.Indirect(v.Field(i)) + if !f.CanSet() { + continue + } + switch f.Kind() { + case reflect.Slice: + f.Set(reflect.MakeSlice(f.Type(), 0, 0)) + case reflect.Struct: + switch f.Type() { + case coinType: + coin := reflect.New(coinType).Interface() + s := reflect.ValueOf(coin).Elem() + f.Set(s) + case coinsType: + coins := reflect.New(coinsType).Interface() + s := reflect.ValueOf(coins).Elem() + f.Set(s) + default: + objPt := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Interface() + s := Fill(objPt) + f.Set(reflect.ValueOf(s)) + } + } + } + } + return reflect.Indirect(v).Interface() +} diff --git a/testutil/cron/sample/sample.go b/testutil/cron/sample/sample.go new file mode 100644 index 000000000..98f2153ed --- /dev/null +++ b/testutil/cron/sample/sample.go @@ -0,0 +1,13 @@ +package sample + +import ( + "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// AccAddress returns a sample account address +func AccAddress() string { + pk := ed25519.GenPrivKey().PubKey() + addr := pk.Address() + return sdk.AccAddress(addr).String() +} diff --git a/testutil/mocks/cron/types/expected_keepers.go b/testutil/mocks/cron/types/expected_keepers.go new file mode 100644 index 000000000..ab9265ad9 --- /dev/null +++ b/testutil/mocks/cron/types/expected_keepers.go @@ -0,0 +1,89 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./../../x/cron/types/expected_keepers.go + +// Package mock_types is a generated GoMock package. 
+package mock_types + +import ( + context "context" + reflect "reflect" + + types "github.com/CosmWasm/wasmd/x/wasm/types" + types0 "github.com/cosmos/cosmos-sdk/types" + gomock "github.com/golang/mock/gomock" +) + +// MockAccountKeeper is a mock of AccountKeeper interface. +type MockAccountKeeper struct { + ctrl *gomock.Controller + recorder *MockAccountKeeperMockRecorder +} + +// MockAccountKeeperMockRecorder is the mock recorder for MockAccountKeeper. +type MockAccountKeeperMockRecorder struct { + mock *MockAccountKeeper +} + +// NewMockAccountKeeper creates a new mock instance. +func NewMockAccountKeeper(ctrl *gomock.Controller) *MockAccountKeeper { + mock := &MockAccountKeeper{ctrl: ctrl} + mock.recorder = &MockAccountKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAccountKeeper) EXPECT() *MockAccountKeeperMockRecorder { + return m.recorder +} + +// GetModuleAddress mocks base method. +func (m *MockAccountKeeper) GetModuleAddress(moduleName string) types0.AccAddress { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetModuleAddress", moduleName) + ret0, _ := ret[0].(types0.AccAddress) + return ret0 +} + +// GetModuleAddress indicates an expected call of GetModuleAddress. +func (mr *MockAccountKeeperMockRecorder) GetModuleAddress(moduleName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetModuleAddress", reflect.TypeOf((*MockAccountKeeper)(nil).GetModuleAddress), moduleName) +} + +// MockWasmMsgServer is a mock of WasmMsgServer interface. +type MockWasmMsgServer struct { + ctrl *gomock.Controller + recorder *MockWasmMsgServerMockRecorder +} + +// MockWasmMsgServerMockRecorder is the mock recorder for MockWasmMsgServer. +type MockWasmMsgServerMockRecorder struct { + mock *MockWasmMsgServer +} + +// NewMockWasmMsgServer creates a new mock instance. 
+func NewMockWasmMsgServer(ctrl *gomock.Controller) *MockWasmMsgServer { + mock := &MockWasmMsgServer{ctrl: ctrl} + mock.recorder = &MockWasmMsgServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWasmMsgServer) EXPECT() *MockWasmMsgServerMockRecorder { + return m.recorder +} + +// ExecuteContract mocks base method. +func (m *MockWasmMsgServer) ExecuteContract(arg0 context.Context, arg1 *types.MsgExecuteContract) (*types.MsgExecuteContractResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteContract", arg0, arg1) + ret0, _ := ret[0].(*types.MsgExecuteContractResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteContract indicates an expected call of ExecuteContract. +func (mr *MockWasmMsgServerMockRecorder) ExecuteContract(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteContract", reflect.TypeOf((*MockWasmMsgServer)(nil).ExecuteContract), arg0, arg1) +} diff --git a/testutil/mocks/gomock.go b/testutil/mocks/gomock.go index 0f54bb7b9..b9f0de766 100644 --- a/testutil/mocks/gomock.go +++ b/testutil/mocks/gomock.go @@ -7,3 +7,4 @@ package mocks //go:generate mockgen -source=./../../x/interchaintxs/types/expected_keepers.go -destination ./interchaintxs/types/expected_keepers.go //go:generate mockgen -source=./../../x/transfer/types/expected_keepers.go -destination ./transfer/types/expected_keepers.go //go:generate mockgen -source=./../../x/feeburner/types/expected_keepers.go -destination ./feeburner/types/expected_keepers.go +//go:generate mockgen -source=./../../x/cron/types/expected_keepers.go -destination ./cron/types/expected_keepers.go diff --git a/wasmbinding/bindings/msg.go b/wasmbinding/bindings/msg.go index c6ec59ab3..381591842 100644 --- a/wasmbinding/bindings/msg.go +++ b/wasmbinding/bindings/msg.go @@ -42,6 +42,10 @@ type NeutronMsg struct { /// that they are 
the admin of. /// Currently, the burn from address must be the admin contract. BurnTokens *BurnTokens `json:"burn_tokens,omitempty"` + + // Cron types + AddSchedule *AddSchedule `json:"add_schedule,omitempty"` + RemoveSchedule *RemoveSchedule `json:"remove_schedule,omitempty"` } // SubmitTx submits interchain transaction on a remote chain. @@ -207,3 +211,29 @@ type BurnTokens struct { // BurnFromAddress must be set to "" for now. BurnFromAddress string `json:"burn_from_address"` } + +// AddSchedule adds new schedule to the cron module +type AddSchedule struct { + Name string `json:"name"` + Period uint64 `json:"period"` + Msgs []MsgExecuteContract `json:"msgs"` +} + +// AddScheduleResponse holds response AddSchedule +type AddScheduleResponse struct{} + +// RemoveSchedule removes existing schedule with given name +type RemoveSchedule struct { + Name string `json:"name"` +} + +// RemoveScheduleResponse holds response RemoveSchedule +type RemoveScheduleResponse struct{} + +// MsgExecuteContract defined separate from wasmtypes since we can get away with just passing the string into bindings +type MsgExecuteContract struct { + // Contract is the address of the smart contract + Contract string `json:"contract,omitempty"` + // Msg json encoded message to be passed to the contract + Msg string `json:"msg,omitempty"` +} diff --git a/wasmbinding/message_plugin.go b/wasmbinding/message_plugin.go index fb568434e..066a9e56f 100644 --- a/wasmbinding/message_plugin.go +++ b/wasmbinding/message_plugin.go @@ -4,17 +4,22 @@ import ( "encoding/json" "fmt" + crontypes "github.com/neutron-org/neutron/x/cron/types" + + wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + + cronkeeper "github.com/neutron-org/neutron/x/cron/keeper" + paramChange "github.com/cosmos/cosmos-sdk/x/params/types/proposal" wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" - wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" wasmvmtypes "github.com/CosmWasm/wasmvm/types" 
"github.com/cosmos/cosmos-sdk/codec/types" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" softwareUpgrade "github.com/cosmos/cosmos-sdk/x/upgrade/types" - adminkeeper "github.com/cosmos/admin-module/x/adminmodule/keeper" + adminmodulekeeper "github.com/cosmos/admin-module/x/adminmodule/keeper" admintypes "github.com/cosmos/admin-module/x/adminmodule/types" bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" @@ -35,9 +40,10 @@ func CustomMessageDecorator( ictx *ictxkeeper.Keeper, icq *icqkeeper.Keeper, transferKeeper transferwrapperkeeper.KeeperTransferWrapper, - admKeeper *adminkeeper.Keeper, + adminKeeper *adminmodulekeeper.Keeper, bankKeeper *bankkeeper.BaseKeeper, tokenFactoryKeeper *tokenfactorykeeper.Keeper, + cronKeeper *cronkeeper.Keeper, ) func(messenger wasmkeeper.Messenger) wasmkeeper.Messenger { return func(old wasmkeeper.Messenger) wasmkeeper.Messenger { return &CustomMessenger{ @@ -46,9 +52,11 @@ func CustomMessageDecorator( Ictxmsgserver: ictxkeeper.NewMsgServerImpl(*ictx), Icqmsgserver: icqkeeper.NewMsgServerImpl(*icq), transferKeeper: transferKeeper, - Adminserver: adminkeeper.NewMsgServerImpl(*admKeeper), + Adminserver: adminmodulekeeper.NewMsgServerImpl(*adminKeeper), Bank: bankKeeper, TokenFactory: tokenFactoryKeeper, + CronKeeper: cronKeeper, + AdminKeeper: adminKeeper, } } } @@ -62,6 +70,8 @@ type CustomMessenger struct { Adminserver admintypes.MsgServer Bank *bankkeeper.BaseKeeper TokenFactory *tokenfactorykeeper.Keeper + CronKeeper *cronkeeper.Keeper + AdminKeeper *adminmodulekeeper.Keeper } var _ wasmkeeper.Messenger = (*CustomMessenger)(nil) @@ -111,6 +121,12 @@ func (m *CustomMessenger) DispatchMsg(ctx sdk.Context, contractAddr sdk.AccAddre if contractMsg.BurnTokens != nil { return m.burnTokens(ctx, contractAddr, contractMsg.BurnTokens) } + if contractMsg.AddSchedule != nil { + return m.addSchedule(ctx, contractAddr, contractMsg.AddSchedule) + } + if contractMsg.RemoveSchedule != nil { + 
return m.removeSchedule(ctx, contractAddr, contractMsg.RemoveSchedule) + } } return m.Wrapped.DispatchMsg(ctx, contractAddr, contractIBCPortID, msg) @@ -779,3 +795,78 @@ func (m *CustomMessenger) validateProposalQty(proposal *bindings.AdminProposal) return fmt.Errorf("more than one admin proposal type is present in message") } + +func (m *CustomMessenger) addSchedule(ctx sdk.Context, contractAddr sdk.AccAddress, addSchedule *bindings.AddSchedule) ([]sdk.Event, [][]byte, error) { + if !m.isAdmin(ctx, contractAddr) { + return nil, nil, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "only admin can add schedule") + } + + msgs := make([]crontypes.MsgExecuteContract, 0, len(addSchedule.Msgs)) + for _, msg := range addSchedule.Msgs { + msgs = append(msgs, crontypes.MsgExecuteContract{ + Contract: msg.Contract, + Msg: msg.Msg, + }) + } + + err := m.CronKeeper.AddSchedule(ctx, addSchedule.Name, addSchedule.Period, msgs) + if err != nil { + ctx.Logger().Error("failed to addSchedule", + "from_address", contractAddr.String(), + "error", err, + ) + return nil, nil, sdkerrors.Wrap(err, "marshal json failed") + } + + resp := bindings.AddScheduleResponse{} + data, err := json.Marshal(&resp) + if err != nil { + ctx.Logger().Error("json.Marshal: failed to marshal add schedule response to JSON", + "from_address", contractAddr.String(), + "error", err, + ) + return nil, nil, sdkerrors.Wrap(err, "marshal json failed") + } + + ctx.Logger().Debug("schedule added", + "from_address", contractAddr.String(), + "name", addSchedule.Name, + "period", addSchedule.Period, + ) + return nil, [][]byte{data}, nil +} + +func (m *CustomMessenger) removeSchedule(ctx sdk.Context, contractAddr sdk.AccAddress, removeSchedule *bindings.RemoveSchedule) ([]sdk.Event, [][]byte, error) { + params := m.CronKeeper.GetParams(ctx) + if !m.isAdmin(ctx, contractAddr) && contractAddr.String() != params.SecurityAddress { + return nil, nil, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "only admin or security dao can remove 
schedule") + } + + m.CronKeeper.RemoveSchedule(ctx, removeSchedule.Name) + + resp := bindings.RemoveScheduleResponse{} + data, err := json.Marshal(&resp) + if err != nil { + ctx.Logger().Error("json.Marshal: failed to marshal remove schedule response to JSON", + "from_address", contractAddr.String(), + "error", err, + ) + return nil, nil, sdkerrors.Wrap(err, "marshal json failed") + } + + ctx.Logger().Debug("schedule removed", + "from_address", contractAddr.String(), + "name", removeSchedule.Name, + ) + return nil, [][]byte{data}, nil +} + +func (m *CustomMessenger) isAdmin(ctx sdk.Context, contractAddr sdk.AccAddress) bool { + for _, admin := range m.AdminKeeper.GetAdmins(ctx) { + if admin == contractAddr.String() { + return true + } + } + + return false +} diff --git a/wasmbinding/test/custom_message_test.go b/wasmbinding/test/custom_message_test.go index 5fb1ee964..d59587c40 100644 --- a/wasmbinding/test/custom_message_test.go +++ b/wasmbinding/test/custom_message_test.go @@ -50,6 +50,8 @@ func (suite *CustomMessengerTestSuite) SetupTest() { suite.messenger.Adminserver = adminkeeper.NewMsgServerImpl(suite.neutron.AdminmoduleKeeper) suite.messenger.Bank = &suite.neutron.BankKeeper suite.messenger.TokenFactory = suite.neutron.TokenFactoryKeeper + suite.messenger.CronKeeper = &suite.neutron.CronKeeper + suite.messenger.AdminKeeper = &suite.neutron.AdminmoduleKeeper suite.contractOwner = keeper.RandomAccountAddress(suite.T()) } @@ -513,6 +515,59 @@ func (suite *CustomMessengerTestSuite) TestNoProposals() { suite.ErrorContains(err, "no admin proposal type is present in message") } +func (suite *CustomMessengerTestSuite) TestAddRemoveSchedule() { + // Store code and instantiate reflect contract + codeId := suite.StoreReflectCode(suite.ctx, suite.contractOwner, "../testdata/reflect.wasm") + suite.contractAddress = suite.InstantiateReflectContract(suite.ctx, suite.contractOwner, codeId) + suite.Require().NotEmpty(suite.contractAddress) + + // Set admin so that we can 
execute this proposal without permission error + suite.neutron.AdminmoduleKeeper.SetAdmin(suite.ctx, suite.contractAddress.String()) + + // Craft AddSchedule message + msg, err := json.Marshal(bindings.NeutronMsg{ + AddSchedule: &bindings.AddSchedule{ + Name: "schedule1", + Period: 5, + Msgs: []bindings.MsgExecuteContract{ + { + Contract: suite.contractAddress.String(), + Msg: "{\"send\": { \"to\": \"asdf\", \"amount\": 1000 }}", + }, + }, + }, + }) + suite.NoError(err) + + // Dispatch AddSchedule message + events, data, err := suite.messenger.DispatchMsg(suite.ctx, suite.contractAddress, suite.Path.EndpointA.ChannelConfig.PortID, types.CosmosMsg{ + Custom: msg, + }) + suite.NoError(err) + suite.Nil(events) + expected, err := json.Marshal(&bindings.AddScheduleResponse{}) + suite.NoError(err) + suite.Equal([][]uint8{expected}, data) + + // Craft RemoveSchedule message + msg, err = json.Marshal(bindings.NeutronMsg{ + RemoveSchedule: &bindings.RemoveSchedule{ + Name: "schedule1", + }, + }) + suite.NoError(err) + + // Dispatch AddSchedule message + events, data, err = suite.messenger.DispatchMsg(suite.ctx, suite.contractAddress, suite.Path.EndpointA.ChannelConfig.PortID, types.CosmosMsg{ + Custom: msg, + }) + suite.NoError(err) + suite.Nil(events) + expected, err = json.Marshal(&bindings.RemoveScheduleResponse{}) + suite.NoError(err) + suite.Equal([][]uint8{expected}, data) +} + func (suite *CustomMessengerTestSuite) executeCustomMsg(owner sdk.AccAddress, fullMsg bindings.NeutronMsg) (result [][]byte, msg []byte) { msg, err := json.Marshal(fullMsg) suite.NoError(err) diff --git a/wasmbinding/wasm.go b/wasmbinding/wasm.go index baf765efd..cb7be3872 100644 --- a/wasmbinding/wasm.go +++ b/wasmbinding/wasm.go @@ -4,10 +4,11 @@ import ( "github.com/CosmWasm/wasmd/x/wasm" wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + cronkeeper "github.com/neutron-org/neutron/x/cron/keeper" feeburnerkeeper 
"github.com/neutron-org/neutron/x/feeburner/keeper" feerefunderkeeper "github.com/neutron-org/neutron/x/feerefunder/keeper" - adminmodulemodulekeeper "github.com/cosmos/admin-module/x/adminmodule/keeper" + adminmodulekeeper "github.com/cosmos/admin-module/x/adminmodule/keeper" interchainqueriesmodulekeeper "github.com/neutron-org/neutron/x/interchainqueries/keeper" interchaintransactionsmodulekeeper "github.com/neutron-org/neutron/x/interchaintxs/keeper" @@ -20,11 +21,12 @@ func RegisterCustomPlugins( ictxKeeper *interchaintransactionsmodulekeeper.Keeper, icqKeeper *interchainqueriesmodulekeeper.Keeper, transfer transfer.KeeperTransferWrapper, - admKeeper *adminmodulemodulekeeper.Keeper, + adminKeeper *adminmodulekeeper.Keeper, feeBurnerKeeper *feeburnerkeeper.Keeper, feeRefunderKeeper *feerefunderkeeper.Keeper, bank *bankkeeper.BaseKeeper, tfk *tokenfactorykeeper.Keeper, + cronKeeper *cronkeeper.Keeper, ) []wasmkeeper.Option { wasmQueryPlugin := NewQueryPlugin(ictxKeeper, icqKeeper, feeBurnerKeeper, feeRefunderKeeper, tfk) @@ -32,7 +34,7 @@ func RegisterCustomPlugins( Custom: CustomQuerier(wasmQueryPlugin), }) messagePluginOpt := wasmkeeper.WithMessageHandlerDecorator( - CustomMessageDecorator(ictxKeeper, icqKeeper, transfer, admKeeper, bank, tfk), + CustomMessageDecorator(ictxKeeper, icqKeeper, transfer, adminKeeper, bank, tfk, cronKeeper), ) return []wasm.Option{ diff --git a/x/cron/client/cli/query.go b/x/cron/client/cli/query.go new file mode 100644 index 000000000..01c41d2a8 --- /dev/null +++ b/x/cron/client/cli/query.go @@ -0,0 +1,29 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/neutron-org/neutron/x/cron/types" +) + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd(_ string) *cobra.Command { + // Group cron queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", 
types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdQueryParams()) + cmd.AddCommand(CmdListSchedule()) + cmd.AddCommand(CmdShowSchedule()) + // this line is used by starport scaffolding # 1 + + return cmd +} diff --git a/x/cron/client/cli/query_params.go b/x/cron/client/cli/query_params.go new file mode 100644 index 000000000..346ed3088 --- /dev/null +++ b/x/cron/client/cli/query_params.go @@ -0,0 +1,34 @@ +package cli + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/neutron-org/neutron/x/cron/types" + "github.com/spf13/cobra" +) + +func CmdQueryParams() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "shows the parameters of the module", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + res, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/cron/client/cli/query_schedule.go b/x/cron/client/cli/query_schedule.go new file mode 100644 index 000000000..ae5b3111e --- /dev/null +++ b/x/cron/client/cli/query_schedule.go @@ -0,0 +1,73 @@ +package cli + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/neutron-org/neutron/x/cron/types" + "github.com/spf13/cobra" +) + +func CmdListSchedule() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-schedule", + Short: "list all schedule", + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + queryClient := 
types.NewQueryClient(clientCtx) + + params := &types.QuerySchedulesRequest{ + Pagination: pageReq, + } + + res, err := queryClient.Schedules(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdShowSchedule() *cobra.Command { + cmd := &cobra.Command{ + Use: "show-schedule [name]", + Short: "shows a schedule", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + argName := args[0] + + params := &types.QueryGetScheduleRequest{ + Name: argName, + } + + res, err := queryClient.Schedule(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/cron/client/cli/query_schedule_test.go b/x/cron/client/cli/query_schedule_test.go new file mode 100644 index 000000000..6864b8c95 --- /dev/null +++ b/x/cron/client/cli/query_schedule_test.go @@ -0,0 +1,164 @@ +package cli_test + +import ( + "fmt" + "strconv" + "testing" + + "github.com/cosmos/cosmos-sdk/client/flags" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + "github.com/neutron-org/neutron/testutil/cron/network" + "github.com/neutron-org/neutron/testutil/cron/nullify" + "github.com/neutron-org/neutron/x/cron/client/cli" + "github.com/neutron-org/neutron/x/cron/types" + "github.com/stretchr/testify/require" + tmcli "github.com/tendermint/tendermint/libs/cli" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func networkWithScheduleObjects(t *testing.T, n int) (*network.Network, []types.Schedule) { + t.Helper() + cfg := network.DefaultConfig() + state := types.GenesisState{} + require.NoError(t, 
cfg.Codec.UnmarshalJSON(cfg.GenesisState[types.ModuleName], &state)) + + for i := 0; i < n; i++ { + schedule := types.Schedule{ + Name: strconv.Itoa(i), + Period: 1000, + Msgs: []types.MsgExecuteContract{}, + LastExecuteHeight: uint64(0), + } + state.ScheduleList = append(state.ScheduleList, schedule) + } + state.Params = types.DefaultParams() + buf, err := cfg.Codec.MarshalJSON(&state) + require.NoError(t, err) + cfg.GenesisState[types.ModuleName] = buf + return network.New(t, cfg), state.ScheduleList +} + +func TestShowSchedule(t *testing.T) { + net, objs := networkWithScheduleObjects(t, 2) + + ctx := net.Validators[0].ClientCtx + common := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + for _, tc := range []struct { + desc string + name string + + args []string + err error + obj types.Schedule + }{ + { + desc: "found", + name: objs[0].Name, + + args: common, + obj: objs[0], + err: nil, + }, + { + desc: "not found", + name: strconv.Itoa(100000), + + args: common, + err: status.Error(codes.NotFound, "not found"), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + args := []string{ + tc.name, + } + args = append(args, tc.args...) 
+ out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdShowSchedule(), args) + if tc.err != nil { + stat, ok := status.FromError(tc.err) + require.True(t, ok) + require.ErrorIs(t, stat.Err(), tc.err) + } else { + require.NoError(t, err) + var resp types.QueryGetScheduleResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NotNil(t, resp.Schedule) + require.Equal(t, + nullify.Fill(&tc.obj), + nullify.Fill(&resp.Schedule), + ) + } + }) + } +} + +func TestListSchedule(t *testing.T) { + net, objs := networkWithScheduleObjects(t, 5) + + ctx := net.Validators[0].ClientCtx + request := func(next []byte, offset, limit uint64, total bool) []string { + args := []string{ + fmt.Sprintf("--%s=json", tmcli.OutputFlag), + } + if next == nil { + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagOffset, offset)) + } else { + args = append(args, fmt.Sprintf("--%s=%s", flags.FlagPageKey, next)) + } + args = append(args, fmt.Sprintf("--%s=%d", flags.FlagLimit, limit)) + if total { + args = append(args, fmt.Sprintf("--%s", flags.FlagCountTotal)) + } + return args + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < len(objs); i += step { + args := request(nil, uint64(i), uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListSchedule(), args) + require.NoError(t, err) + var resp types.QuerySchedulesResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, len(resp.Schedules), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Schedules), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(objs); i += step { + args := request(next, 0, uint64(step), false) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListSchedule(), args) + require.NoError(t, err) + var resp types.QuerySchedulesResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.LessOrEqual(t, 
len(resp.Schedules), step) + require.Subset(t, + nullify.Fill(objs), + nullify.Fill(resp.Schedules), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + args := request(nil, 0, uint64(len(objs)), true) + out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListSchedule(), args) + require.NoError(t, err) + var resp types.QuerySchedulesResponse + require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp)) + require.NoError(t, err) + require.Equal(t, len(objs), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(objs), + nullify.Fill(resp.Schedules), + ) + }) +} diff --git a/x/cron/client/cli/tx.go b/x/cron/client/cli/tx.go new file mode 100644 index 000000000..d29429dc1 --- /dev/null +++ b/x/cron/client/cli/tx.go @@ -0,0 +1,25 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/neutron-org/neutron/x/cron/types" +) + +// GetTxCmd returns the transaction commands for this module +func GetTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + // this line is used by starport scaffolding # 1 + + return cmd +} diff --git a/x/cron/genesis.go b/x/cron/genesis.go new file mode 100644 index 000000000..5b45b1ecf --- /dev/null +++ b/x/cron/genesis.go @@ -0,0 +1,31 @@ +package cron + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/neutron-org/neutron/x/cron/keeper" + "github.com/neutron-org/neutron/x/cron/types" +) + +// InitGenesis initializes the module's state from a provided genesis state. 
+func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + // Set all the schedules + for _, elem := range genState.ScheduleList { + err := k.AddSchedule(ctx, elem.Name, elem.Period, elem.Msgs) + if err != nil { + panic(err) + } + } + // this line is used by starport scaffolding # genesis/module/init + k.SetParams(ctx, genState.Params) +} + +// ExportGenesis returns the module's exported genesis +func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + genesis := types.DefaultGenesis() + genesis.Params = k.GetParams(ctx) + genesis.ScheduleList = k.GetAllSchedules(ctx) + + // this line is used by starport scaffolding # genesis/module/export + + return genesis +} diff --git a/x/cron/genesis_test.go b/x/cron/genesis_test.go new file mode 100644 index 000000000..fbfe980cb --- /dev/null +++ b/x/cron/genesis_test.go @@ -0,0 +1,39 @@ +package cron_test + +import ( + "testing" + + "github.com/neutron-org/neutron/testutil/cron/keeper" + "github.com/neutron-org/neutron/testutil/cron/nullify" + "github.com/neutron-org/neutron/x/cron" + "github.com/neutron-org/neutron/x/cron/types" + "github.com/stretchr/testify/require" +) + +func TestGenesis(t *testing.T) { + k, ctx := keeper.CronKeeper(t, nil, nil) + + genesisState := types.GenesisState{ + Params: types.DefaultParams(), + ScheduleList: []types.Schedule{ + { + Name: "a", + Period: 5, + Msgs: nil, + LastExecuteHeight: uint64(ctx.BlockHeight()), + }, + }, + // this line is used by starport scaffolding # genesis/test/state + } + + cron.InitGenesis(ctx, *k, genesisState) + got := cron.ExportGenesis(ctx, *k) + require.NotNil(t, got) + + nullify.Fill(&genesisState) + nullify.Fill(got) + + require.Equal(t, genesisState.Params, got.Params) + require.ElementsMatch(t, genesisState.ScheduleList, got.ScheduleList) + // this line is used by starport scaffolding # genesis/test/assert +} diff --git a/x/cron/keeper/grpc_query.go b/x/cron/keeper/grpc_query.go new file mode 100644 index 
000000000..078f1c730 --- /dev/null +++ b/x/cron/keeper/grpc_query.go @@ -0,0 +1,7 @@ +package keeper + +import ( + "github.com/neutron-org/neutron/x/cron/types" +) + +var _ types.QueryServer = Keeper{} diff --git a/x/cron/keeper/grpc_query_params.go b/x/cron/keeper/grpc_query_params.go new file mode 100644 index 000000000..ca3842241 --- /dev/null +++ b/x/cron/keeper/grpc_query_params.go @@ -0,0 +1,19 @@ +package keeper + +import ( + "context" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/neutron-org/neutron/x/cron/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + return &types.QueryParamsResponse{Params: k.GetParams(ctx)}, nil +} diff --git a/x/cron/keeper/grpc_query_params_test.go b/x/cron/keeper/grpc_query_params_test.go new file mode 100644 index 000000000..a2db2836a --- /dev/null +++ b/x/cron/keeper/grpc_query_params_test.go @@ -0,0 +1,22 @@ +package keeper_test + +import ( + "testing" + + testkeeper "github.com/neutron-org/neutron/testutil/cron/keeper" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/neutron-org/neutron/x/cron/types" + "github.com/stretchr/testify/require" +) + +func TestParamsQuery(t *testing.T) { + keeper, ctx := testkeeper.CronKeeper(t, nil, nil) + wctx := sdk.WrapSDKContext(ctx) + params := types.DefaultParams() + keeper.SetParams(ctx, params) + + response, err := keeper.Params(wctx, &types.QueryParamsRequest{}) + require.NoError(t, err) + require.Equal(t, &types.QueryParamsResponse{Params: params}, response) +} diff --git a/x/cron/keeper/grpc_query_schedule.go b/x/cron/keeper/grpc_query_schedule.go new file mode 100644 index 000000000..46bf14fe5 --- /dev/null +++ b/x/cron/keeper/grpc_query_schedule.go @@ -0,0 +1,53 @@ +package keeper + +import ( + 
"context" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/neutron-org/neutron/x/cron/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) Schedules(c context.Context, req *types.QuerySchedulesRequest) (*types.QuerySchedulesResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + var schedules []types.Schedule + ctx := sdk.UnwrapSDKContext(c) + + scheduleStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.ScheduleKey) + + pageRes, err := query.Paginate(scheduleStore, req.Pagination, func(key []byte, value []byte) error { + var schedule types.Schedule + k.cdc.MustUnmarshal(value, &schedule) + + schedules = append(schedules, schedule) + return nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QuerySchedulesResponse{Schedules: schedules, Pagination: pageRes}, nil +} + +func (k Keeper) Schedule(c context.Context, req *types.QueryGetScheduleRequest) (*types.QueryGetScheduleResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + val, found := k.GetSchedule( + ctx, + req.Name, + ) + if !found { + return nil, status.Error(codes.NotFound, "schedule not found") + } + + return &types.QueryGetScheduleResponse{Schedule: *val}, nil +} diff --git a/x/cron/keeper/grpc_query_schedule_test.go b/x/cron/keeper/grpc_query_schedule_test.go new file mode 100644 index 000000000..fbaa08701 --- /dev/null +++ b/x/cron/keeper/grpc_query_schedule_test.go @@ -0,0 +1,145 @@ +package keeper_test + +import ( + "strconv" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + testutil_keeper "github.com/neutron-org/neutron/testutil/cron/keeper" + "github.com/neutron-org/neutron/testutil/cron/nullify" + 
cronkeeper "github.com/neutron-org/neutron/x/cron/keeper" + "github.com/neutron-org/neutron/x/cron/types" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Prevent strconv unused error +var _ = strconv.IntSize + +func TestScheduleQuerySingle(t *testing.T) { + k, ctx := testutil_keeper.CronKeeper(t, nil, nil) + wctx := sdk.WrapSDKContext(ctx) + schedules := createNSchedule(t, ctx, k, 2) + + for _, tc := range []struct { + desc string + request *types.QueryGetScheduleRequest + response *types.QueryGetScheduleResponse + err error + }{ + { + desc: "First", + request: &types.QueryGetScheduleRequest{ + Name: schedules[0].Name, + }, + response: &types.QueryGetScheduleResponse{Schedule: schedules[0]}, + }, + { + desc: "Second", + request: &types.QueryGetScheduleRequest{ + Name: schedules[1].Name, + }, + response: &types.QueryGetScheduleResponse{Schedule: schedules[1]}, + }, + { + desc: "KeyIsAbsent", + request: &types.QueryGetScheduleRequest{ + Name: "absent_key", + }, + err: status.Error(codes.NotFound, "schedule not found"), + }, + { + desc: "InvalidRequest", + err: status.Error(codes.InvalidArgument, "invalid request"), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + response, err := k.Schedule(wctx, tc.request) + if tc.err != nil { + require.ErrorIs(t, err, tc.err) + } else { + require.NoError(t, err) + require.Equal(t, + nullify.Fill(tc.response), + nullify.Fill(response), + ) + } + }) + } +} + +func TestScheduleQueryPaginated(t *testing.T) { + k, ctx := testutil_keeper.CronKeeper(t, nil, nil) + wctx := sdk.WrapSDKContext(ctx) + schedules := createNSchedule(t, ctx, k, 5) + + request := func(next []byte, offset, limit uint64, total bool) *types.QuerySchedulesRequest { + return &types.QuerySchedulesRequest{ + Pagination: &query.PageRequest{ + Key: next, + Offset: offset, + Limit: limit, + CountTotal: total, + }, + } + } + t.Run("ByOffset", func(t *testing.T) { + step := 2 + for i := 0; i < 
len(schedules); i += step { + resp, err := k.Schedules(wctx, request(nil, uint64(i), uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Schedules), step) + require.Subset(t, + nullify.Fill(schedules), + nullify.Fill(resp.Schedules), + ) + } + }) + t.Run("ByKey", func(t *testing.T) { + step := 2 + var next []byte + for i := 0; i < len(schedules); i += step { + resp, err := k.Schedules(wctx, request(next, 0, uint64(step), false)) + require.NoError(t, err) + require.LessOrEqual(t, len(resp.Schedules), step) + require.Subset(t, + nullify.Fill(schedules), + nullify.Fill(resp.Schedules), + ) + next = resp.Pagination.NextKey + } + }) + t.Run("Total", func(t *testing.T) { + resp, err := k.Schedules(wctx, request(nil, 0, 0, true)) + require.NoError(t, err) + require.Equal(t, len(schedules), int(resp.Pagination.Total)) + require.ElementsMatch(t, + nullify.Fill(schedules), + nullify.Fill(resp.Schedules), + ) + }) + t.Run("InvalidRequest", func(t *testing.T) { + _, err := k.Schedules(wctx, nil) + require.ErrorIs(t, err, status.Error(codes.InvalidArgument, "invalid request")) + }) +} + +func createNSchedule(t *testing.T, ctx sdk.Context, k *cronkeeper.Keeper, n int32) []types.Schedule { + res := make([]types.Schedule, n) + + for idx, item := range res { + item.Name = strconv.Itoa(idx) + item.Period = 1000 + item.Msgs = nil + item.LastExecuteHeight = uint64(ctx.BlockHeight()) + + err := k.AddSchedule(ctx, item.Name, item.Period, item.Msgs) + require.NoError(t, err) + + res[idx] = item + } + + return res +} diff --git a/x/cron/keeper/keeper.go b/x/cron/keeper/keeper.go new file mode 100644 index 000000000..81c5b41ed --- /dev/null +++ b/x/cron/keeper/keeper.go @@ -0,0 +1,257 @@ +package keeper + +import ( + "fmt" + "strconv" + "time" + + "github.com/armon/go-metrics" + "github.com/cosmos/cosmos-sdk/telemetry" + + wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + + "github.com/cosmos/cosmos-sdk/codec" + 
storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/neutron-org/neutron/x/cron/types" + "github.com/tendermint/tendermint/libs/log" +) + +var ( + LabelExecuteReadySchedules = "execute_ready_schedules" + LabelScheduleCount = "schedule_count" + LabelScheduleExecutionsCount = "schedule_executions_count" + + MetricLabelSuccess = "success" + MetricLabelScheduleName = "schedule_name" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + memKey storetypes.StoreKey + paramstore paramtypes.Subspace + accountKeeper types.AccountKeeper + WasmMsgServer types.WasmMsgServer + } +) + +func NewKeeper( + cdc codec.BinaryCodec, + storeKey, + memKey storetypes.StoreKey, + ps paramtypes.Subspace, + accountKeeper types.AccountKeeper, +) *Keeper { + // set KeyTable if it has not already been set + if !ps.HasKeyTable() { + ps = ps.WithKeyTable(types.ParamKeyTable()) + } + + return &Keeper{ + cdc: cdc, + storeKey: storeKey, + memKey: memKey, + paramstore: ps, + accountKeeper: accountKeeper, + } +} + +func (k *Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +// ExecuteReadySchedules gets all schedules that are due for execution (with limit that is equal to Params.Limit) +// and executes messages in each one +// NOTE that errors in contract calls rollback all already executed messages +func (k *Keeper) ExecuteReadySchedules(ctx sdk.Context) { + telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), LabelExecuteReadySchedules) + schedules := k.getSchedulesReadyForExecution(ctx) + + for _, schedule := range schedules { + err := k.executeSchedule(ctx, schedule) + recordExecutedSchedule(err, schedule) + } +} + +// AddSchedule adds new schedule to execution for every block `period`. +// First schedule execution is supposed to be on `now + period` block. 
+func (k *Keeper) AddSchedule(ctx sdk.Context, name string, period uint64, msgs []types.MsgExecuteContract) error { + if k.scheduleExists(ctx, name) { + return fmt.Errorf("schedule already exists with name=%v", name) + } + + schedule := types.Schedule{ + Name: name, + Period: period, + Msgs: msgs, + LastExecuteHeight: uint64(ctx.BlockHeight()), // let's execute newly added schedule on `now + period` block + } + k.storeSchedule(ctx, schedule) + k.changeTotalCount(ctx, 1) + + return nil +} + +// RemoveSchedule removes schedule with a given `name` +func (k *Keeper) RemoveSchedule(ctx sdk.Context, name string) { + if !k.scheduleExists(ctx, name) { + return + } + + k.changeTotalCount(ctx, -1) + k.removeSchedule(ctx, name) +} + +// GetSchedule returns schedule with a given `name` +func (k *Keeper) GetSchedule(ctx sdk.Context, name string) (*types.Schedule, bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ScheduleKey) + bzSchedule := store.Get(types.GetScheduleKey(name)) + if bzSchedule == nil { + return nil, false + } + + var schedule types.Schedule + k.cdc.MustUnmarshal(bzSchedule, &schedule) + return &schedule, true +} + +// GetAllSchedules returns all schedules +func (k *Keeper) GetAllSchedules(ctx sdk.Context) []types.Schedule { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ScheduleKey) + + res := make([]types.Schedule, 0) + + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var schedule types.Schedule + k.cdc.MustUnmarshal(iterator.Value(), &schedule) + res = append(res, schedule) + } + + return res +} + +func (k *Keeper) GetScheduleCount(ctx sdk.Context) int32 { + return k.getScheduleCount(ctx) +} + +func (k *Keeper) getSchedulesReadyForExecution(ctx sdk.Context) []types.Schedule { + params := k.GetParams(ctx) + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ScheduleKey) + count := uint64(0) + + res := make([]types.Schedule, 0) + + iterator := 
sdk.KVStorePrefixIterator(store, []byte{}) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var schedule types.Schedule + k.cdc.MustUnmarshal(iterator.Value(), &schedule) + + if k.intervalPassed(ctx, schedule) { + res = append(res, schedule) + count++ + + if count >= params.Limit { + k.Logger(ctx).Info("limit of schedule executions per block reached") + return res + } + } + } + + return res +} + +// executeSchedule executes all msgs in a given schedule and changes LastExecuteHeight +// if at least one msg execution fails, rollback all messages +func (k *Keeper) executeSchedule(ctx sdk.Context, schedule types.Schedule) error { + // Even if contract execution returned an error, we still increase the height + // and execute it after this interval + schedule.LastExecuteHeight = uint64(ctx.BlockHeight()) + k.storeSchedule(ctx, schedule) + + cacheCtx, writeFn := ctx.CacheContext() + + for idx, msg := range schedule.Msgs { + executeMsg := wasmtypes.MsgExecuteContract{ + Sender: k.accountKeeper.GetModuleAddress(types.ModuleName).String(), + Contract: msg.Contract, + Msg: []byte(msg.Msg), + Funds: sdk.NewCoins(), + } + _, err := k.WasmMsgServer.ExecuteContract(sdk.WrapSDKContext(cacheCtx), &executeMsg) + if err != nil { + ctx.Logger().Info("executeSchedule: failed to execute contract msg", + "schedule_name", schedule.Name, + "msg_idx", idx, + "msg_contract", msg.Contract, + "msg", msg.Msg, + "error", err, + ) + return err + } + } + + // only save state if all the messages in a schedule were executed successfully + writeFn() + return nil +} + +func (k *Keeper) storeSchedule(ctx sdk.Context, schedule types.Schedule) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ScheduleKey) + + bzSchedule := k.cdc.MustMarshal(&schedule) + store.Set(types.GetScheduleKey(schedule.Name), bzSchedule) +} + +func (k *Keeper) removeSchedule(ctx sdk.Context, name string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ScheduleKey) + + 
store.Delete(types.GetScheduleKey(name)) +} + +func (k *Keeper) scheduleExists(ctx sdk.Context, name string) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ScheduleKey) + return store.Has(types.GetScheduleKey(name)) +} + +func (k *Keeper) intervalPassed(ctx sdk.Context, schedule types.Schedule) bool { + return uint64(ctx.BlockHeight()) > (schedule.LastExecuteHeight + schedule.Period) +} + +func (k *Keeper) changeTotalCount(ctx sdk.Context, incrementAmount int32) { + store := ctx.KVStore(k.storeKey) + count := k.getScheduleCount(ctx) + newCount := types.ScheduleCount{Count: count + incrementAmount} + bzCount := k.cdc.MustMarshal(&newCount) + store.Set(types.ScheduleCountKey, bzCount) + + telemetry.ModuleSetGauge(types.ModuleName, float32(newCount.Count), LabelScheduleCount) +} + +func (k *Keeper) getScheduleCount(ctx sdk.Context) int32 { + store := ctx.KVStore(k.storeKey) + bzCount := store.Get(types.ScheduleCountKey) + if bzCount == nil { + return 0 + } + + var count types.ScheduleCount + k.cdc.MustUnmarshal(bzCount, &count) + return count.Count +} + +func recordExecutedSchedule(err error, schedule types.Schedule) { + telemetry.IncrCounterWithLabels([]string{LabelScheduleExecutionsCount}, 1, []metrics.Label{ + telemetry.NewLabel(telemetry.MetricLabelNameModule, types.ModuleName), + telemetry.NewLabel(MetricLabelSuccess, strconv.FormatBool(err == nil)), + telemetry.NewLabel(MetricLabelScheduleName, schedule.Name), + }) +} diff --git a/x/cron/keeper/keeper_test.go b/x/cron/keeper/keeper_test.go new file mode 100644 index 000000000..74f900985 --- /dev/null +++ b/x/cron/keeper/keeper_test.go @@ -0,0 +1,231 @@ +package keeper_test + +import ( + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/golang/mock/gomock" + "github.com/neutron-org/neutron/testutil" + testutil_keeper 
"github.com/neutron-org/neutron/testutil/cron/keeper" + mock_types "github.com/neutron-org/neutron/testutil/mocks/cron/types" + "github.com/neutron-org/neutron/x/cron/types" + "github.com/stretchr/testify/require" +) + +// ExecuteReadySchedules: +// - calls msgServer.execute() on ready schedules +// - updates ready schedules lastExecuteHeight +// - does not update lastExecuteHeight of unready schedules +// - does not go over the limit +func TestKeeperExecuteReadySchedules(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + accountKeeper := mock_types.NewMockAccountKeeper(ctrl) + addr, err := sdk.AccAddressFromBech32(testutil.TestOwnerAddress) + require.NoError(t, err) + + wasmMsgServer := mock_types.NewMockWasmMsgServer(ctrl) + k, ctx := testutil_keeper.CronKeeper(t, wasmMsgServer, accountKeeper) + ctx = ctx.WithBlockHeight(0) + + k.SetParams(ctx, types.Params{ + SecurityAddress: testutil.TestOwnerAddress, + Limit: 2, + }) + + schedules := []types.Schedule{ + { + Name: "1_unready1", + Period: 3, + Msgs: []types.MsgExecuteContract{ + { + Contract: "1_neutron", + Msg: "1_msg", + }, + }, + LastExecuteHeight: 4, + }, + { + Name: "2_ready1", + Period: 3, + Msgs: []types.MsgExecuteContract{ + { + Contract: "2_neutron", + Msg: "2_msg", + }, + }, + LastExecuteHeight: 0, + }, + { + Name: "3_ready2", + Period: 3, + Msgs: []types.MsgExecuteContract{ + { + Contract: "3_neutron", + Msg: "3_msg", + }, + }, + LastExecuteHeight: 0, + }, + { + Name: "4_unready2", + Period: 3, + Msgs: []types.MsgExecuteContract{}, + LastExecuteHeight: 4, + }, + { + Name: "5_ready3", + Period: 3, + Msgs: []types.MsgExecuteContract{ + { + Contract: "5_neutron", + Msg: "5_msg", + }, + }, + LastExecuteHeight: 0, + }, + } + + for _, item := range schedules { + ctx = ctx.WithBlockHeight(int64(item.LastExecuteHeight)) + err := k.AddSchedule(ctx, item.Name, item.Period, item.Msgs) + require.NoError(t, err) + } + + count := k.GetScheduleCount(ctx) + require.Equal(t, count, int32(5)) + 
+ ctx = ctx.WithBlockHeight(5) + + accountKeeper.EXPECT().GetModuleAddress(types.ModuleName).Return(addr) + accountKeeper.EXPECT().GetModuleAddress(types.ModuleName).Return(addr) + wasmMsgServer.EXPECT().ExecuteContract(gomock.Any(), &wasmtypes.MsgExecuteContract{ + Sender: testutil.TestOwnerAddress, + Contract: "2_neutron", + Msg: []byte("2_msg"), + Funds: sdk.NewCoins(), + }).Return(nil, fmt.Errorf("executeerror")) + wasmMsgServer.EXPECT().ExecuteContract(gomock.Any(), &wasmtypes.MsgExecuteContract{ + Sender: testutil.TestOwnerAddress, + Contract: "3_neutron", + Msg: []byte("3_msg"), + Funds: sdk.NewCoins(), + }).Return(&wasmtypes.MsgExecuteContractResponse{}, nil) + + k.ExecuteReadySchedules(ctx) + + unready1, _ := k.GetSchedule(ctx, "1_unready1") + ready1, _ := k.GetSchedule(ctx, "2_ready1") + ready2, _ := k.GetSchedule(ctx, "3_ready2") + unready2, _ := k.GetSchedule(ctx, "4_unready2") + ready3, _ := k.GetSchedule(ctx, "5_ready3") + + require.Equal(t, uint64(4), unready1.LastExecuteHeight) + require.Equal(t, uint64(5), ready1.LastExecuteHeight) + require.Equal(t, uint64(5), ready2.LastExecuteHeight) + require.Equal(t, uint64(4), unready2.LastExecuteHeight) + require.Equal(t, uint64(0), ready3.LastExecuteHeight) + + // let's make another call at the next height + // Notice that now only one ready schedule left because we got limit of 2 at once + ctx = ctx.WithBlockHeight(6) + + accountKeeper.EXPECT().GetModuleAddress(types.ModuleName).Return(addr) + wasmMsgServer.EXPECT().ExecuteContract(gomock.Any(), &wasmtypes.MsgExecuteContract{ + Sender: testutil.TestOwnerAddress, + Contract: "5_neutron", + Msg: []byte("5_msg"), + Funds: sdk.NewCoins(), + }).Return(&wasmtypes.MsgExecuteContractResponse{}, nil) + + k.ExecuteReadySchedules(ctx) + + unready1, _ = k.GetSchedule(ctx, "1_unready1") + ready1, _ = k.GetSchedule(ctx, "2_ready1") + ready2, _ = k.GetSchedule(ctx, "3_ready2") + unready2, _ = k.GetSchedule(ctx, "4_unready2") + ready3, _ = k.GetSchedule(ctx, "5_ready3") + 
+ require.Equal(t, uint64(4), unready1.LastExecuteHeight) + require.Equal(t, uint64(5), ready1.LastExecuteHeight) + require.Equal(t, uint64(5), ready2.LastExecuteHeight) + require.Equal(t, uint64(4), unready2.LastExecuteHeight) + require.Equal(t, uint64(6), ready3.LastExecuteHeight) +} + +func TestAddSchedule(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + accountKeeper := mock_types.NewMockAccountKeeper(ctrl) + + wasmMsgServer := mock_types.NewMockWasmMsgServer(ctrl) + k, ctx := testutil_keeper.CronKeeper(t, wasmMsgServer, accountKeeper) + ctx = ctx.WithBlockHeight(0) + + k.SetParams(ctx, types.Params{ + SecurityAddress: testutil.TestOwnerAddress, + Limit: 2, + }) + + // normal add schedule + err := k.AddSchedule(ctx, "a", 7, []types.MsgExecuteContract{ + { + Contract: "c", + Msg: "m", + }, + }) + require.NoError(t, err) + + // second time with same name returns error + err = k.AddSchedule(ctx, "a", 5, []types.MsgExecuteContract{}) + require.Error(t, err) + + scheduleA, found := k.GetSchedule(ctx, "a") + require.True(t, found) + require.Equal(t, scheduleA.Name, "a") + require.Equal(t, scheduleA.Period, uint64(7)) + require.Equal(t, scheduleA.Msgs, []types.MsgExecuteContract{ + {Contract: "c", Msg: "m"}, + }) + + // remove schedule works + k.RemoveSchedule(ctx, "a") + _, found = k.GetSchedule(ctx, "a") + assert.False(t, found) + + // does not panic even though we don't have it + k.RemoveSchedule(ctx, "a") +} + +func TestGetAllSchedules(t *testing.T) { + k, ctx := testutil_keeper.CronKeeper(t, nil, nil) + + k.SetParams(ctx, types.Params{ + SecurityAddress: testutil.TestOwnerAddress, + Limit: 2, + }) + expectedSchedules := make([]types.Schedule, 0, 3) + for i := range []int{1, 2, 3} { + s := types.Schedule{ + Name: strconv.Itoa(i), + Period: 5, + Msgs: nil, + LastExecuteHeight: uint64(ctx.BlockHeight()), + } + expectedSchedules = append(expectedSchedules, s) + err := k.AddSchedule(ctx, s.Name, s.Period, s.Msgs) + require.NoError(t, err) + 
} + + schedules := k.GetAllSchedules(ctx) + assert.Equal(t, 3, len(schedules)) + assert.ElementsMatch(t, schedules, expectedSchedules) + assert.Equal(t, int32(3), k.GetScheduleCount(ctx)) +} diff --git a/x/cron/keeper/params.go b/x/cron/keeper/params.go new file mode 100644 index 000000000..d04211c1b --- /dev/null +++ b/x/cron/keeper/params.go @@ -0,0 +1,18 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/neutron-org/neutron/x/cron/types" +) + +// GetParams get all parameters as types.Params +func (k Keeper) GetParams(ctx sdk.Context) (params types.Params) { + k.paramstore.GetParamSet(ctx, ¶ms) + + return params +} + +// SetParams set the params +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + k.paramstore.SetParamSet(ctx, ¶ms) +} diff --git a/x/cron/keeper/params_test.go b/x/cron/keeper/params_test.go new file mode 100644 index 000000000..5b1ce5a15 --- /dev/null +++ b/x/cron/keeper/params_test.go @@ -0,0 +1,28 @@ +package keeper_test + +import ( + "testing" + + "github.com/neutron-org/neutron/testutil" + + "github.com/neutron-org/neutron/app" + + testkeeper "github.com/neutron-org/neutron/testutil/cron/keeper" + + "github.com/neutron-org/neutron/x/cron/types" + "github.com/stretchr/testify/require" +) + +func TestGetParams(t *testing.T) { + _ = app.GetDefaultConfig() + + k, ctx := testkeeper.CronKeeper(t, nil, nil) + params := types.Params{ + SecurityAddress: testutil.TestOwnerAddress, + Limit: 5, + } + + k.SetParams(ctx, params) + + require.EqualValues(t, params, k.GetParams(ctx)) +} diff --git a/x/cron/module.go b/x/cron/module.go new file mode 100644 index 000000000..3df0560bf --- /dev/null +++ b/x/cron/module.go @@ -0,0 +1,160 @@ +package cron + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/gorilla/mux" + + // this line is used by starport scaffolding # 1 + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci 
"github.com/tendermint/tendermint/abci/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/neutron-org/neutron/x/cron/client/cli" + "github.com/neutron-org/neutron/x/cron/keeper" + "github.com/neutron-org/neutron/x/cron/types" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface that defines the independent methods a Cosmos SDK module needs to implement. +type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the name of the module as a string +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the amino codec for the module, which is used to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns a default GenesisState for the module, marshalled to json.RawMessage. 
The default GenesisState need to be defined by the module developer and is primarily used for testing +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis used to validate the GenesisState, given in its json.RawMessage form +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return genState.Validate() +} + +// RegisterRESTRoutes registers the capability module's REST service handlers. +func (AppModuleBasic) RegisterRESTRoutes(_ client.Context, _ *mux.Router) { +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) +} + +// GetTxCmd returns the root Tx command for the module. The subcommands of this root command are used by end-users to generate new transactions containing messages defined in the module +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.GetTxCmd() +} + +// GetQueryCmd returns the root query command for the module. 
The subcommands of this root command are used by end-users to generate new queries to the subset of the state defined by the module +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd(types.StoreKey) +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface that defines the inter-dependent methods that modules need to implement +type AppModule struct { + AppModuleBasic + + keeper *keeper.Keeper +} + +func NewAppModule( + cdc codec.Codec, + keeper *keeper.Keeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + } +} + +// Deprecated: use RegisterServices +func (am AppModule) Route() sdk.Route { return sdk.Route{} } + +// Deprecated: use RegisterServices +func (AppModule) QuerierRoute() string { return types.RouterKey } + +// Deprecated: use RegisterServices +func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a gRPC query service to respond to the module-specific gRPC queries +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// RegisterInvariants registers the invariants of the module. If an invariant deviates from its predicted value, the InvariantRegistry triggers appropriate logic (most often the chain will be halted) +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the module's genesis initialization. It returns no validator updates. 
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, *am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, *am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. To avoid wrong/empty versions, the initial version should be set to 1 +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} + +// EndBlock contains the logic that is automatically triggered at the end of each block +func (am AppModule) EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + am.keeper.ExecuteReadySchedules(ctx) + return []abci.ValidatorUpdate{} +} diff --git a/x/cron/module_simulation.go b/x/cron/module_simulation.go new file mode 100644 index 000000000..f1ed766f3 --- /dev/null +++ b/x/cron/module_simulation.go @@ -0,0 +1,57 @@ +package cron + +import ( + "math/rand" + + "github.com/cosmos/cosmos-sdk/baseapp" + simappparams "github.com/cosmos/cosmos-sdk/simapp/params" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/simulation" + cronsimulation "github.com/neutron-org/neutron/x/cron/simulation" + "github.com/neutron-org/neutron/x/cron/types" +) + +// avoid unused import issue +var 
(
	_ = cronsimulation.FindAccount
	_ = simappparams.StakePerAccount
	_ = simulation.MsgEntryKind
	_ = baseapp.Paramspace
)

const (
// this line is used by starport scaffolding # simapp/module/const
)

// GenerateGenesisState creates a randomized GenState of the module.
func (AppModule) GenerateGenesisState(simState *module.SimulationState) {
	// The cron genesis is currently deterministic: default params and no
	// schedules. (A previous revision also collected simState account
	// addresses here, but they were never used — dead code removed.)
	cronGenesis := types.GenesisState{
		Params: types.DefaultParams(),
	}
	simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&cronGenesis)
}

// ProposalContents doesn't return any content functions for governance proposals.
func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent {
	return nil
}

// RandomizedParams creates randomized param changes for the simulator.
// The cron module exposes no randomized params yet.
func (am AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange {
	return []simtypes.ParamChange{}
}

// RegisterStoreDecoder registers a decoder (no-op for the cron module).
func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {}

// WeightedOperations returns the all the gov module operations with their respective weights.
+func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation { + operations := make([]simtypes.WeightedOperation, 0) + return operations +} diff --git a/x/cron/simulation/helpers.go b/x/cron/simulation/helpers.go new file mode 100644 index 000000000..92c437c0d --- /dev/null +++ b/x/cron/simulation/helpers.go @@ -0,0 +1,15 @@ +package simulation + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" +) + +// FindAccount find a specific address from an account list +func FindAccount(accs []simtypes.Account, address string) (simtypes.Account, bool) { + creator, err := sdk.AccAddressFromBech32(address) + if err != nil { + panic(err) + } + return simtypes.FindAccount(accs, creator) +} diff --git a/x/cron/types/codec.go b/x/cron/types/codec.go new file mode 100644 index 000000000..5c9dcb6db --- /dev/null +++ b/x/cron/types/codec.go @@ -0,0 +1,24 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +func RegisterCodec(_ *codec.LegacyAmino) { + // this line is used by starport scaffolding # 2 +} + +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil)) + // this line is used by starport scaffolding # 3 + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} + +var ( + Amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) diff --git a/x/cron/types/errors.go b/x/cron/types/errors.go new file mode 100644 index 000000000..35382cbcd --- /dev/null +++ b/x/cron/types/errors.go @@ -0,0 +1,12 @@ +package types + +// DONTCOVER + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// x/cron module sentinel errors +var ( + ErrSample = sdkerrors.Register(ModuleName, 1100, "sample error") +) 
diff --git a/x/cron/types/expected_keepers.go b/x/cron/types/expected_keepers.go new file mode 100644 index 000000000..cbed63584 --- /dev/null +++ b/x/cron/types/expected_keepers.go @@ -0,0 +1,19 @@ +package types + +import ( + "context" + + wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// AccountKeeper defines the expected account keeper used for simulations (noalias) +type AccountKeeper interface { + GetModuleAddress(moduleName string) sdk.AccAddress + // Methods imported from account should be defined here +} + +type WasmMsgServer interface { + ExecuteContract(context.Context, *wasmtypes.MsgExecuteContract) (*wasmtypes.MsgExecuteContractResponse, error) + // Methods imported from account should be defined here +} diff --git a/x/cron/types/genesis.go b/x/cron/types/genesis.go new file mode 100644 index 000000000..7835e0aa6 --- /dev/null +++ b/x/cron/types/genesis.go @@ -0,0 +1,30 @@ +package types + +import "fmt" + +// DefaultGenesis returns the default genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{ + ScheduleList: []Schedule{}, + // this line is used by starport scaffolding # genesis/types/default + Params: DefaultParams(), + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. 
+func (gs GenesisState) Validate() error { + // Check for duplicated index in schedule + scheduleIndexMap := make(map[string]struct{}) + + for _, elem := range gs.ScheduleList { + index := string(GetScheduleKey(elem.Name)) + if _, ok := scheduleIndexMap[index]; ok { + return fmt.Errorf("duplicated index for schedule") + } + scheduleIndexMap[index] = struct{}{} + } + // this line is used by starport scaffolding # genesis/types/validate + + return gs.Params.Validate() +} diff --git a/x/cron/types/genesis.pb.go b/x/cron/types/genesis.pb.go new file mode 100644 index 000000000..eeccc651b --- /dev/null +++ b/x/cron/types/genesis.pb.go @@ -0,0 +1,385 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cron/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the cron module's genesis state. 
+type GenesisState struct { + ScheduleList []Schedule `protobuf:"bytes,2,rep,name=scheduleList,proto3" json:"scheduleList"` + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_0c58acd1c2bcdf4f, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetScheduleList() []Schedule { + if m != nil { + return m.ScheduleList + } + return nil +} + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "neutron.cron.GenesisState") +} + +func init() { proto.RegisterFile("cron/genesis.proto", fileDescriptor_0c58acd1c2bcdf4f) } + +var fileDescriptor_0c58acd1c2bcdf4f = []byte{ + // 228 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0x2e, 0xca, 0xcf, + 0xd3, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, + 0xc9, 0x4b, 0x2d, 0x2d, 0x29, 0xca, 0xcf, 0xd3, 0x03, 0xc9, 0x49, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, + 
0x83, 0x25, 0xf4, 0x41, 0x2c, 0x88, 0x1a, 0x29, 0x41, 0xb0, 0xbe, 0x82, 0xc4, 0xa2, 0xc4, 0x5c, + 0xa8, 0x36, 0x29, 0x61, 0xb0, 0x50, 0x71, 0x72, 0x46, 0x6a, 0x4a, 0x69, 0x4e, 0x2a, 0x44, 0x50, + 0xa9, 0x85, 0x91, 0x8b, 0xc7, 0x1d, 0x62, 0x7a, 0x70, 0x49, 0x62, 0x49, 0xaa, 0x90, 0x03, 0x17, + 0x0f, 0x4c, 0x89, 0x4f, 0x66, 0x71, 0x89, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x98, 0x1e, + 0xb2, 0x9d, 0x7a, 0xc1, 0x50, 0x15, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0xa1, 0xe8, 0x10, + 0x32, 0xe2, 0x62, 0x83, 0xd8, 0x2b, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0x82, 0xaa, 0x37, + 0x00, 0x2c, 0x07, 0xd5, 0x09, 0x55, 0xe9, 0xe4, 0x7a, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, + 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, + 0x72, 0x0c, 0x51, 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x50, + 0x73, 0x74, 0xf3, 0x8b, 0xd2, 0x61, 0x6c, 0xfd, 0x0a, 0x7d, 0xb0, 0xb7, 0x4a, 0x2a, 0x0b, 0x52, + 0x8b, 0x93, 0xd8, 0xc0, 0x9e, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x57, 0xaa, 0x3f, 0x21, + 0x36, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ScheduleList) > 0 { + for iNdEx := len(m.ScheduleList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ScheduleList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.ScheduleList) > 0 { + for _, e := range m.ScheduleList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis 
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduleList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScheduleList = append(m.ScheduleList, Schedule{}) + if err := m.ScheduleList[len(m.ScheduleList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/cron/types/genesis_test.go b/x/cron/types/genesis_test.go new file mode 100644 index 000000000..98da89fa4 --- /dev/null +++ b/x/cron/types/genesis_test.go @@ -0,0 +1,53 @@ +package types_test + +import ( + "testing" + + "github.com/neutron-org/neutron/app" + + "github.com/neutron-org/neutron/x/cron/types" + "github.com/stretchr/testify/require" +) + +func TestGenesisState_Validate(t *testing.T) { + app.GetDefaultConfig() + + for _, tc := range []struct { + desc string + genState *types.GenesisState + valid bool + }{ + { + desc: "valid genesis state", + genState: &types.GenesisState{ + Params: types.Params{ + SecurityAddress: "neutron17dtl0mjt3t77kpuhg2edqzjpszulwhgzcdvagh", + Limit: 1, + }, + // this line is used by starport scaffolding # types/genesis/validField + }, + valid: true, + }, + { + desc: "invalid genesis state - params are invalid", + genState: &types.GenesisState{ + Params: types.Params{ + SecurityAddress: "", + Limit: 0, + }, + // this line is used by starport scaffolding # types/genesis/validField + }, + valid: false, + }, + // this line is used by starport scaffolding # 
types/genesis/testcase + } { + t.Run(tc.desc, func(t *testing.T) { + err := tc.genState.Validate() + if tc.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} diff --git a/x/cron/types/keys.go b/x/cron/types/keys.go new file mode 100644 index 000000000..e9608a032 --- /dev/null +++ b/x/cron/types/keys.go @@ -0,0 +1,29 @@ +package types + +const ( + // ModuleName defines the module name + ModuleName = "cron" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey defines the module's message routing key + RouterKey = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_cron" +) + +const ( + prefixScheduleKey = iota + 1 + prefixScheduleCountKey +) + +var ( + ScheduleKey = []byte{prefixScheduleKey} + ScheduleCountKey = []byte{prefixScheduleCountKey} +) + +func GetScheduleKey(name string) []byte { + return []byte(name) +} diff --git a/x/cron/types/params.go b/x/cron/types/params.go new file mode 100644 index 000000000..2ad7cf24c --- /dev/null +++ b/x/cron/types/params.go @@ -0,0 +1,106 @@ +package types + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + "gopkg.in/yaml.v2" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +var ( + KeySecurityAddress = []byte("SecurityAddress") + KeyLimit = []byte("Limit") + + DefaultSecurityAddress = "" + DefaultLimit = uint64(5) +) + +// ParamKeyTable the param key table for launch module +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +// NewParams creates a new Params instance +func NewParams(securityAddress string, limit uint64) Params { + return Params{ + SecurityAddress: securityAddress, + Limit: limit, + } +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams(DefaultSecurityAddress, DefaultLimit) +} + +// ParamSetPairs get the 
params.ParamSet +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair( + KeySecurityAddress, + &p.SecurityAddress, + validateAddress, + ), + paramtypes.NewParamSetPair( + KeyLimit, + &p.Limit, + validateLimit, + ), + } +} + +// Validate validates the set of params +func (p Params) Validate() error { + err := validateAddress(p.SecurityAddress) + if err != nil { + return fmt.Errorf("invalid security address: %w", err) + } + + err = validateLimit(p.Limit) + if err != nil { + return fmt.Errorf("invalid limit: %w", err) + } + + return nil +} + +// String implements the Stringer interface. +func (p Params) String() string { + out, _ := yaml.Marshal(p) + return string(out) +} + +func validateAddress(i interface{}) error { + v, ok := i.(string) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + // address might be explicitly empty in test environments + if len(v) == 0 { + return nil + } + + _, err := sdk.AccAddressFromBech32(v) + if err != nil { + return fmt.Errorf("invalid address: %w", err) + } + + return nil +} + +func validateLimit(i interface{}) error { + l, ok := i.(uint64) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if l == 0 { + return fmt.Errorf("limit cannot be zero") + } + + return nil +} diff --git a/x/cron/types/params.pb.go b/x/cron/types/params.pb.go new file mode 100644 index 000000000..54321ca1e --- /dev/null +++ b/x/cron/types/params.pb.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cron/params.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the parameters for the module. +type Params struct { + // Security address that can remove schedules + SecurityAddress string `protobuf:"bytes,1,opt,name=security_address,json=securityAddress,proto3" json:"security_address,omitempty"` + // Limit of schedules executed in one block + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (m *Params) Reset() { *m = Params{} } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_1c8bf79b449ebe44, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetSecurityAddress() string { + if m != nil { + return m.SecurityAddress + } + return "" +} + +func (m *Params) GetLimit() uint64 { + if m != nil { + return m.Limit + } + return 0 +} + +func init() { + proto.RegisterType((*Params)(nil), "neutron.cron.Params") +} + +func init() { proto.RegisterFile("cron/params.proto", fileDescriptor_1c8bf79b449ebe44) } + 
+var fileDescriptor_1c8bf79b449ebe44 = []byte{ + // 195 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2e, 0xca, 0xcf, + 0xd3, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xc9, + 0x4b, 0x2d, 0x2d, 0x29, 0xca, 0xcf, 0xd3, 0x03, 0x49, 0x49, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, + 0x25, 0xf4, 0x41, 0x2c, 0x88, 0x1a, 0x25, 0x7f, 0x2e, 0xb6, 0x00, 0xb0, 0x1e, 0x21, 0x4d, 0x2e, + 0x81, 0xe2, 0xd4, 0xe4, 0xd2, 0xa2, 0xcc, 0x92, 0xca, 0xf8, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, + 0x62, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x7e, 0x98, 0xb8, 0x23, 0x44, 0x58, 0x48, 0x84, + 0x8b, 0x35, 0x27, 0x33, 0x37, 0xb3, 0x44, 0x82, 0x49, 0x81, 0x51, 0x83, 0x25, 0x08, 0xc2, 0xb1, + 0x62, 0x99, 0xb1, 0x40, 0x9e, 0xc1, 0xc9, 0xf5, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, + 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, + 0x18, 0xa2, 0xb4, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xa1, 0x2e, + 0xd3, 0xcd, 0x2f, 0x4a, 0x87, 0xb1, 0xf5, 0x2b, 0xf4, 0xc1, 0x5e, 0x28, 0xa9, 0x2c, 0x48, 0x2d, + 0x4e, 0x62, 0x03, 0x3b, 0xcf, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x94, 0x8d, 0x0a, 0xb1, 0xd7, + 0x00, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Limit != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + } + if len(m.SecurityAddress) > 0 { + i -= len(m.SecurityAddress) + copy(dAtA[i:], m.SecurityAddress) + i = encodeVarintParams(dAtA, i, 
uint64(len(m.SecurityAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SecurityAddress) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovParams(uint64(m.Limit)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + m.SecurityAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: 
illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/cron/types/query.pb.go b/x/cron/types/query.pb.go new file mode 100644 index 000000000..8d554a34f --- /dev/null +++ b/x/cron/types/query.pb.go @@ -0,0 +1,1385 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cron/query.proto + +package types + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aa4b81a2a4395683, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +type QueryParamsResponse struct { + // params holds all the parameters of this module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aa4b81a2a4395683, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +type QueryGetScheduleRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *QueryGetScheduleRequest) Reset() { *m = QueryGetScheduleRequest{} } +func (m *QueryGetScheduleRequest) String() string { return proto.CompactTextString(m) } +func (*QueryGetScheduleRequest) ProtoMessage() {} +func (*QueryGetScheduleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aa4b81a2a4395683, []int{2} +} +func (m *QueryGetScheduleRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGetScheduleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_QueryGetScheduleRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGetScheduleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGetScheduleRequest.Merge(m, src) +} +func (m *QueryGetScheduleRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryGetScheduleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGetScheduleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGetScheduleRequest proto.InternalMessageInfo + +func (m *QueryGetScheduleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type QueryGetScheduleResponse struct { + Schedule Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule"` +} + +func (m *QueryGetScheduleResponse) Reset() { *m = QueryGetScheduleResponse{} } +func (m *QueryGetScheduleResponse) String() string { return proto.CompactTextString(m) } +func (*QueryGetScheduleResponse) ProtoMessage() {} +func (*QueryGetScheduleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aa4b81a2a4395683, []int{3} +} +func (m *QueryGetScheduleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGetScheduleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGetScheduleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGetScheduleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGetScheduleResponse.Merge(m, src) +} +func (m *QueryGetScheduleResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryGetScheduleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGetScheduleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGetScheduleResponse proto.InternalMessageInfo + 
+func (m *QueryGetScheduleResponse) GetSchedule() Schedule { + if m != nil { + return m.Schedule + } + return Schedule{} +} + +type QuerySchedulesRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QuerySchedulesRequest) Reset() { *m = QuerySchedulesRequest{} } +func (m *QuerySchedulesRequest) String() string { return proto.CompactTextString(m) } +func (*QuerySchedulesRequest) ProtoMessage() {} +func (*QuerySchedulesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aa4b81a2a4395683, []int{4} +} +func (m *QuerySchedulesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuerySchedulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuerySchedulesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuerySchedulesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuerySchedulesRequest.Merge(m, src) +} +func (m *QuerySchedulesRequest) XXX_Size() int { + return m.Size() +} +func (m *QuerySchedulesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QuerySchedulesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QuerySchedulesRequest proto.InternalMessageInfo + +func (m *QuerySchedulesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +type QuerySchedulesResponse struct { + Schedules []Schedule `protobuf:"bytes,1,rep,name=schedules,proto3" json:"schedules"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QuerySchedulesResponse) Reset() { *m = QuerySchedulesResponse{} } +func (m *QuerySchedulesResponse) String() string { return proto.CompactTextString(m) } +func (*QuerySchedulesResponse) ProtoMessage() {} +func 
(*QuerySchedulesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aa4b81a2a4395683, []int{5} +} +func (m *QuerySchedulesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuerySchedulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuerySchedulesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuerySchedulesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuerySchedulesResponse.Merge(m, src) +} +func (m *QuerySchedulesResponse) XXX_Size() int { + return m.Size() +} +func (m *QuerySchedulesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QuerySchedulesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QuerySchedulesResponse proto.InternalMessageInfo + +func (m *QuerySchedulesResponse) GetSchedules() []Schedule { + if m != nil { + return m.Schedules + } + return nil +} + +func (m *QuerySchedulesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "neutron.cron.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "neutron.cron.QueryParamsResponse") + proto.RegisterType((*QueryGetScheduleRequest)(nil), "neutron.cron.QueryGetScheduleRequest") + proto.RegisterType((*QueryGetScheduleResponse)(nil), "neutron.cron.QueryGetScheduleResponse") + proto.RegisterType((*QuerySchedulesRequest)(nil), "neutron.cron.QuerySchedulesRequest") + proto.RegisterType((*QuerySchedulesResponse)(nil), "neutron.cron.QuerySchedulesResponse") +} + +func init() { proto.RegisterFile("cron/query.proto", fileDescriptor_aa4b81a2a4395683) } + +var fileDescriptor_aa4b81a2a4395683 = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 
0x6b, 0x13, 0x41, + 0x14, 0xc7, 0xb3, 0xb1, 0x86, 0xe6, 0xe9, 0x41, 0x5f, 0x63, 0x0c, 0x4b, 0xdd, 0xd6, 0xd5, 0x56, + 0x51, 0x3a, 0x43, 0xe3, 0x45, 0x3c, 0x16, 0xb4, 0x78, 0xab, 0xd1, 0x93, 0x17, 0x99, 0xc4, 0x61, + 0x1b, 0x6c, 0x66, 0xb6, 0x3b, 0xb3, 0xc5, 0x22, 0x82, 0xf8, 0x17, 0x08, 0x9e, 0xfd, 0x7f, 0x7a, + 0x2c, 0x78, 0xf1, 0x24, 0x92, 0x78, 0xf7, 0x5f, 0x90, 0x9d, 0x1f, 0xe9, 0x6e, 0x13, 0x92, 0xdb, + 0x32, 0xf3, 0x7d, 0xdf, 0xef, 0x67, 0xde, 0x7b, 0x0b, 0x37, 0x06, 0x99, 0x14, 0xf4, 0x38, 0xe7, + 0xd9, 0x29, 0x49, 0x33, 0xa9, 0x25, 0x5e, 0x17, 0x3c, 0xd7, 0x99, 0x14, 0xa4, 0xb8, 0x09, 0x5b, + 0x89, 0x4c, 0xa4, 0xb9, 0xa0, 0xc5, 0x97, 0xd5, 0x84, 0xeb, 0x89, 0x94, 0xc9, 0x11, 0xa7, 0x2c, + 0x1d, 0x52, 0x26, 0x84, 0xd4, 0x4c, 0x0f, 0xa5, 0x50, 0xee, 0xf6, 0xd1, 0x40, 0xaa, 0x91, 0x54, + 0xb4, 0xcf, 0x14, 0xb7, 0xd6, 0xf4, 0x64, 0xb7, 0xcf, 0x35, 0xdb, 0xa5, 0x29, 0x4b, 0x86, 0xc2, + 0x88, 0x9d, 0xf6, 0xa6, 0xc9, 0x4f, 0x59, 0xc6, 0x46, 0xbe, 0x7c, 0xcd, 0x1c, 0xa9, 0xc1, 0x21, + 0x7f, 0x9f, 0x1f, 0x71, 0x7b, 0x18, 0xb7, 0x00, 0x5f, 0x15, 0x4e, 0x07, 0x46, 0xd9, 0xe3, 0xc7, + 0x39, 0x57, 0x3a, 0x7e, 0x09, 0x6b, 0x95, 0x53, 0x95, 0x4a, 0xa1, 0x38, 0x76, 0xa1, 0x61, 0x1d, + 0x3b, 0xc1, 0x66, 0xf0, 0xf0, 0x5a, 0xb7, 0x45, 0xca, 0x6f, 0x22, 0x56, 0xbd, 0xb7, 0x72, 0xf6, + 0x7b, 0xa3, 0xd6, 0x73, 0xca, 0x78, 0x07, 0x6e, 0x1b, 0xab, 0x7d, 0xae, 0x5f, 0xbb, 0x68, 0x97, + 0x82, 0x08, 0x2b, 0x82, 0x8d, 0xb8, 0x31, 0x6b, 0xf6, 0xcc, 0x77, 0xfc, 0x06, 0x3a, 0xb3, 0x72, + 0x17, 0xff, 0x14, 0x56, 0x3d, 0xbd, 0x03, 0x68, 0x57, 0x01, 0x7c, 0x85, 0x43, 0x98, 0xaa, 0xe3, + 0x77, 0x70, 0xcb, 0xb8, 0x7a, 0x81, 0x7f, 0x28, 0xbe, 0x00, 0xb8, 0x68, 0x9d, 0x33, 0xdd, 0x26, + 0xb6, 0xcf, 0xa4, 0xe8, 0x33, 0xb1, 0x23, 0x74, 0x7d, 0x26, 0x07, 0x2c, 0xf1, 0xf8, 0xbd, 0x52, + 0x65, 0xfc, 0x23, 0x80, 0xf6, 0xe5, 0x04, 0x47, 0xfd, 0x0c, 0x9a, 0x9e, 0xa3, 0xe8, 0xdb, 0x95, + 0xa5, 0xd8, 0x17, 0x72, 0xdc, 0xaf, 0xe0, 0xd5, 0x0d, 0xde, 0x83, 0xa5, 0x78, 0x36, 0xb8, 0xcc, + 0xd7, 0xfd, 0x57, 
0x87, 0xab, 0x86, 0x0f, 0x3f, 0x40, 0xc3, 0xce, 0x09, 0x37, 0xab, 0x14, 0xb3, + 0x6b, 0x10, 0xde, 0x5d, 0xa0, 0xb0, 0x21, 0xf1, 0xfa, 0xd7, 0x9f, 0x7f, 0xbf, 0xd7, 0xdb, 0xd8, + 0xa2, 0x4e, 0x4a, 0x4b, 0x8b, 0x87, 0x5f, 0x02, 0x58, 0xf5, 0xaf, 0xc3, 0xad, 0x39, 0x6e, 0xb3, + 0x5b, 0x11, 0x6e, 0x2f, 0x93, 0xb9, 0xe4, 0x2d, 0x93, 0xbc, 0x81, 0x77, 0xaa, 0xc9, 0xbe, 0x79, + 0xf4, 0x53, 0xb1, 0x4f, 0x9f, 0xf1, 0x04, 0x9a, 0xd3, 0x99, 0xe0, 0xbd, 0x39, 0xde, 0x97, 0x77, + 0x22, 0xbc, 0xbf, 0x58, 0xe4, 0xe2, 0x23, 0x13, 0xdf, 0xc1, 0xf6, 0xfc, 0xf8, 0xbd, 0xe7, 0x67, + 0xe3, 0x28, 0x38, 0x1f, 0x47, 0xc1, 0x9f, 0x71, 0x14, 0x7c, 0x9b, 0x44, 0xb5, 0xf3, 0x49, 0x54, + 0xfb, 0x35, 0x89, 0x6a, 0x6f, 0x1f, 0x27, 0x43, 0x7d, 0x98, 0xf7, 0xc9, 0x40, 0x8e, 0x7c, 0xed, + 0x8e, 0xcc, 0x92, 0xa9, 0xcf, 0x47, 0xeb, 0xa4, 0x4f, 0x53, 0xae, 0xfa, 0x0d, 0xf3, 0x9b, 0x3e, + 0xf9, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x7c, 0x50, 0x66, 0xfc, 0x50, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Queries the parameters of the module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) + // Queries a Schedule by name. + Schedule(ctx context.Context, in *QueryGetScheduleRequest, opts ...grpc.CallOption) (*QueryGetScheduleResponse, error) + // Queries a list of Schedule items. 
+ Schedules(ctx context.Context, in *QuerySchedulesRequest, opts ...grpc.CallOption) (*QuerySchedulesResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/neutron.cron.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Schedule(ctx context.Context, in *QueryGetScheduleRequest, opts ...grpc.CallOption) (*QueryGetScheduleResponse, error) { + out := new(QueryGetScheduleResponse) + err := c.cc.Invoke(ctx, "/neutron.cron.Query/Schedule", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Schedules(ctx context.Context, in *QuerySchedulesRequest, opts ...grpc.CallOption) (*QuerySchedulesResponse, error) { + out := new(QuerySchedulesResponse) + err := c.cc.Invoke(ctx, "/neutron.cron.Query/Schedules", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Queries the parameters of the module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) + // Queries a Schedule by name. + Schedule(context.Context, *QueryGetScheduleRequest) (*QueryGetScheduleResponse, error) + // Queries a list of Schedule items. + Schedules(context.Context, *QuerySchedulesRequest) (*QuerySchedulesResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} +func (*UnimplementedQueryServer) Schedule(ctx context.Context, req *QueryGetScheduleRequest) (*QueryGetScheduleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Schedule not implemented") +} +func (*UnimplementedQueryServer) Schedules(ctx context.Context, req *QuerySchedulesRequest) (*QuerySchedulesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Schedules not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/neutron.cron.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Schedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryGetScheduleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Schedule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/neutron.cron.Query/Schedule", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Schedule(ctx, req.(*QueryGetScheduleRequest)) + } + 
return interceptor(ctx, in, info, handler) +} + +func _Query_Schedules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QuerySchedulesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Schedules(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/neutron.cron.Query/Schedules", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Schedules(ctx, req.(*QuerySchedulesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "neutron.cron.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + { + MethodName: "Schedule", + Handler: _Query_Schedule_Handler, + }, + { + MethodName: "Schedules", + Handler: _Query_Schedules_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cron/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryGetScheduleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGetScheduleRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGetScheduleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryGetScheduleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGetScheduleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGetScheduleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Schedule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QuerySchedulesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *QuerySchedulesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuerySchedulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QuerySchedulesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuerySchedulesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuerySchedulesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Schedules) > 0 { + for iNdEx := len(m.Schedules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Schedules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryGetScheduleRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryGetScheduleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Schedule.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QuerySchedulesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QuerySchedulesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Schedules) > 0 { + for _, e := range m.Schedules { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGetScheduleRequest) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGetScheduleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryGetScheduleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGetScheduleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGetScheduleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryGetScheduleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Schedule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuerySchedulesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuerySchedulesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuerySchedulesRequest: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuerySchedulesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuerySchedulesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuerySchedulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedules", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedules = append(m.Schedules, Schedule{}) + if err := m.Schedules[len(m.Schedules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/cron/types/query.pb.gw.go b/x/cron/types/query.pb.gw.go new file mode 100644 index 000000000..19145fd03 --- /dev/null +++ b/x/cron/types/query.pb.gw.go @@ -0,0 +1,337 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: cron/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Schedule_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGetScheduleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + msg, err := client.Schedule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Schedule_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGetScheduleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + msg, err := server.Schedule(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Schedules_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Schedules_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QuerySchedulesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Schedules_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Schedules(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Schedules_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QuerySchedulesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Schedules_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Schedules(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, 
mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Schedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Schedule_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Schedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Schedules_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Schedules_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Schedules_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Schedule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Schedule_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Schedule_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Schedules_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Schedules_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Schedules_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"neutron", "cron", "params"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Schedule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"neutron", "cron", "schedule", "name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Schedules_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"neutron", "cron", "schedule"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage + + forward_Query_Schedule_0 = runtime.ForwardResponseMessage + + forward_Query_Schedules_0 = runtime.ForwardResponseMessage +) diff --git a/x/cron/types/schedule.pb.go b/x/cron/types/schedule.pb.go new file mode 100644 index 000000000..187fcad1d --- /dev/null +++ b/x/cron/types/schedule.pb.go @@ -0,0 +1,838 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cron/schedule.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Schedule struct { + // Name of schedule + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Period in blocks + Period uint64 `protobuf:"varint,2,opt,name=period,proto3" json:"period,omitempty"` + // Msgs that will be executed every period amount of time + Msgs []MsgExecuteContract `protobuf:"bytes,3,rep,name=msgs,proto3" json:"msgs"` + // Last execution's block height + LastExecuteHeight uint64 `protobuf:"varint,4,opt,name=last_execute_height,json=lastExecuteHeight,proto3" json:"last_execute_height,omitempty"` +} + +func (m *Schedule) Reset() { *m = Schedule{} } +func (m *Schedule) String() string { return proto.CompactTextString(m) } +func (*Schedule) ProtoMessage() {} +func (*Schedule) Descriptor() ([]byte, []int) { + return fileDescriptor_b6a5569ec6e2056c, []int{0} +} +func (m *Schedule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Schedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Schedule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Schedule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schedule.Merge(m, src) +} +func (m *Schedule) XXX_Size() int { + return m.Size() +} +func (m *Schedule) XXX_DiscardUnknown() { + xxx_messageInfo_Schedule.DiscardUnknown(m) +} + +var xxx_messageInfo_Schedule proto.InternalMessageInfo + +func (m *Schedule) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Schedule) GetPeriod() uint64 { + if m != nil { + return m.Period + } + return 0 +} + +func (m *Schedule) GetMsgs() []MsgExecuteContract { + if m != nil { + return m.Msgs + } + return nil +} + +func (m *Schedule) GetLastExecuteHeight() uint64 { + if m != nil { + return m.LastExecuteHeight + } + return 0 +} + +type 
MsgExecuteContract struct { + // Contract is the address of the smart contract + Contract string `protobuf:"bytes,1,opt,name=contract,proto3" json:"contract,omitempty"` + // Msg is json encoded message to be passed to the contract + Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (m *MsgExecuteContract) Reset() { *m = MsgExecuteContract{} } +func (m *MsgExecuteContract) String() string { return proto.CompactTextString(m) } +func (*MsgExecuteContract) ProtoMessage() {} +func (*MsgExecuteContract) Descriptor() ([]byte, []int) { + return fileDescriptor_b6a5569ec6e2056c, []int{1} +} +func (m *MsgExecuteContract) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgExecuteContract) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgExecuteContract.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgExecuteContract) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgExecuteContract.Merge(m, src) +} +func (m *MsgExecuteContract) XXX_Size() int { + return m.Size() +} +func (m *MsgExecuteContract) XXX_DiscardUnknown() { + xxx_messageInfo_MsgExecuteContract.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgExecuteContract proto.InternalMessageInfo + +func (m *MsgExecuteContract) GetContract() string { + if m != nil { + return m.Contract + } + return "" +} + +func (m *MsgExecuteContract) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +type ScheduleCount struct { + // Count is the number of current schedules + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *ScheduleCount) Reset() { *m = ScheduleCount{} } +func (m *ScheduleCount) String() string { return proto.CompactTextString(m) } +func (*ScheduleCount) ProtoMessage() {} +func (*ScheduleCount) Descriptor() ([]byte, []int) { + 
return fileDescriptor_b6a5569ec6e2056c, []int{2} +} +func (m *ScheduleCount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScheduleCount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScheduleCount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ScheduleCount) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScheduleCount.Merge(m, src) +} +func (m *ScheduleCount) XXX_Size() int { + return m.Size() +} +func (m *ScheduleCount) XXX_DiscardUnknown() { + xxx_messageInfo_ScheduleCount.DiscardUnknown(m) +} + +var xxx_messageInfo_ScheduleCount proto.InternalMessageInfo + +func (m *ScheduleCount) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +func init() { + proto.RegisterType((*Schedule)(nil), "neutron.cron.Schedule") + proto.RegisterType((*MsgExecuteContract)(nil), "neutron.cron.MsgExecuteContract") + proto.RegisterType((*ScheduleCount)(nil), "neutron.cron.ScheduleCount") +} + +func init() { proto.RegisterFile("cron/schedule.proto", fileDescriptor_b6a5569ec6e2056c) } + +var fileDescriptor_b6a5569ec6e2056c = []byte{ + // 304 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x41, 0x4b, 0xc3, 0x30, + 0x14, 0xc7, 0x1b, 0xd7, 0x8d, 0x2d, 0x2a, 0x68, 0x36, 0xa4, 0xec, 0x10, 0xcb, 0x40, 0x18, 0x88, + 0x29, 0xe8, 0xcd, 0x63, 0xc7, 0xc0, 0x8b, 0x97, 0x7a, 0xf3, 0x32, 0xba, 0x2c, 0xa4, 0x85, 0x35, + 0x29, 0x49, 0x0a, 0xf3, 0x5b, 0xf8, 0x19, 0xfc, 0x34, 0x3b, 0xee, 0xe8, 0x49, 0xa4, 0xfd, 0x22, + 0xd2, 0x34, 0x13, 0xc1, 0xdb, 0xef, 0xcf, 0xff, 0xfd, 0xf3, 0xde, 0xcb, 0x83, 0x63, 0xaa, 0xa4, + 0x88, 0x34, 0xcd, 0xd8, 0xa6, 0xda, 0x32, 0x52, 0x2a, 0x69, 0x24, 0x3a, 0x13, 0xac, 0x32, 0x4a, + 0x0a, 0xd2, 0x9a, 0xd3, 0x09, 0x97, 0x5c, 0x5a, 0x23, 0x6a, 0xa9, 0xab, 0x99, 0x7d, 0x00, 0x38, + 0x7c, 
0x71, 0x31, 0x84, 0xa0, 0x2f, 0xd2, 0x82, 0x05, 0x20, 0x04, 0xf3, 0x51, 0x62, 0x19, 0x5d, + 0xc1, 0x41, 0xc9, 0x54, 0x2e, 0x37, 0xc1, 0x49, 0x08, 0xe6, 0x7e, 0xe2, 0x14, 0x7a, 0x84, 0x7e, + 0xa1, 0xb9, 0x0e, 0x7a, 0x61, 0x6f, 0x7e, 0x7a, 0x1f, 0x92, 0xbf, 0xbd, 0xc8, 0xb3, 0xe6, 0xcb, + 0x1d, 0xa3, 0x95, 0x61, 0x0b, 0x29, 0x8c, 0x4a, 0xa9, 0x89, 0xfd, 0xfd, 0xd7, 0xb5, 0x97, 0xd8, + 0x0c, 0x22, 0x70, 0xbc, 0x4d, 0xb5, 0x59, 0xb1, 0xae, 0x66, 0x95, 0xb1, 0x9c, 0x67, 0x26, 0xf0, + 0x6d, 0x83, 0xcb, 0xd6, 0x72, 0xe9, 0x27, 0x6b, 0xcc, 0x62, 0x88, 0xfe, 0xbf, 0x88, 0xa6, 0x70, + 0x48, 0x1d, 0xbb, 0x89, 0x7f, 0x35, 0xba, 0x80, 0xbd, 0x42, 0x73, 0x3b, 0xf2, 0x28, 0x69, 0x71, + 0x76, 0x03, 0xcf, 0x8f, 0x7b, 0x2e, 0x64, 0x25, 0x0c, 0x9a, 0xc0, 0x3e, 0x6d, 0xc1, 0x66, 0xfb, + 0x49, 0x27, 0xe2, 0xe5, 0xbe, 0xc6, 0xe0, 0x50, 0x63, 0xf0, 0x5d, 0x63, 0xf0, 0xde, 0x60, 0xef, + 0xd0, 0x60, 0xef, 0xb3, 0xc1, 0xde, 0xeb, 0x2d, 0xcf, 0x4d, 0x56, 0xad, 0x09, 0x95, 0x45, 0xe4, + 0x96, 0xbd, 0x93, 0x8a, 0x1f, 0x39, 0xda, 0x45, 0xf6, 0x06, 0xe6, 0xad, 0x64, 0x7a, 0x3d, 0xb0, + 0xbf, 0xfb, 0xf0, 0x13, 0x00, 0x00, 0xff, 0xff, 0x56, 0x9a, 0x42, 0xbf, 0x98, 0x01, 0x00, 0x00, +} + +func (m *Schedule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Schedule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Schedule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastExecuteHeight != 0 { + i = encodeVarintSchedule(dAtA, i, uint64(m.LastExecuteHeight)) + i-- + dAtA[i] = 0x20 + } + if len(m.Msgs) > 0 { + for iNdEx := len(m.Msgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Msgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSchedule(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0x1a + } + } + if m.Period != 0 { + i = encodeVarintSchedule(dAtA, i, uint64(m.Period)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintSchedule(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgExecuteContract) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgExecuteContract) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgExecuteContract) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Msg) > 0 { + i -= len(m.Msg) + copy(dAtA[i:], m.Msg) + i = encodeVarintSchedule(dAtA, i, uint64(len(m.Msg))) + i-- + dAtA[i] = 0x12 + } + if len(m.Contract) > 0 { + i -= len(m.Contract) + copy(dAtA[i:], m.Contract) + i = encodeVarintSchedule(dAtA, i, uint64(len(m.Contract))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScheduleCount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScheduleCount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScheduleCount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != 0 { + i = encodeVarintSchedule(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintSchedule(dAtA []byte, offset int, v uint64) int { + offset -= sovSchedule(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base 
+} +func (m *Schedule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSchedule(uint64(l)) + } + if m.Period != 0 { + n += 1 + sovSchedule(uint64(m.Period)) + } + if len(m.Msgs) > 0 { + for _, e := range m.Msgs { + l = e.Size() + n += 1 + l + sovSchedule(uint64(l)) + } + } + if m.LastExecuteHeight != 0 { + n += 1 + sovSchedule(uint64(m.LastExecuteHeight)) + } + return n +} + +func (m *MsgExecuteContract) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Contract) + if l > 0 { + n += 1 + l + sovSchedule(uint64(l)) + } + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovSchedule(uint64(l)) + } + return n +} + +func (m *ScheduleCount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != 0 { + n += 1 + sovSchedule(uint64(m.Count)) + } + return n +} + +func sovSchedule(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSchedule(x uint64) (n int) { + return sovSchedule(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Schedule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Schedule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Schedule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSchedule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + m.Period = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Period |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSchedule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msgs = append(m.Msgs, MsgExecuteContract{}) + if err := m.Msgs[len(m.Msgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastExecuteHeight", wireType) + } + m.LastExecuteHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastExecuteHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipSchedule(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSchedule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgExecuteContract) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgExecuteContract: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgExecuteContract: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Contract", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSchedule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Contract = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSchedule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedule(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSchedule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduleCount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduleCount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduleCount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedule(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSchedule + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSchedule(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSchedule + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSchedule + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSchedule + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSchedule = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSchedule = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSchedule = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/cron/types/tx.pb.go b/x/cron/types/tx.pb.go new file mode 100644 index 000000000..09f13e1c6 --- /dev/null +++ b/x/cron/types/tx.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: cron/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("cron/tx.proto", fileDescriptor_389b00cea0e301f3) } + +var fileDescriptor_389b00cea0e301f3 = []byte{ + // 120 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x2e, 0xca, 0xcf, + 0xd3, 0x2f, 0xa9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xc9, 0x4b, 0x2d, 0x2d, 0x29, + 0xca, 0xcf, 0xd3, 0x03, 0x09, 0x1b, 0xb1, 0x72, 0x31, 0xfb, 0x16, 0xa7, 0x3b, 0xb9, 0x9e, 0x78, + 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, + 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x76, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, + 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x54, 0xa7, 0x6e, 0x7e, 0x51, 0x3a, 0x8c, 0xad, 0x5f, 0xa1, 0x0f, + 0x31, 0xbe, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0x6c, 0x85, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, + 0xe7, 0xee, 0x4e, 0x1d, 0x73, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "neutron.cron.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: "cron/tx.proto", +} diff --git a/x/cron/types/types.go b/x/cron/types/types.go new file mode 100644 index 000000000..ab1254f4c --- /dev/null +++ b/x/cron/types/types.go @@ -0,0 +1 @@ +package types diff --git a/x/feerefunder/keeper/params.go b/x/feerefunder/keeper/params.go index f35216952..2a8b2f3d2 100644 --- a/x/feerefunder/keeper/params.go +++ b/x/feerefunder/keeper/params.go @@ -9,6 +9,7 @@ import ( // GetParams get all parameters as types.Params func (k Keeper) GetParams(ctx sdk.Context) (params types.Params) { k.paramstore.GetParamSet(ctx, ¶ms) + return params } diff --git a/x/tokenfactory/types/params.go b/x/tokenfactory/types/params.go index 92551f2e6..355c0638f 100644 --- a/x/tokenfactory/types/params.go +++ b/x/tokenfactory/types/params.go @@ -1,6 +1,7 @@ package types import ( + sdk "github.com/cosmos/cosmos-sdk/types" paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" ) @@ -9,7 +10,7 @@ func ParamKeyTable() paramtypes.KeyTable { return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) } -func NewParams() Params { +func NewParams(_ sdk.Coins) Params { return Params{} }