diff --git a/bigdecimal/init_test.go b/bigdecimal/init_test.go deleted file mode 100644 index 48bf45e9b..000000000 --- a/bigdecimal/init_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package bigdecimal - -import "github.com/streamingfast/logging" - -func init() { - logging.InstantiateLoggers() -} diff --git a/client/logging.go b/client/logging.go index 6a99fb0fa..c247d6412 100644 --- a/client/logging.go +++ b/client/logging.go @@ -4,4 +4,4 @@ import ( "github.com/streamingfast/logging" ) -var zlog, tracer = logging.PackageLogger("substreams-clients", "github.com/streamingfast/substreams/client") +var zlog, _ = logging.PackageLogger("substreams-clients", "github.com/streamingfast/substreams/client") diff --git a/cmd/substreams/flags.go b/cmd/substreams/flags.go deleted file mode 100644 index 3963e970e..000000000 --- a/cmd/substreams/flags.go +++ /dev/null @@ -1,117 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -func init() { - cobra.OnInitialize(func() { - autoBind(rootCmd, "SUBSTREAMS") - }) -} - -func autoBind(root *cobra.Command, prefix string) { - recurseCommands(root, prefix, nil) // []string{strings.ToLower(prefix)}) how does it wweeeerrkk? -} - -func recurseCommands(root *cobra.Command, prefix string, segments []string) { - var segmentPrefix string - if len(segments) > 0 { - segmentPrefix = strings.ToUpper(strings.Join(segments, "_")) + "_" - } - - root.PersistentFlags().VisitAll(func(f *pflag.Flag) { - newName := strings.Replace(strings.ToUpper(f.Name), "-", "_", -1) - varName := prefix + "_" + segmentPrefix + "GLOBAL_" + newName - if val := os.Getenv(varName); val != "" { - f.Usage += " [LOADED FROM ENV]" // Until we have a better template for our usage. - if !f.Changed { - f.Value.Set(val) - } - } - }) - - root.Flags().VisitAll(func(f *pflag.Flag) { - newName := strings.Replace(strings.ToUpper(f.Name), "-", "_", -1) - varName := prefix + "_" + segmentPrefix + "CMD_" + newName - if val := os.Getenv(varName); val != "" { - f.Usage += " [LOADED FROM ENV]" - if !f.Changed { - f.Value.Set(val) - } - } - }) - - for _, cmd := range root.Commands() { - recurseCommands(cmd, prefix, append(segments, cmd.Name())) - } -} - -func mustGetString(cmd *cobra.Command, flagName string) string { - val, err := cmd.Flags().GetString(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} - -func mustGetStringArray(cmd *cobra.Command, flagName string) []string { - val, err := cmd.Flags().GetStringArray(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} -func mustGetStringSlice(cmd *cobra.Command, flagName string) []string { - val, err := cmd.Flags().GetStringSlice(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - if len(val) == 0 { - return nil - } - return val -} -func mustGetInt64(cmd *cobra.Command, flagName string) int64 { - val, err := cmd.Flags().GetInt64(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} -func mustGetUint64(cmd *cobra.Command, flagName string) uint64 { - val, err := cmd.Flags().GetUint64(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} -func mustGetBool(cmd *cobra.Command, flagName string) bool { - val, err := cmd.Flags().GetBool(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return 
val -} - -func maybeGetString(cmd *cobra.Command, flagName string) string { - val, _ := cmd.Flags().GetString(flagName) - return val -} -func maybeGetInt64(cmd *cobra.Command, flagName string) int64 { - val, _ := cmd.Flags().GetInt64(flagName) - return val -} -func maybeGetUint64(cmd *cobra.Command, flagName string) uint64 { - val, _ := cmd.Flags().GetUint64(flagName) - return val -} -func maybeGetBool(cmd *cobra.Command, flagName string) bool { - val, _ := cmd.Flags().GetBool(flagName) - return val -} diff --git a/cmd/substreams/gui.go b/cmd/substreams/gui.go index 95960ad3f..324396b6e 100644 --- a/cmd/substreams/gui.go +++ b/cmd/substreams/gui.go @@ -74,12 +74,18 @@ func runGui(cmd *cobra.Command, args []string) error { } } - productionMode := mustGetBool(cmd, "production-mode") - debugModulesOutput := mustGetStringSlice(cmd, "debug-modules-output") + productionMode := sflags.MustGetBool(cmd, "production-mode") + debugModulesOutput := sflags.MustGetStringSlice(cmd, "debug-modules-output") + if len(debugModulesOutput) == 0 { + debugModulesOutput = nil + } if debugModulesOutput != nil && productionMode { return fmt.Errorf("cannot set 'debug-modules-output' in 'production-mode'") } - debugModulesInitialSnapshot := mustGetStringSlice(cmd, "debug-modules-initial-snapshot") + debugModulesInitialSnapshot := sflags.MustGetStringSlice(cmd, "debug-modules-initial-snapshot") + if len(debugModulesInitialSnapshot) == 0 { + debugModulesInitialSnapshot = nil + } outputModule := args[0] network := sflags.MustGetString(cmd, "network") @@ -108,7 +114,7 @@ func runGui(cmd *cobra.Command, args []string) error { return fmt.Errorf("read manifest %q: %w", manifestPath, err) } - endpoint, err := manifest.ExtractNetworkEndpoint(pkg.Network, mustGetString(cmd, "substreams-endpoint"), zlog) + endpoint, err := manifest.ExtractNetworkEndpoint(pkg.Network, sflags.MustGetString(cmd, "substreams-endpoint"), zlog) if err != nil { return fmt.Errorf("extracting endpoint: %w", err) } @@ -118,8 +124,8 @@ func runGui(cmd *cobra.Command, args []string) error { endpoint, authToken, authType, - mustGetBool(cmd, "insecure"), - mustGetBool(cmd, "plaintext"), + sflags.MustGetBool(cmd, "insecure"), + sflags.MustGetBool(cmd, "plaintext"), ) homeDir, err := os.UserHomeDir() @@ -134,7 +140,7 @@ func runGui(cmd *cobra.Command, args []string) error { homeDir = filepath.Join(homeDir, ".config", "substreams") } - cursor := mustGetString(cmd, "cursor") + cursor := sflags.MustGetString(cmd, "cursor") fmt.Println("Launching Substreams GUI...") @@ -171,12 +177,12 @@ func runGui(cmd *cobra.Command, args []string) error { OutputModule: outputModule, SubstreamsClientConfig: substreamsClientConfig, HomeDir: homeDir, - Vcr: mustGetBool(cmd, "replay"), - Headers: parseHeaders(mustGetStringSlice(cmd, "header")), + Vcr: sflags.MustGetBool(cmd, "replay"), + Headers: parseHeaders(sflags.MustGetStringSlice(cmd, "header")), Cursor: cursor, StartBlock: startBlock, StopBlock: stopBlock, - FinalBlocksOnly: mustGetBool(cmd, "final-blocks-only"), + FinalBlocksOnly: sflags.MustGetBool(cmd, "final-blocks-only"), Params: params, ReaderOptions: readerOptions, } diff --git a/cmd/substreams/info.go b/cmd/substreams/info.go index 30eae0d13..d1daf2421 100644 --- a/cmd/substreams/info.go +++ b/cmd/substreams/info.go @@ -49,14 +49,14 @@ func runInfo(cmd *cobra.Command, args []string) error { outputModule = args[1] } - outputSinkconfigFilesPath := mustGetString(cmd, "output-sinkconfig-files-path") + outputSinkconfigFilesPath := sflags.MustGetString(cmd, 
"output-sinkconfig-files-path") info, err := info.Extended(manifestPath, outputModule, sflags.MustGetBool(cmd, "skip-package-validation")) if err != nil { return err } - if mustGetBool(cmd, "json") { + if sflags.MustGetBool(cmd, "json") { res, err := json.MarshalIndent(info, "", " ") if err != nil { return err @@ -83,8 +83,13 @@ func runInfo(cmd *cobra.Command, args []string) error { for _, input := range mod.Inputs { fmt.Printf("Input: %s: %s\n", input.Type, input.Name) } + if mod.BlockFilter != nil { + fmt.Printf("Block Filter: (using *%s*): `%s`\n", mod.BlockFilter.Module, mod.BlockFilter.Query) + } switch mod.Kind { + case "index": + fmt.Println("Output Type:", *mod.OutputType) case "map": fmt.Println("Output Type:", *mod.OutputType) case "store": @@ -132,9 +137,7 @@ func runInfo(cmd *cobra.Command, args []string) error { var layerDefs []string for _, l := range layers { var mods []string - for _, m := range l { - mods = append(mods, m) - } + mods = append(mods, l...) layerDefs = append(layerDefs, fmt.Sprintf(`["%s"]`, strings.Join(mods, `","`))) } fmt.Printf("Stage %d: [%s]\n", i, strings.Join(layerDefs, `,`)) diff --git a/cmd/substreams/init.go b/cmd/substreams/init.go index d397f229b..88c300c1d 100644 --- a/cmd/substreams/init.go +++ b/cmd/substreams/init.go @@ -606,11 +606,14 @@ func prompt(label string, opts *promptOptions) (string, error) { templates.Valid = `{{ "?" | blue}} {{ . | bold }} {{ "[y/N]" | faint}} ` templates.Invalid = templates.Valid } - + def := "" + if opts != nil { + def = opts.Default + } prompt := promptui.Prompt{ Label: label, Templates: templates, - Default: opts.Default, + Default: def, } if opts != nil && opts.Validate != nil { prompt.Validate = opts.Validate diff --git a/cmd/substreams/pack.go b/cmd/substreams/pack.go index b710c02cb..a641a54c2 100644 --- a/cmd/substreams/pack.go +++ b/cmd/substreams/pack.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/cli" + "github.com/streamingfast/cli/sflags" "github.com/streamingfast/substreams/manifest" "go.uber.org/zap" "google.golang.org/protobuf/proto" @@ -59,7 +60,8 @@ func runPack(cmd *cobra.Command, args []string) error { return fmt.Errorf("reading manifest %q: %w", manifestPath, err) } - originalOutputFile := maybeGetString(cmd, "output-file") + originalOutputFile, _ := sflags.GetString(cmd, "output-file") + resolvedOutputFile := resolveOutputFile(originalOutputFile, map[string]string{ "manifestDir": filepath.Dir(manifestPath), "spkgDefaultName": fmt.Sprintf("%s-%s.spkg", strings.Replace(pkg.PackageMeta[0].Name, "_", "-", -1), pkg.PackageMeta[0].Version), diff --git a/cmd/substreams/protogen.go b/cmd/substreams/protogen.go index b608400a2..9dbf11061 100644 --- a/cmd/substreams/protogen.go +++ b/cmd/substreams/protogen.go @@ -6,6 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/cli" + "github.com/streamingfast/cli/sflags" "github.com/streamingfast/substreams/codegen" "github.com/streamingfast/substreams/manifest" "go.uber.org/zap" @@ -43,10 +44,10 @@ func init() { } func runProtogen(cmd *cobra.Command, args []string) error { - outputPath := mustGetString(cmd, "output-path") - excludePaths := mustGetStringArray(cmd, "exclude-paths") - generateMod := mustGetBool(cmd, "generate-mod-rs") - showGeneratedBufGen := mustGetBool(cmd, "show-generated-buf-gen") + outputPath := sflags.MustGetString(cmd, "output-path") + excludePaths := sflags.MustGetStringArray(cmd, "exclude-paths") + generateMod := sflags.MustGetBool(cmd, "generate-mod-rs") + showGeneratedBufGen := 
sflags.MustGetBool(cmd, "show-generated-buf-gen") manifestPath := "" if len(args) == 1 { diff --git a/cmd/substreams/proxy.go b/cmd/substreams/proxy.go index d42f8ec04..0446cfa88 100644 --- a/cmd/substreams/proxy.go +++ b/cmd/substreams/proxy.go @@ -11,6 +11,7 @@ import ( grpcreflect "connectrpc.com/grpcreflect" "github.com/rs/cors" "github.com/spf13/cobra" + "github.com/streamingfast/cli/sflags" "github.com/streamingfast/substreams/client" "github.com/streamingfast/substreams/manifest" pbrpcsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/rpc/v2" @@ -113,22 +114,22 @@ func init() { } func runProxy(cmd *cobra.Command, args []string) error { - addr := mustGetString(cmd, "listen-addr") + addr := sflags.MustGetString(cmd, "listen-addr") fmt.Println("listening on", addr) authToken, authType := tools.GetAuth(cmd, "substreams-api-key-envvar", "substreams-api-token-envvar") substreamsClientConfig := client.NewSubstreamsClientConfig( - mustGetString(cmd, "substreams-endpoint"), + sflags.MustGetString(cmd, "substreams-endpoint"), authToken, authType, - mustGetBool(cmd, "insecure"), - mustGetBool(cmd, "plaintext"), + sflags.MustGetBool(cmd, "insecure"), + sflags.MustGetBool(cmd, "plaintext"), ) cs := &ConnectServer{ - Manifest: mustGetString(cmd, "force-manifest"), + Manifest: sflags.MustGetString(cmd, "force-manifest"), SubstreamsClientConfig: substreamsClientConfig, - StartBlock: mustGetUint64(cmd, "force-start-block"), + StartBlock: sflags.MustGetUint64(cmd, "force-start-block"), } reflector := grpcreflect.NewStaticReflector( diff --git a/cmd/substreams/run.go b/cmd/substreams/run.go index 51443bff3..48ba57661 100644 --- a/cmd/substreams/run.go +++ b/cmd/substreams/run.go @@ -29,7 +29,7 @@ func init() { runCmd.Flags().Bool("final-blocks-only", false, "Only process blocks that have pass finality, to prevent any reorg and undo signal by staying further away from the chain HEAD") runCmd.Flags().Bool("insecure", false, "Skip certificate validation on GRPC connection") runCmd.Flags().Bool("plaintext", false, "Establish GRPC connection in plaintext") - runCmd.Flags().StringP("output", "o", "", "Output mode. 
Defaults to 'ui' when in a TTY is present, and 'json' otherwise")
+	runCmd.Flags().StringP("output", "o", "", "Output mode, one of: [ui, json, jsonl, clock]. Defaults to 'ui' when a TTY is present, and 'json' otherwise")
 	runCmd.Flags().StringSlice("debug-modules-initial-snapshot", nil, "List of 'store' modules from which to print the initial data snapshot (Unavailable in Production Mode)")
 	runCmd.Flags().StringSlice("debug-modules-output", nil, "List of modules from which to print outputs, deltas and logs (Unavailable in Production Mode)")
 	runCmd.Flags().StringSliceP("header", "H", nil, "Additional headers to be sent in the substreams request")
@@ -71,7 +71,7 @@ func runRun(cmd *cobra.Command, args []string) error {
 		outputModule = args[1]
 	}
 
-	outputMode := mustGetString(cmd, "output")
+	outputMode := sflags.MustGetString(cmd, "output")
 	network := sflags.MustGetString(cmd, "network")
 	paramsString := sflags.MustGetStringArray(cmd, "params")
@@ -99,7 +99,7 @@ func runRun(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("read manifest %q: %w", manifestPath, err)
 	}
 
-	endpoint, err := manifest.ExtractNetworkEndpoint(pkg.Network, mustGetString(cmd, "substreams-endpoint"), zlog)
+	endpoint, err := manifest.ExtractNetworkEndpoint(pkg.Network, sflags.MustGetString(cmd, "substreams-endpoint"), zlog)
 	if err != nil {
 		return fmt.Errorf("extracting endpoint: %w", err)
 	}
@@ -110,22 +110,28 @@ func runRun(cmd *cobra.Command, args []string) error {
 	}
 
 	var testRunner *test.Runner
-	testFile := mustGetString(cmd, "test-file")
+	testFile := sflags.MustGetString(cmd, "test-file")
 	if testFile != "" {
 		zlog.Info("running test runner", zap.String(testFile, testFile))
-		testRunner, err = test.NewRunner(testFile, msgDescs, mustGetBool(cmd, "test-verbose"), zlog)
+		testRunner, err = test.NewRunner(testFile, msgDescs, sflags.MustGetBool(cmd, "test-verbose"), zlog)
 		if err != nil {
 			return fmt.Errorf("failed to setup test runner: %w", err)
 		}
 	}
 
-	productionMode := mustGetBool(cmd, "production-mode")
-	debugModulesOutput := mustGetStringSlice(cmd, "debug-modules-output")
+	productionMode := sflags.MustGetBool(cmd, "production-mode")
+	debugModulesOutput := sflags.MustGetStringSlice(cmd, "debug-modules-output")
+	if len(debugModulesOutput) == 0 {
+		debugModulesOutput = nil
+	}
 	if debugModulesOutput != nil && productionMode {
 		return fmt.Errorf("cannot set 'debug-modules-output' in 'production-mode'")
 	}
-	debugModulesInitialSnapshot := mustGetStringSlice(cmd, "debug-modules-initial-snapshot")
+	debugModulesInitialSnapshot := sflags.MustGetStringSlice(cmd, "debug-modules-initial-snapshot")
+	if len(debugModulesInitialSnapshot) == 0 {
+		debugModulesInitialSnapshot = nil
+	}
 
 	startBlock, readFromModule, err := readStartBlockFlag(cmd, "start-block")
 	if err != nil {
@@ -145,8 +151,8 @@ func runRun(cmd *cobra.Command, args []string) error {
 		endpoint,
 		authToken,
 		authType,
-		mustGetBool(cmd, "insecure"),
-		mustGetBool(cmd, "plaintext"),
+		sflags.MustGetBool(cmd, "insecure"),
+		sflags.MustGetBool(cmd, "plaintext"),
 	)
 
 	ssClient, connClose, callOpts, headers, err := client.NewSubstreamsClient(substreamsClientConfig)
@@ -155,7 +161,7 @@ func runRun(cmd *cobra.Command, args []string) error {
 	}
 	defer connClose()
 
-	cursorStr := mustGetString(cmd, "cursor")
+	cursorStr := sflags.MustGetString(cmd, "cursor")
 
 	stopBlock, err := readStopBlockFlag(cmd, startBlock, "stop-block", cursorStr != "")
 	if err != nil {
@@ -166,7 +172,7 @@ func runRun(cmd *cobra.Command, args []string) error {
 		StartBlockNum:   startBlock,
 		StartCursor:     cursorStr,
StopBlockNum: stopBlock, - FinalBlocksOnly: mustGetBool(cmd, "final-blocks-only"), + FinalBlocksOnly: sflags.MustGetBool(cmd, "final-blocks-only"), Modules: pkg.Modules, OutputModule: outputModule, ProductionMode: productionMode, @@ -202,7 +208,7 @@ func runRun(cmd *cobra.Command, args []string) error { streamCtx = metadata.AppendToOutgoingContext(streamCtx, headers.ToArray()...) } //parse additional-headers flag - additionalHeaders := mustGetStringSlice(cmd, "header") + additionalHeaders := sflags.MustGetStringSlice(cmd, "header") if additionalHeaders != nil { res := parseHeaders(additionalHeaders) headerArray := make([]string, 0, len(res)*2) diff --git a/cmd/substreams/service-deploy.go b/cmd/substreams/service-deploy.go index 77dfa745f..98cdf218e 100644 --- a/cmd/substreams/service-deploy.go +++ b/cmd/substreams/service-deploy.go @@ -70,7 +70,7 @@ func deployE(cmd *cobra.Command, args []string) error { pkg.Networks = nil // we don't want to send this to the server, so it does not apply network values again, possibly losing the overriden params paramsMap := make(map[string]string) - for _, param := range mustGetStringArray(cmd, "deployment-params") { + for _, param := range sflags.MustGetStringArray(cmd, "deployment-params") { parts := strings.SplitN(param, "=", 2) if len(parts) != 2 { return fmt.Errorf("invalid parameter format: %q", param) diff --git a/cmd/substreams/setup.go b/cmd/substreams/setup.go index 70b1a6975..69e50c1e8 100644 --- a/cmd/substreams/setup.go +++ b/cmd/substreams/setup.go @@ -5,6 +5,7 @@ import ( _ "net/http/pprof" "github.com/spf13/cobra" + "github.com/streamingfast/cli/sflags" "github.com/streamingfast/logging" "github.com/streamingfast/substreams/manifest" "go.uber.org/zap" @@ -13,7 +14,7 @@ import ( func setup(cmd *cobra.Command, loglevel zapcore.Level) { setupProfiler() - manifest.IPFSURL = mustGetString(cmd, "ipfs-url") + manifest.IPFSURL = sflags.MustGetString(cmd, "ipfs-url") logging.InstantiateLoggers(logging.WithLogLevelSwitcherServerAutoStart(), logging.WithDefaultLevel(loglevel)) } diff --git a/codegen/generator.go b/codegen/generator.go index 9c5629136..ed317efa3 100644 --- a/codegen/generator.go +++ b/codegen/generator.go @@ -49,18 +49,6 @@ var tplMod string //go:embed templates/generator/pb_mod.gotmpl var tplPbMod string -//go:embed templates/generator/buildsh.gotmpl -var tplBuildSh string - -//go:embed templates/generator/cargotoml.gotmpl -var tplCargoToml string - -//go:embed templates/generator/manifestyaml.gotmpl -var tplManifestYaml string - -//go:embed templates/generator/rusttoolchain.gotmpl -var tplRustToolchain string - var StoreType = map[string]string{ "bytes": "Raw", "string": "String", @@ -368,7 +356,7 @@ func (e *Engine) ReadableStoreType(store *manifest.Module, input *manifest.Input } if p == manifest.UpdatePolicyAppend { - return fmt.Sprintf("substreams::store::StoreGetRaw") + return "substreams::store::StoreGetRaw" } t = maybeTranslateType(t) diff --git a/codegen/logging.go b/codegen/logging.go deleted file mode 100644 index 4b7d4a41e..000000000 --- a/codegen/logging.go +++ /dev/null @@ -1,7 +0,0 @@ -package codegen - -import ( - "github.com/streamingfast/logging" -) - -var zlog, tracer = logging.PackageLogger("substreams", "github.com/streamingfast/substreams/codegen") diff --git a/docs/release-notes/change-log.md b/docs/release-notes/change-log.md index 6b8a14ef9..7e212e2c5 100644 --- a/docs/release-notes/change-log.md +++ b/docs/release-notes/change-log.md @@ -9,6 +9,44 @@ All notable changes to this project will be documented in 
this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## v1.6.0
+
+### Upgrading
+
+> **Note** Upgrading to v1.6.0 will require changing the tier1 and tier2 versions concurrently, as the internal protocol has changed.
+
+### Highlights
+
+#### Index Modules and Block Filter
+
+* *Index Modules* and *Block Filter* can now be used to speed up processing and reduce the amount of parsed data.
+* When indexes are used along with the `BlockFilter` attribute on a mapper, blocks can be skipped completely: they will not be run in downstream modules or sent in the output stream, except in live segments or in dev mode, where an empty 'clock' is still sent.
+* See https://github.com/streamingfast/substreams-foundational-modules for an example implementation (a minimal manifest sketch also follows these release notes).
+* Blocks that are skipped will still appear in the metering as "read bytes" (unless a full segment is skipped), but the index stores themselves are not "metered".
+
+#### Scheduling / speed improvements
+
+* The scheduler no longer duplicates work in the first segments of a request with multiple stages.
+* Fixed all issues with running a Substreams where modules have different "initial blocks".
+* Improved maximum Tier1 output speed for data that is already processed.
+* Tier1 'FileWalker' now polls more aggressively on the local filesystem to prevent extra seconds of wait time.
+
+### Fixed
+
+* Fixed a bug in the `gui` that would cause a crash when trying to `r`estart the stream.
+* Fixed the total read bytes count when data is already cached.
+
+### Added
+
+* New environment variable `SUBSTREAMS_WORKERS_RAMPUP_TIME` specifies the initial delay before tier1 ramps up to its full number of concurrent tier2 requests.
+* Added 'clock' output to the `substreams run` command, useful mostly for performance testing or pre-caching.
+* (alpha) Introduced the `wasip1/tinygo-v1` binary type.
+
+### Changed / Removed
+
+* Disabled the `otelcol://` tracing protocol; its mere presence affected performance.
+* The previous value for `SUBSTREAMS_WORKERS_RAMPUP_TIME` was `4s`; it is now set to `0`, disabling the mechanism by default.
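For readers unfamiliar with the new feature, here is what an index module and a block-filtered mapper look like in a manifest. This is a minimal sketch assembled from this PR's own test fixtures in `manifest/manifest_test.go` and `manifest/package_test.go`; the query string is the placeholder used in the tests, and `map_events` is a hypothetical upstream mapper:

```yaml
modules:
  - name: basic_index
    kind: blockIndex                 # new module kind: emits per-block keys to index
    inputs:
      - map: map_events              # hypothetical mapper feeding the index
    output:
      type: proto:sf.substreams.index.v1.Keys

  - name: bf_module
    kind: map
    blockFilter:
      module: basic_index            # the index module to query
      query:
        string: "this is my query"   # or `params: true` to read the query from this module's params
    output:
      type: proto:sf.substreams.database.changes.v1
```

Blocks for which the index yields no match for the query are skipped entirely for `bf_module` and its downstream modules, as described in the notes above.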
+ ## v1.5.6 ### Fixes diff --git a/go.mod b/go.mod index 8ea9f79de..851d5f4d1 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/golang/protobuf v1.5.4 github.com/jhump/protoreflect v1.14.0 github.com/spf13/cobra v1.7.0 - github.com/spf13/pflag v1.0.5 + github.com/spf13/pflag v1.0.5 // indirect github.com/streamingfast/bstream v0.0.2-0.20240228193450-5200ecab8050 github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20240219205130-bfe428489338 @@ -29,6 +29,7 @@ require ( buf.build/gen/go/bufbuild/reflect/protocolbuffers/go v1.33.0-20240117202343-bf8f65e8876c.1 connectrpc.com/connect v1.16.1 connectrpc.com/grpcreflect v1.2.0 + github.com/RoaringBitmap/roaring v1.9.1 github.com/alecthomas/chroma v0.10.0 github.com/alecthomas/participle v0.7.1 github.com/bytecodealliance/wasmtime-go/v4 v4.0.0 @@ -59,7 +60,7 @@ require ( github.com/streamingfast/dmetering v0.0.0-20240403142935-dc8bb3bb32c3 github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545 github.com/streamingfast/eth-go v0.0.0-20230410173454-433bd8803da1 - github.com/streamingfast/sf-tracing v0.0.0-20240209202324-9daa52c71a52 + github.com/streamingfast/sf-tracing v0.0.0-20240430173521-888827872b90 github.com/streamingfast/shutter v1.5.0 github.com/streamingfast/substreams-sdk-go v0.0.0-20240110154316-5fb21a7a330b github.com/streamingfast/substreams-sink-sql v1.0.1-0.20231127153906-acf5f3e34330 @@ -90,6 +91,7 @@ require ( github.com/mschoch/smat v0.2.0 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.uber.org/goleak v1.3.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect @@ -111,7 +113,6 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.15.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.39.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/propagator v0.0.0-20221018185641-36f91511cfd7 // indirect - github.com/RoaringBitmap/roaring v1.9.1 github.com/atotto/clipboard v0.1.4 // indirect github.com/aws/aws-sdk-go v1.44.325 // indirect github.com/aymanbagabas/go-osc52 v1.2.1 // indirect @@ -121,7 +122,6 @@ require ( github.com/blendle/zapdriver v1.3.2-0.20200203083823-9200777f8a3d // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect github.com/bufbuild/protocompile v0.4.0 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chzyer/readline v1.5.0 // indirect @@ -146,7 +146,6 @@ require ( github.com/gorilla/schema v1.0.2 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/uint256 v1.2.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -209,13 +208,10 @@ require ( github.com/yuin/goldmark-emoji v1.0.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.9.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 // indirect - 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.23.1 // indirect go.opentelemetry.io/otel/exporters/zipkin v1.23.1 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/sdk v1.23.1 // indirect - go.opentelemetry.io/proto/otlp v1.1.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 diff --git a/go.sum b/go.sum index b41319570..1b46dbe2b 100644 --- a/go.sum +++ b/go.sum @@ -154,8 +154,6 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOF github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= @@ -361,8 +359,6 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -604,8 +600,8 @@ github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e h1:8hoT2QUwh+ github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= github.com/streamingfast/protoreflect v0.0.0-20231205191344-4b629d20ce8d h1:33VIARqUqBUKXJcuQoOS1rVSms54tgxhhNCmrLptpLg= github.com/streamingfast/protoreflect v0.0.0-20231205191344-4b629d20ce8d/go.mod h1:aBJivEdekmFWYSQ29EE/fN9IanJWJXbtjy3ky0XD/jE= -github.com/streamingfast/sf-tracing v0.0.0-20240209202324-9daa52c71a52 h1:D9M3b2mTrvnvjGpFFd/JqZ/GSPoUrWU2zrtRpDOyqao= -github.com/streamingfast/sf-tracing v0.0.0-20240209202324-9daa52c71a52/go.mod h1:VRhdIrTjQSsc9cryNR18HTS32rgrHxQYwmoUOSEhpFA= +github.com/streamingfast/sf-tracing v0.0.0-20240430173521-888827872b90 h1:94HllkX4ttYVilo8ZJv05b5z8JiMmqBvv4+Jdgk/+2A= +github.com/streamingfast/sf-tracing v0.0.0-20240430173521-888827872b90/go.mod h1:e6tKS/udlfXFUTQBYfDDdISfjULvQXet1kBrOeRfgI4= github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAtyaTOgs= github.com/streamingfast/shutter v1.5.0/go.mod h1:B/T6efqdeMGbGwjzPS1ToXzYZI4kDzI5/u4I+7qbjY8= 
github.com/streamingfast/substreams-sdk-go v0.0.0-20240110154316-5fb21a7a330b h1:O00ZKnNHVHrIEzS/dr+w07H3c0qP2JZwE6XS9scJZSY= @@ -684,10 +680,6 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 h1:o8iWeVFa1BcLtVEV0LzrCxV2/55tB3xLxADr6Kyoey4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1/go.mod h1:SEVfdK4IoBnbT2FXNM/k8yC08MrfbhWk3U4ljM8B3HE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1 h1:p3A5+f5l9e/kuEBwLOrnpkIDHQFlHmbiVxMURWRK6gQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1/go.mod h1:OClrnXUjBqQbInvjJFjYSnMxBSCXBF8r3b34WqjiIrQ= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.23.1 h1:IqmsDcJnxQSs6W+1TMSqpYO7VY4ZuEKJGYlSBPUlT1s= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.23.1/go.mod h1:VMZ84RYOd4Lrp0+09mckDvqBj2PXWDwOFaxb1P5uO8g= go.opentelemetry.io/otel/exporters/zipkin v1.23.1 h1:goka4KdsPPpHHQnzp1/XE1wVpk2oQO9RXCOH4MZWSyg= @@ -701,8 +693,6 @@ go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhs go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= -go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/info/info.go b/info/info.go index cdb8d6f21..03a0b31ec 100644 --- a/info/info.go +++ b/info/info.go @@ -6,7 +6,7 @@ import ( "github.com/streamingfast/substreams/manifest" pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" - "github.com/streamingfast/substreams/pipeline/outputmodules" + "github.com/streamingfast/substreams/pipeline/exec" "google.golang.org/protobuf/types/descriptorpb" ) @@ -61,15 +61,16 @@ type ProtoFileInfo struct { } type ModulesInfo struct { - Name string `json:"name"` - Kind string `json:"kind"` - Inputs []ModuleInput `json:"inputs"` - OutputType *string `json:"output_type,omitempty"` //for map inputs - ValueType *string `json:"value_type,omitempty"` //for store inputs - UpdatePolicy *string `json:"update_policy,omitempty"` //for store inputs - InitialBlock uint64 `json:"initial_block"` - Documentation *string `json:"documentation,omitempty"` - Hash string `json:"hash"` + Name string `json:"name"` + Kind string `json:"kind"` + Inputs []ModuleInput `json:"inputs"` + OutputType *string `json:"output_type,omitempty"` //for map inputs + ValueType *string `json:"value_type,omitempty"` //for store inputs + UpdatePolicy *string `json:"update_policy,omitempty"` //for store inputs + BlockFilter *pbsubstreams.Module_BlockFilter `json:"block_filter,omitempty"` + InitialBlock uint64 `json:"initial_block"` + Documentation *string `json:"documentation,omitempty"` + Hash 
string `json:"hash"` } type ModuleInput struct { @@ -134,6 +135,10 @@ func Basic(pkg *pbsubstreams.Package, graph *manifest.ModuleGraph) (*BasicInfo, kind := mod.GetKind() switch v := kind.(type) { + + case *pbsubstreams.Module_KindBlockIndex_: + modInfo.Kind = "index" + modInfo.OutputType = strPtr(v.KindBlockIndex.OutputType) case *pbsubstreams.Module_KindMap_: modInfo.Kind = "map" modInfo.OutputType = strPtr(v.KindMap.OutputType) @@ -180,6 +185,7 @@ func Basic(pkg *pbsubstreams.Package, graph *manifest.ModuleGraph) (*BasicInfo, inputs = append(inputs, inputInfo) } modInfo.Inputs = inputs + modInfo.BlockFilter = mod.BlockFilter modules = append(modules, modInfo) } @@ -239,12 +245,12 @@ func ExtendedWithPackage(pkg *pbsubstreams.Package, graph *manifest.ModuleGraph, var stages [][][]string if outputModule != "" { - outputGraph, err := outputmodules.NewOutputModuleGraph(outputModule, true, pkg.Modules) + execGraph, err := exec.NewOutputModuleGraph(outputModule, true, pkg.Modules) if err != nil { return nil, fmt.Errorf("creating output module graph: %w", err) } - stages = make([][][]string, 0, len(outputGraph.StagedUsedModules())) - for _, layers := range outputGraph.StagedUsedModules() { + stages = make([][][]string, 0, len(execGraph.StagedUsedModules())) + for _, layers := range execGraph.StagedUsedModules() { var layerDefs [][]string for _, l := range layers { var mods []string diff --git a/manifest/graph.go b/manifest/graph.go index 304328b62..b1f216aff 100644 --- a/manifest/graph.go +++ b/manifest/graph.go @@ -61,6 +61,13 @@ func NewModuleGraph(modules []*pbsubstreams.Module) (*ModuleGraph, error) { g.inputOrderIndex[module.Name][moduleName] = j } + if module.BlockFilter != nil { + moduleName := module.BlockFilter.Module + if j, found := g.moduleIndex[moduleName]; found { + g.AddCost(i, j, 1) + g.inputOrderIndex[module.Name][moduleName] = j + } + } } if !graph.Acyclic(g) { diff --git a/manifest/manifest.go b/manifest/manifest.go index 6b61edaeb..35955fb7b 100644 --- a/manifest/manifest.go +++ b/manifest/manifest.go @@ -101,8 +101,14 @@ type Module struct { } type BlockFilter struct { - Module string `yaml:"module,omitempty"` - Query string `yaml:"query,omitempty"` + Module string `yaml:"module,omitempty"` + Query BlockFilterQuery `yaml:"query,omitempty"` +} + +type BlockFilterQuery struct { + String string `yaml:"string,omitempty"` + Params bool `yaml:"params,omitempty"` + // Store string `yaml:"store,omitempty"` } type Input struct { @@ -304,10 +310,19 @@ func (m *Module) ToProtoWASM(codeIndex uint32) (*pbsubstreams.Module, error) { func (m *Module) setBlockFilterToProto(pbModule *pbsubstreams.Module) { if m.BlockFilter != nil { - pbModule.BlockFilter = &pbsubstreams.Module_BlockFilter{ + bf := &pbsubstreams.Module_BlockFilter{ Module: m.BlockFilter.Module, - Query: m.BlockFilter.Query, } + switch { + case m.BlockFilter.Query.String != "": + bf.Query = &pbsubstreams.Module_BlockFilter_QueryString{ + QueryString: m.BlockFilter.Query.String, + } + case m.BlockFilter.Query.Params: + bf.Query = &pbsubstreams.Module_BlockFilter_QueryFromParams{} + } + + pbModule.BlockFilter = bf } } @@ -404,6 +419,12 @@ const ( func (m *Module) setKindToProto(pbModule *pbsubstreams.Module) { switch m.Kind { + case ModuleKindBlockIndex: + pbModule.Kind = &pbsubstreams.Module_KindBlockIndex_{ + KindBlockIndex: &pbsubstreams.Module_KindBlockIndex{ + OutputType: m.Output.Type, + }, + } case ModuleKindMap: pbModule.Kind = &pbsubstreams.Module_KindMap_{ KindMap: &pbsubstreams.Module_KindMap{ diff --git 
a/manifest/manifest_test.go b/manifest/manifest_test.go index 64b2b2152..da65fee46 100644 --- a/manifest/manifest_test.go +++ b/manifest/manifest_test.go @@ -92,13 +92,14 @@ output: }, }, { - name: "basic with block filter", + name: "basic with block filter string", rawYamlInput: `--- name: bf_module kind: map blockFilter: module: basic_index - query: this is my query + query: + string: this is my query output: type: proto:sf.substreams.database.changes.v1 `, @@ -109,7 +110,33 @@ output: Output: StreamOutput{Type: "proto:sf.substreams.database.changes.v1"}, BlockFilter: &BlockFilter{ Module: "basic_index", - Query: "this is my query", + Query: BlockFilterQuery{String: "this is my query"}, + }, + }, + }, + { + name: "basic with block filter from params", + rawYamlInput: `--- +name: bf_module +kind: map +blockFilter: + module: basic_index + query: + params: true +inputs: + - params: string +output: + type: proto:sf.substreams.database.changes.v1 +`, + + expectedOutput: Module{ + Kind: ModuleKindMap, + Name: "bf_module", + Inputs: []*Input{{Params: "string"}}, + Output: StreamOutput{Type: "proto:sf.substreams.database.changes.v1"}, + BlockFilter: &BlockFilter{ + Module: "basic_index", + Query: BlockFilterQuery{Params: true}, }, }, }, diff --git a/manifest/mermaid.go b/manifest/mermaid.go index 337ee6629..3c301972c 100644 --- a/manifest/mermaid.go +++ b/manifest/mermaid.go @@ -63,7 +63,7 @@ func generateMermaidGraph(mods *pbsubstreams.Modules) string { str.WriteString(fmt.Sprintf(" %s --> %s;\n", name, s.Name)) case *pbsubstreams.Module_Input_Store_: name := input.Store.ModuleName - mode := strings.ToLower(fmt.Sprintf("%s", input.Store.Mode)) + mode := strings.ToLower(input.Store.Mode.String()) if mode == "deltas" { str.WriteString(fmt.Sprintf(" %s -- deltas --> %s;\n", name, s.Name)) } else { diff --git a/manifest/msgdesc.go b/manifest/msgdesc.go index f2eac02f7..0fe4fa99e 100644 --- a/manifest/msgdesc.go +++ b/manifest/msgdesc.go @@ -35,6 +35,10 @@ func BuildMessageDescriptors(pkg *pbsubstreams.Package) (out map[string]*ModuleD case *pbsubstreams.Module_KindMap_: msgType = modKind.KindMap.OutputType desc.MapOutputType = msgType + + case *pbsubstreams.Module_KindBlockIndex_: + msgType = modKind.KindBlockIndex.OutputType + desc.MapOutputType = msgType } if strings.HasPrefix(msgType, "proto:") { msgType = strings.TrimPrefix(msgType, "proto:") diff --git a/manifest/package.go b/manifest/package.go index 011959737..b8c76c33d 100644 --- a/manifest/package.go +++ b/manifest/package.go @@ -1,6 +1,7 @@ package manifest import ( + "context" "fmt" "os" "path" @@ -9,6 +10,7 @@ import ( "github.com/jhump/protoreflect/desc" "github.com/jhump/protoreflect/dynamic" pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" + "github.com/streamingfast/substreams/sqe" ) type manifestConverter struct { @@ -64,8 +66,13 @@ func (r *manifestConverter) validateManifest(manif *Manifest) error { // TODO: put a limit on the SIZE of the WASM payload (max 10MB per binary?) for _, s := range manif.Modules { + if s.BlockFilter != nil { + ctx := context.Background() + if err := validateQuery(ctx, s.BlockFilter.Query, manif.Params[s.Name]); err != nil { + return fmt.Errorf("stream %q: %w", s.Name, err) + } + } // TODO: let's make sure this is also checked when received in Protobuf in a remote request. 
- switch s.Kind { case ModuleKindMap: if s.Output.Type == "" { @@ -92,10 +99,6 @@ func (r *manifestConverter) validateManifest(manif *Manifest) error { } } - if s.InitialBlock != nil { - return fmt.Errorf("stream %q: block index module cannot have initial block", s.Name) - } - if s.BlockFilter != nil { return fmt.Errorf("stream %q: block index module cannot have block filter", s.Name) } @@ -127,6 +130,25 @@ func (r *manifestConverter) validateManifest(manif *Manifest) error { return nil } +func validateQuery(ctx context.Context, query BlockFilterQuery, param string) error { + var q string + switch { + case query.String != "" && query.Params: + return fmt.Errorf("only one of 'string' or 'params' can be set") + case query.String != "": + q = query.String + case query.Params: + q = param + default: + return fmt.Errorf("missing query") + } + + _, err := sqe.Parse(ctx, q) + if err != nil { + return fmt.Errorf("invalid query: %w", err) + } + return nil +} func handleUseModules(pkg *pbsubstreams.Package, manif *Manifest) error { packageModulesMapping := make(map[string]*pbsubstreams.Module) for _, module := range pkg.Modules.Modules { @@ -186,20 +208,7 @@ func checkEqualInputs(moduleWithUse, usedModule *pbsubstreams.Module, manifestMo return fmt.Errorf("module %q: input %q has different mode than the used module %q: input %q", manifestModuleWithUse.Name, input.String(), manifestModuleWithUse.Use, usedModuleInput.String()) } - curMod, found := packageModulesMapping[input.GetStore().ModuleName] - if !found { - return fmt.Errorf("module %q: input %q store module %q not found", manifestModuleWithUse.Name, input.String(), input.GetStore().ModuleName) - } - - usedMod, found := packageModulesMapping[usedModuleInput.GetStore().ModuleName] - if !found { - return fmt.Errorf("module %q: input %q store module %q not found", manifestModuleWithUse.Name, usedModuleInput.String(), usedModuleInput.GetStore().ModuleName) - } - - if curMod.Output.Type != usedMod.Output.Type { - return fmt.Errorf("module %q: input %q has different output than the used module %q: input %q", manifestModuleWithUse.Name, input.String(), manifestModuleWithUse.Use, usedModuleInput.String()) - } - + // we don't check output, we'll overwrite it with the used module case input.GetMap() != nil: if usedModuleInput.GetMap() == nil { return fmt.Errorf("module %q: input %q is not a map type", manifestModuleWithUse.Name, input.String()) diff --git a/manifest/package_test.go b/manifest/package_test.go index 3a1b2969f..7b2fbbd93 100644 --- a/manifest/package_test.go +++ b/manifest/package_test.go @@ -2,9 +2,10 @@ package manifest import ( "fmt" - pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" "testing" + pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" + "github.com/stretchr/testify/require" ) @@ -173,7 +174,6 @@ func TestHandleUseModules(t *testing.T) { } func TestValidateManifest(t *testing.T) { - var initialBlock uint64 = 123 cases := []struct { name string manifest *Manifest @@ -219,16 +219,6 @@ func TestValidateManifest(t *testing.T) { }, expectedError: "stream \"basic_index\": block index module should have inputs", }, - { - name: "block index with initialBlock", - manifest: &Manifest{ - SpecVersion: "v0.1.0", - Modules: []*Module{ - {Name: "basic_index", Kind: "blockIndex", Inputs: []*Input{{Map: "proto:sf.database.v1.changes"}}, InitialBlock: &initialBlock, Output: StreamOutput{"proto:sf.substreams.index.v1.Keys"}}, - }, - }, - expectedError: "stream \"basic_index\": block index module cannot 
have initial block", - }, { name: "block index with block filter", manifest: &Manifest{ @@ -236,7 +226,7 @@ func TestValidateManifest(t *testing.T) { Modules: []*Module{ {Name: "basic_index", Kind: "blockIndex", BlockFilter: &BlockFilter{ Module: "my_module", - Query: "test query", + Query: BlockFilterQuery{String: "test query"}, }, Inputs: []*Input{{Map: "proto:sf.database.v1.changes"}}, Output: StreamOutput{"proto:sf.substreams.index.v1.Keys"}}, }, }, @@ -286,7 +276,7 @@ func TestValidateModules(t *testing.T) { }, BlockFilter: &pbsubstreams.Module_BlockFilter{ Module: "block_index", - Query: "This is my query", + Query: &pbsubstreams.Module_BlockFilter_QueryString{QueryString: "This is my query"}, }, }, @@ -323,7 +313,7 @@ func TestValidateModules(t *testing.T) { }, BlockFilter: &pbsubstreams.Module_BlockFilter{ Module: "wrong_module", - Query: "This is my query", + Query: &pbsubstreams.Module_BlockFilter_QueryString{QueryString: "This is my query"}, }, }, @@ -360,7 +350,7 @@ func TestValidateModules(t *testing.T) { }, BlockFilter: &pbsubstreams.Module_BlockFilter{ Module: "map_module", - Query: "This is my query", + Query: &pbsubstreams.Module_BlockFilter_QueryString{QueryString: "This is my query"}, }, }, { diff --git a/manifest/reader.go b/manifest/reader.go index ab35443c1..ae86480dd 100644 --- a/manifest/reader.go +++ b/manifest/reader.go @@ -55,7 +55,6 @@ type Reader struct { // cached values protoDefinitions []*desc.FileDescriptor - sinkConfigJSON string sinkConfigDynamicMessage *dynamic.Message collectProtoDefinitionsFunc func(protoDefinitions []*desc.FileDescriptor) @@ -401,7 +400,7 @@ func validatePackage(pkg *pbsubstreams.Package, skipModuleOutputTypeValidation b outputType := i.KindMap.OutputType if !skipModuleOutputTypeValidation { if !strings.HasPrefix(outputType, "proto:") { - return fmt.Errorf("module %q incorrect outputTyupe %q valueType must be a proto Message", mod.Name, outputType) + return fmt.Errorf("module %q incorrect outputType %q valueType must be a proto Message", mod.Name, outputType) } } case *pbsubstreams.Module_KindStore_: @@ -533,17 +532,23 @@ func duplicateStringInput(in *pbsubstreams.Module_Input) string { } } -func checkValidBlockFilter(mod *pbsubstreams.Module, mapModuleKind map[string]pbsubstreams.ModuleKind) error { +func checkValidBlockFilter(mod *pbsubstreams.Module, mapModules map[string]*pbsubstreams.Module) error { blockFilter := mod.GetBlockFilter() if blockFilter != nil { seekModName := blockFilter.GetModule() - seekModuleKind, found := mapModuleKind[seekModName] + seekModule, found := mapModules[seekModName] if !found { return fmt.Errorf("block filter module %q not found", blockFilter.Module) } - if seekModuleKind != pbsubstreams.ModuleKindBlockIndex { + + if seekModule.ModuleKind() != pbsubstreams.ModuleKindBlockIndex { return fmt.Errorf("block filter module %q not of 'block_index' kind", blockFilter.Module) } + + if seekModule.InitialBlock > mod.InitialBlock { + return fmt.Errorf("block filter module %q cannot have an init block greater than module %q init block", blockFilter.Module, mod.Name) + } + } return nil } @@ -610,10 +615,12 @@ func ValidateModules(mods *pbsubstreams.Modules) error { } mapModuleKind := make(map[string]pbsubstreams.ModuleKind) + mapModules := make(map[string]*pbsubstreams.Module) for _, mod := range mods.Modules { if _, found := mapModuleKind[mod.Name]; found { return fmt.Errorf("module %q: duplicate module name", mod.Name) } + mapModules[mod.Name] = mod mapModuleKind[mod.Name] = mod.ModuleKind() } @@ -628,7 +635,7 
@@ func ValidateModules(mods *pbsubstreams.Modules) error { return fmt.Errorf("limit of 30 inputs for a given module (%q) reached", mod.Name) } - err := checkValidBlockFilter(mod, mapModuleKind) + err := checkValidBlockFilter(mod, mapModules) if err != nil { return fmt.Errorf("checking block filter for module %q: %w", mod.Name, err) } @@ -775,6 +782,10 @@ func prefixModules(mods []*pbsubstreams.Module, prefix string) { panic(fmt.Sprintf("module %q: input index %d: unsupported module input type %s", mod.Name, idx, inputIface.Input)) } } + + if mod.BlockFilter != nil { + mod.BlockFilter.Module = withPrefix(mod.BlockFilter.Module, prefix) + } } } diff --git a/manifest/signature.go b/manifest/signature.go index 4d57ebef7..22ce765e6 100644 --- a/manifest/signature.go +++ b/manifest/signature.go @@ -11,6 +11,8 @@ import ( pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" ) +var UseSimpleHash = false + type ModuleHash []byte type ModuleHashes struct { @@ -45,6 +47,14 @@ func (m *ModuleHashes) Iter(cb func(hash, name string) error) error { } func (m *ModuleHashes) HashModule(modules *pbsubstreams.Modules, module *pbsubstreams.Module, graph *ModuleGraph) (ModuleHash, error) { + //Simplified hash for testing purposes + if UseSimpleHash { + return m.hashModuleSimple(modules, module, graph) + } + return m.hashModule(modules, module, graph) +} + +func (m *ModuleHashes) hashModule(modules *pbsubstreams.Modules, module *pbsubstreams.Module, graph *ModuleGraph) (ModuleHash, error) { m.mu.RLock() if cachedHash := m.cache[module.Name]; cachedHash != nil { m.mu.RUnlock() @@ -66,6 +76,8 @@ func (m *ModuleHashes) HashModule(modules *pbsubstreams.Modules, module *pbsubst buf.WriteString("map") case *pbsubstreams.Module_KindStore_: buf.WriteString("store") + case *pbsubstreams.Module_KindBlockIndex_: + buf.WriteString("block_index") default: return nil, fmt.Errorf("invalid module file %T", module.Kind) } @@ -88,6 +100,24 @@ func (m *ModuleHashes) HashModule(modules *pbsubstreams.Modules, module *pbsubst } buf.WriteString(value) } + if module.BlockFilter != nil { + buf.WriteString("block_filter_module!") + blockfilterModule, err := graph.Module(module.BlockFilter.Module) + if err != nil { + return nil, fmt.Errorf("cannot hash module %q: cannot find block filter module %q", module.Name, module.BlockFilter.Module) + } + blockFilterModuleHash, err := m.hashModule(modules, blockfilterModule, graph) + if err != nil { + return nil, fmt.Errorf("cannot hash module %q: cannot get hash of its blockfiltermodule %q", module.Name, module.BlockFilter.Module) + } + buf.WriteString(string(blockFilterModuleHash)) + buf.WriteString("block_filter_query!") + qs, err := module.BlockFilterQueryString() + if err != nil { + return nil, err + } + buf.WriteString(qs) + } buf.WriteString("ancestors") ancestors, _ := graph.AncestorsOf(module.Name) @@ -112,6 +142,12 @@ func (m *ModuleHashes) HashModule(modules *pbsubstreams.Modules, module *pbsubst return output, nil } +func (m *ModuleHashes) hashModuleSimple(modules *pbsubstreams.Modules, module *pbsubstreams.Module, graph *ModuleGraph) (ModuleHash, error) { + hash := []byte(module.Name) + m.cache[module.Name] = hash + return hash, nil +} + func inputName(input *pbsubstreams.Module_Input) (string, error) { switch input.Input.(type) { case *pbsubstreams.Module_Input_Store_: diff --git a/manifest/sink.go b/manifest/sink.go index 110a89332..080abcd29 100644 --- a/manifest/sink.go +++ b/manifest/sink.go @@ -305,13 +305,7 @@ func convertYAMLtoJSONCompat(i any, resolvePath 
func(in string) string, scope st
 	if opts.LoadFromFile {
-		if strings.HasPrefix(x, "@@") { // support previous behavior
-			x = x[1:]
-		}
-
-		if strings.HasPrefix(x, "@") { // support previous behavior
-			x = x[1:]
-		}
+		x = strings.TrimPrefix(strings.TrimPrefix(x, "@"), "@") // support previous behavior (both "@" and "@@" prefixes)
 
 		cnt, err := os.ReadFile(resolvePath(x))
 		if err != nil {
diff --git a/manifest/testing.go b/manifest/testing.go
index 3dc0f1048..98dfdaf2a 100644
--- a/manifest/testing.go
+++ b/manifest/testing.go
@@ -11,7 +11,6 @@ var five = uint64(5)
 var ten = uint64(10)
 var twenty = uint64(20)
 var thirty = uint64(30)
-var fourty = uint64(40)
 
 // NewTestModules can be used in foreign packages for their test suite
 func NewSimpleTestModules() []*pbsubstreams.Module {
@@ -234,5 +233,6 @@ func TestReadManifest(t testing.T, manifestPath string) *pbsubstreams.Package {
 	manifestReader := MustNewReader(manifestPath)
 	pkg, _, err := manifestReader.Read()
 	require.NoError(t, err)
+
 	return pkg
 }
diff --git a/orchestrator/execout/execout_walker.go b/orchestrator/execout/execout_walker.go
index 543e70d73..fb3f2b7cf 100644
--- a/orchestrator/execout/execout_walker.go
+++ b/orchestrator/execout/execout_walker.go
@@ -64,13 +64,14 @@ func (r *Walker) IsWorking() bool {
 
 func (r *Walker) CmdDownloadCurrentSegment(waitBefore time.Duration) loop.Cmd {
 	file := r.fileWalker.File()
+	r.fileWalker.PreloadNext(r.ctx)
 
 	return func() loop.Msg {
 		time.Sleep(waitBefore)
 
 		err := file.Load(r.ctx)
 		if errors.Is(err, dstore.ErrNotFound) {
-			return MsgFileNotPresent{NextWait: computeNewWait(waitBefore)}
+			return MsgFileNotPresent{NextWait: computeNewWait(waitBefore, r.fileWalker.IsLocal)}
 		}
 		if err != nil {
 			return loop.NewQuitMsg(fmt.Errorf("loading %s cache %q: %w", file.ModuleName, file.Filename(), err))
@@ -83,13 +84,16 @@ func (r *Walker) CmdDownloadCurrentSegment(waitBefore time.Duration) loop.Cmd {
 	}
 }
 
-func computeNewWait(previousWait time.Duration) time.Duration {
+func computeNewWait(previousWait time.Duration, storeIsLocal bool) time.Duration {
+	if storeIsLocal {
+		return 50 * time.Millisecond
+	}
 	if previousWait == 0 {
 		return 500 * time.Millisecond
 	}
-	newWait := previousWait * 2
-	if newWait > 4*time.Second {
-		return 4 * time.Second
+	newWait := previousWait + 250*time.Millisecond
+	if newWait > 2*time.Second {
+		return 2 * time.Second
 	}
 	return newWait
 }
@@ -102,6 +106,9 @@ func (r *Walker) sendItems(sortedItems []*pboutput.Item) error {
 		if item.BlockNum < r.StartBlock {
 			continue
 		}
+		if item.BlockNum >= r.ExclusiveEndBlock {
+			return nil
+		}
 
 		blockScopedData, err := toBlockScopedData(r.module, item)
 		if err != nil {
diff --git a/orchestrator/parallelprocessor.go b/orchestrator/parallelprocessor.go
index df7a97580..d28362a85 100644
--- a/orchestrator/parallelprocessor.go
+++ b/orchestrator/parallelprocessor.go
@@ -12,7 +12,7 @@ import (
 	"github.com/streamingfast/substreams/orchestrator/scheduler"
 	"github.com/streamingfast/substreams/orchestrator/stage"
 	"github.com/streamingfast/substreams/orchestrator/work"
-	"github.com/streamingfast/substreams/pipeline/outputmodules"
+	"github.com/streamingfast/substreams/pipeline/exec"
 	"github.com/streamingfast/substreams/storage/execout"
 	"github.com/streamingfast/substreams/storage/store"
 )
@@ -28,7 +28,7 @@ func BuildParallelProcessor(
 	reqPlan *plan.RequestPlan,
 	workerFactory work.WorkerFactory,
 	maxParallelJobs int,
-	outputGraph *outputmodules.Graph,
+	execGraph *exec.Graph,
 	execoutStorage *execout.Configs,
 	respFunc func(resp substreams.ResponseFromAnyTier) error,
 	storeConfigs store.ConfigMap,
@@ -37,7 +37,7 @@ func 
BuildParallelProcessor( stream := response.New(respFunc) sched := scheduler.New(ctx, stream) - stages := stage.NewStages(ctx, outputGraph, reqPlan, storeConfigs) + stages := stage.NewStages(ctx, execGraph, reqPlan, storeConfigs) sched.Stages = stages // OPTIMIZATION: We should fetch the ExecOut files too, and see if they @@ -64,22 +64,25 @@ func BuildParallelProcessor( // for whatever reason, if reqPlan.ReadExecOut != nil { - execOutSegmenter := reqPlan.WriteOutSegmenter() // note: since we are *NOT* in a sub-request and are setting up output module is a map - requestedModule := outputGraph.OutputModule() + requestedModule := execGraph.OutputModule() if requestedModule.GetKindStore() != nil { panic("logic error: should not get a store as outputModule on tier 1") } - walker := execoutStorage.NewFileWalker(requestedModule.Name, execOutSegmenter) - - sched.ExecOutWalker = orchestratorExecout.NewWalker( - ctx, - requestedModule, - walker, - reqPlan.ReadExecOut, - stream, - ) + // no ReadExecOut if output type is an index + if requestedModule.GetKindMap() != nil { + execOutSegmenter := reqPlan.ReadOutSegmenter(requestedModule.InitialBlock) + walker := execoutStorage.NewFileWalker(requestedModule.Name, execOutSegmenter) + + sched.ExecOutWalker = orchestratorExecout.NewWalker( + ctx, + requestedModule, + walker, + reqPlan.ReadExecOut, + stream, + ) + } } // we may be here only for mapper, without stores diff --git a/orchestrator/plan/requestplan.go b/orchestrator/plan/requestplan.go index 784e60c11..4b634df9f 100644 --- a/orchestrator/plan/requestplan.go +++ b/orchestrator/plan/requestplan.go @@ -50,42 +50,34 @@ func (p *RequestPlan) RequiresParallelProcessing() bool { return p.WriteExecOut != nil || p.BuildStores != nil } -func BuildTier1RequestPlan(productionMode bool, segmentInterval uint64, graphInitBlock, resolvedStartBlock, linearHandoffBlock, exclusiveEndBlock uint64, scheduleStores bool) (*RequestPlan, error) { - if exclusiveEndBlock != 0 && linearHandoffBlock > exclusiveEndBlock { - return nil, fmt.Errorf("end block %d cannot be prior to the linear handoff block %d", exclusiveEndBlock, linearHandoffBlock) - } - if resolvedStartBlock < graphInitBlock { - return nil, fmt.Errorf("start block cannot be prior to the lowest init block in the requested module graph (%d)", graphInitBlock) +func BuildTier1RequestPlan(productionMode bool, segmentInterval, lowestInitialBlock, resolvedStartBlock, linearHandoffBlock, exclusiveEndBlock uint64, scheduleStores bool) (*RequestPlan, error) { + if resolvedStartBlock < lowestInitialBlock { + return nil, fmt.Errorf("start block cannot be prior to the lowest init block in the requested module graph (%d)", lowestInitialBlock) } - segmenter := block.NewSegmenter(segmentInterval, graphInitBlock, exclusiveEndBlock) + segmenter := block.NewSegmenter(segmentInterval, lowestInitialBlock, exclusiveEndBlock) plan := &RequestPlan{ segmentInterval: segmentInterval, } - if linearHandoffBlock != exclusiveEndBlock || + + if linearHandoffBlock < exclusiveEndBlock || + exclusiveEndBlock == 0 || linearHandoffBlock == 0 { // ex: unbound dev mode plan.LinearPipeline = block.NewRange(linearHandoffBlock, exclusiveEndBlock) } - if resolvedStartBlock == linearHandoffBlock && graphInitBlock == resolvedStartBlock { + + if resolvedStartBlock == linearHandoffBlock && lowestInitialBlock == resolvedStartBlock { return plan, nil } + if productionMode { - storesStopOnBound := plan.LinearPipeline == nil - endStoreBound := linearHandoffBlock - if storesStopOnBound { - segmentIdx := 
diff --git a/orchestrator/plan/requestplan.go b/orchestrator/plan/requestplan.go
index 784e60c11..4b634df9f 100644
--- a/orchestrator/plan/requestplan.go
+++ b/orchestrator/plan/requestplan.go
@@ -50,42 +50,34 @@ func (p *RequestPlan) RequiresParallelProcessing() bool {
 	return p.WriteExecOut != nil || p.BuildStores != nil
 }
 
-func BuildTier1RequestPlan(productionMode bool, segmentInterval uint64, graphInitBlock, resolvedStartBlock, linearHandoffBlock, exclusiveEndBlock uint64, scheduleStores bool) (*RequestPlan, error) {
-	if exclusiveEndBlock != 0 && linearHandoffBlock > exclusiveEndBlock {
-		return nil, fmt.Errorf("end block %d cannot be prior to the linear handoff block %d", exclusiveEndBlock, linearHandoffBlock)
-	}
-	if resolvedStartBlock < graphInitBlock {
-		return nil, fmt.Errorf("start block cannot be prior to the lowest init block in the requested module graph (%d)", graphInitBlock)
+func BuildTier1RequestPlan(productionMode bool, segmentInterval, lowestInitialBlock, resolvedStartBlock, linearHandoffBlock, exclusiveEndBlock uint64, scheduleStores bool) (*RequestPlan, error) {
+	if resolvedStartBlock < lowestInitialBlock {
+		return nil, fmt.Errorf("start block cannot be prior to the lowest init block in the requested module graph (%d)", lowestInitialBlock)
 	}
-	segmenter := block.NewSegmenter(segmentInterval, graphInitBlock, exclusiveEndBlock)
+	segmenter := block.NewSegmenter(segmentInterval, lowestInitialBlock, exclusiveEndBlock)
 	plan := &RequestPlan{
 		segmentInterval: segmentInterval,
 	}
-	if linearHandoffBlock != exclusiveEndBlock ||
+
+	if linearHandoffBlock < exclusiveEndBlock ||
+		exclusiveEndBlock == 0 ||
 		linearHandoffBlock == 0 { // ex: unbound dev mode
 		plan.LinearPipeline = block.NewRange(linearHandoffBlock, exclusiveEndBlock)
 	}
-	if resolvedStartBlock == linearHandoffBlock && graphInitBlock == resolvedStartBlock {
+
+	if resolvedStartBlock == linearHandoffBlock && lowestInitialBlock == resolvedStartBlock {
 		return plan, nil
 	}
+
 	if productionMode {
-		storesStopOnBound := plan.LinearPipeline == nil
-		endStoreBound := linearHandoffBlock
-		if storesStopOnBound {
-			segmentIdx := segmenter.IndexForEndBlock(linearHandoffBlock)
-			endStoreBoundRange := segmenter.Range(segmentIdx)
-			if endStoreBoundRange == nil {
-				return nil, fmt.Errorf("store bound range: invalid start block %d for segment interval %d", linearHandoffBlock, segmentInterval)
-			}
-			endStoreBound = endStoreBoundRange.ExclusiveEndBlock
-		}
-		if scheduleStores {
-			plan.BuildStores = block.NewRange(graphInitBlock, endStoreBound)
+		storesEnd := linearHandoffBlock
+		if scheduleStores && storesEnd > lowestInitialBlock {
+			plan.BuildStores = block.NewRange(lowestInitialBlock, storesEnd)
 		}
 
 		if resolvedStartBlock <= linearHandoffBlock {
-			startExecOutAtBlock := max(resolvedStartBlock, graphInitBlock)
+			startExecOutAtBlock := max(resolvedStartBlock, lowestInitialBlock)
 			startExecOutAtSegment := segmenter.IndexForStartBlock(startExecOutAtBlock)
 			writeExecOutStartBlockRange := segmenter.Range(startExecOutAtSegment)
 			if writeExecOutStartBlockRange == nil {
@@ -93,11 +85,15 @@ func BuildTier1RequestPlan(productionMode bool, segmentInterval uint64, graphIni
 			}
 			writeExecOutStartBlock := writeExecOutStartBlockRange.StartBlock
 			plan.WriteExecOut = block.NewRange(writeExecOutStartBlock, linearHandoffBlock)
-			plan.ReadExecOut = block.NewRange(resolvedStartBlock, linearHandoffBlock)
+			readEndBlock := linearHandoffBlock
+			if exclusiveEndBlock != 0 && exclusiveEndBlock < linearHandoffBlock {
+				readEndBlock = exclusiveEndBlock
+			}
+			plan.ReadExecOut = block.NewRange(resolvedStartBlock, readEndBlock)
 		}
 	} else { /* dev mode */
-		if scheduleStores {
-			plan.BuildStores = block.NewRange(graphInitBlock, linearHandoffBlock)
+		if scheduleStores && linearHandoffBlock > lowestInitialBlock {
+			plan.BuildStores = block.NewRange(lowestInitialBlock, linearHandoffBlock)
 		}
 		plan.WriteExecOut = nil
 	}
@@ -129,6 +125,14 @@ func (p *RequestPlan) WriteOutSegmenter() *block.Segmenter {
 	return block.NewSegmenter(p.segmentInterval, p.WriteExecOut.StartBlock, p.WriteExecOut.ExclusiveEndBlock)
 }
 
+func (p *RequestPlan) ReadOutSegmenter(outputModuleInitialBlock uint64) *block.Segmenter {
+	startBlock := p.WriteExecOut.StartBlock
+	if outputModuleInitialBlock > startBlock {
+		startBlock = outputModuleInitialBlock
+	}
+	return block.NewSegmenter(p.segmentInterval, startBlock, p.WriteExecOut.ExclusiveEndBlock)
+}
+
 func (p *RequestPlan) String() string {
 	return fmt.Sprintf("interval=%d, stores=%s, map_write=%s, map_read=%s, linear=%s", p.segmentInterval, p.BuildStores, p.WriteExecOut, p.ReadExecOut, p.LinearPipeline)
 }
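Note that the old up-front error for a stop block before the handoff block is gone; the stop block is instead folded into the read range. To make the arithmetic concrete, here is a worked call mirroring the g5 test case below (production mode, interval 100, lowest init block 621, resolved start 738, handoff 900, stop 842): stores cover the whole 621-900 span, the write range snaps down to the 700 segment boundary containing 738, and the read range is clamped to the 842 stop block.

package main

import (
	"fmt"

	"github.com/streamingfast/substreams/orchestrator/plan"
)

func main() {
	// production mode, interval 100, lowest init 621, start 738, handoff 900, stop 842
	p, err := plan.BuildTier1RequestPlan(true, 100, 621, 738, 900, 842, true)
	if err != nil {
		panic(err)
	}
	// interval=100, stores=[621, 900), map_write=[700, 900), map_read=[738, 842), linear=[nil)
	fmt.Println(p)
}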
diff --git a/orchestrator/plan/requestplan_test.go b/orchestrator/plan/requestplan_test.go
index 24cf61adc..26689482c 100644
--- a/orchestrator/plan/requestplan_test.go
+++ b/orchestrator/plan/requestplan_test.go
@@ -27,34 +27,6 @@ func TestBuildConfig(t *testing.T) {
 	}
 
 	tests := []testStruct{
-		{
-			name:                      "no parallel work to do prod mode",
-			storeInterval:             100,
-			productionMode:            false,
-			needsStores:               true,
-			graphInitBlock:            621,
-			resolvedStartBlock:        621,
-			linearHandoffBlock:        621,
-			exclusiveEndBlock:         742,
-			expectStoresRange:         "nil",
-			expectWriteExecOutRange:   "nil",
-			expectReadExecOutRange:    "nil",
-			expectLinearPipelineRange: "621-742",
-		},
-		{
-			name:                      "no parallel work to do dev mode",
-			storeInterval:             100,
-			productionMode:            true,
-			needsStores:               true,
-			graphInitBlock:            621,
-			resolvedStartBlock:        621,
-			linearHandoffBlock:        621,
-			exclusiveEndBlock:         742,
-			expectStoresRange:         "nil",
-			expectWriteExecOutRange:   "nil",
-			expectReadExecOutRange:    "nil",
-			expectLinearPipelineRange: "621-742",
-		},
 		{
 			name:          "g1. dev mode with stop within same segment as start block",
 			storeInterval: 100,
@@ -62,12 +34,12 @@
 			needsStores:        true,
 			graphInitBlock:     621,
 			resolvedStartBlock: 738,
-			linearHandoffBlock: 738,
+			linearHandoffBlock: 700,
 			exclusiveEndBlock:  742,
-			expectStoresRange:         "621-738",
+			expectStoresRange:         "621-700",
 			expectWriteExecOutRange:   "nil",
 			expectReadExecOutRange:    "nil",
-			expectLinearPipelineRange: "738-742",
+			expectLinearPipelineRange: "700-742",
 		},
 		{
 			name:          "g2. dev mode with stop in next segment",
@@ -76,97 +48,87 @@
 			needsStores:        true,
 			graphInitBlock:     621,
 			resolvedStartBlock: 738,
-			linearHandoffBlock: 738,
+			linearHandoffBlock: 700,
 			exclusiveEndBlock:  842,
-			expectStoresRange:         "621-738",
+			expectStoresRange:         "621-700",
 			expectWriteExecOutRange:   "nil",
 			expectReadExecOutRange:    "nil",
-			expectLinearPipelineRange: "738-842",
-		},
-		{
-			name:                      "g3. production with handoff and stop within same segment",
-			storeInterval:             100,
-			productionMode:            true,
-			needsStores:               true,
-			graphInitBlock:            621,
-			resolvedStartBlock:        738,
-			linearHandoffBlock:        742,
-			exclusiveEndBlock:         742,
-			expectStoresRange:         "621-742",
-			expectWriteExecOutRange:   "700-742",
-			expectReadExecOutRange:    "738-742",
-			expectLinearPipelineRange: "nil",
+			expectLinearPipelineRange: "700-842",
 		},
 		{
-			name:               "similar to g3. production with handoff on boundary",
+			name:               "g4. production within start and stop on the same segment",
 			storeInterval:      100,
 			productionMode:     true,
 			needsStores:        true,
 			graphInitBlock:     621,
 			resolvedStartBlock: 738,
 			linearHandoffBlock: 800,
-			exclusiveEndBlock:  800,
+			exclusiveEndBlock:  742,
 			expectStoresRange:         "621-800",
 			expectWriteExecOutRange:   "700-800",
-			expectReadExecOutRange:    "738-800",
+			expectReadExecOutRange:    "738-742",
 			expectLinearPipelineRange: "nil",
 		},
+
 		{
-			name:                      "production, handoff 10k and start/init is 0, stop infinity (0)",
-			storeInterval:             100,
-			productionMode:            true,
-			needsStores:               true,
-			graphInitBlock:            0,
-			resolvedStartBlock:        0,
-			linearHandoffBlock:        10000,
-			exclusiveEndBlock:         0,
-			expectStoresRange:         "0-10000",
-			expectWriteExecOutRange:   "0-10000",
-			expectReadExecOutRange:    "0-10000",
-			expectLinearPipelineRange: "10000-0",
-		},
-		{
-			name:               "g4. production with handoff and stop in next segment",
+			name:               "g5. production three different segments for init, start and stop block",
 			storeInterval:      100,
 			productionMode:     true,
 			needsStores:        true,
 			graphInitBlock:     621,
 			resolvedStartBlock: 738,
-			linearHandoffBlock: 842,
+			linearHandoffBlock: 900,
 			exclusiveEndBlock:  842,
-			expectStoresRange:         "621-842",
-			expectWriteExecOutRange:   "700-842",
+			expectStoresRange:         "621-900",
+			expectWriteExecOutRange:   "700-900",
 			expectReadExecOutRange:    "738-842",
 			expectLinearPipelineRange: "nil",
 		},
+
 		{
-			name:               "g5. production, start is init, start handoff and stop in three segments",
+			name:               "g6. production, start is init, handoff as boundary, stop block in a next segment",
 			storeInterval:      100,
 			productionMode:     true,
 			needsStores:        true,
 			graphInitBlock:     621,
 			resolvedStartBlock: 621,
-			linearHandoffBlock: 942,
+			linearHandoffBlock: 900,
 			exclusiveEndBlock:  998,
-			expectStoresRange:         "621-942",
-			expectWriteExecOutRange:   "621-942",
-			expectReadExecOutRange:    "621-942",
-			expectLinearPipelineRange: "942-998",
+			expectStoresRange:         "621-900",
+			expectWriteExecOutRange:   "621-900",
+			expectReadExecOutRange:    "621-900",
+			expectLinearPipelineRange: "900-998",
 		},
 		{
-			name:               "g6. production, start is init, start and handoff in two segments, stop infinity",
+			name:               "g7. production, start is init, handoff as boundary, stop block infinite",
 			storeInterval:      100,
 			productionMode:     true,
 			needsStores:        true,
 			graphInitBlock:     621,
 			resolvedStartBlock: 621,
-			linearHandoffBlock: 942,
+			linearHandoffBlock: 900,
+			exclusiveEndBlock:  0,
+			expectStoresRange:         "621-900",
+			expectWriteExecOutRange:   "621-900",
+			expectReadExecOutRange:    "621-900",
+			expectLinearPipelineRange: "900-0",
+		},
+
+		{
+			name:               "production, handoff 10k and start/init is 0, stop infinity (0)",
+			storeInterval:      100,
+			productionMode:     true,
+			needsStores:        true,
+			graphInitBlock:     0,
+			resolvedStartBlock: 0,
+			linearHandoffBlock: 10000,
 			exclusiveEndBlock:  0,
-			expectStoresRange:         "621-942",
-			expectWriteExecOutRange:   "621-942",
-			expectReadExecOutRange:    "621-942",
-			expectLinearPipelineRange: "942-0",
+			expectStoresRange:         "0-10000",
+			expectWriteExecOutRange:   "0-10000",
+			expectReadExecOutRange:    "0-10000",
+			expectLinearPipelineRange: "10000-0",
 		},
+
 		{
 			name:          "small segment, production",
 			storeInterval: 1000,
@@ -223,32 +185,6 @@
 			expectReadExecOutRange:    "nil",
 			expectLinearPipelineRange: "100-0",
 		},
-		{
-			name:                      "req in live segment development",
-			storeInterval:             10,
-			productionMode:            false,
-			needsStores:               false,
-			graphInitBlock:            5,
-			resolvedStartBlock:        105,
-			linearHandoffBlock:        100,
-			exclusiveEndBlock:         0,
-			expectStoresRange:         "nil",
-			expectWriteExecOutRange:   "nil",
-			expectReadExecOutRange:    "nil",
-			expectLinearPipelineRange: "100-0",
-		},
-
-		// This panics because we don't accept a start block prior to the graph init block.
-		// Maybe we can unblock that in the future, but it's not really useful.
-		// A fronting layer could have the start block be equal to the graph init block
-		// since _nothing_ would be produced prior to the graph init block anyway.
-		// And that _might already be the case_.
-		//{
-		//	"g7. production, start block is prior to graph init block",
-		//	100, 100,
-		//	true, 700, 621, 842, 842,
-		//	"700-800", "700-842", "nil",
-		//}
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -256,6 +192,7 @@
 			assert.Nil(t, err)
 			assert.Equal(t, tt.expectStoresRange, tostr(res.BuildStores), "buildStores")
 			assert.Equal(t, tt.expectWriteExecOutRange, tostr(res.WriteExecOut), "writeExecOut")
+			assert.Equal(t, tt.expectReadExecOutRange, tostr(res.ReadExecOut), "readExecOut")
 			assert.Equal(t, tt.expectLinearPipelineRange, tostr(res.LinearPipeline), "linearPipeline")
 		})
 	}
diff --git a/orchestrator/scheduler/scheduler.go b/orchestrator/scheduler/scheduler.go
index a87418395..cc1626830 100644
--- a/orchestrator/scheduler/scheduler.go
+++ b/orchestrator/scheduler/scheduler.go
@@ -83,11 +83,24 @@ func (s *Scheduler) Update(msg loop.Msg) loop.Cmd {
 	case work.MsgJobSucceeded:
 		metrics.Tier1ActiveWorkerRequest.Dec()
-		s.Stages.MarkSegmentPartialPresent(msg.Unit)
+		shadowedUnits := s.Stages.MarkJobSuccess(msg.Unit)
 		s.WorkerPool.Return(msg.Worker)
 
+		tryMerge := s.Stages.CmdTryMerge(msg.Unit.Stage)
+		if shadowedUnits == nil {
+			cmds = append(cmds, tryMerge)
+		} else {
+			multi := []loop.Cmd{tryMerge}
+			for _, u := range shadowedUnits {
+				multi = append(multi, s.Stages.CmdTryMerge(u.Stage))
+			}
+
+			cmds = append(cmds,
+				loop.Batch(multi...),
+			)
+		}
+
 		cmds = append(cmds,
-			s.Stages.CmdTryMerge(msg.Unit.Stage),
 			work.CmdScheduleNextJob(),
 		)
 		if s.ExecOutWalker != nil {
@@ -181,6 +194,12 @@ func (s *Scheduler) cmdShutdownWhenComplete() loop.Cmd {
 	if s.ExecOutWalker != nil {
 		start, current, end := s.ExecOutWalker.Progress()
 		fields = append(fields, zap.Int("cached_output_start", start), zap.Int("cached_output_current", current), zap.Int("cached_output_end", end))
+	} else {
+		// we may be creating an index
+		if s.Stages.OutputModuleIsIndex() && !s.Stages.LastStageCompleted() {
+			s.logger.Info("scheduler: waiting for last stage to complete because output module is an index")
+			return nil
+		}
 	}
 
 	s.logger.Info("scheduler: stores and cached_outputs stream completed, switching to live", fields...)
 	return func() loop.Msg {
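One consequence worth spelling out: a successful last-stage job can now complete several units at once. If, say, the stage-2 job for segment 0 had shadowed units on stages 1 and 0, MarkJobSuccess returns those two units and the handler emits loop.Batch(CmdTryMerge(2), CmdTryMerge(1), CmdTryMerge(0)) ahead of work.CmdScheduleNextJob(), so every stage that just gained a partial gets a merge attempt in the same update tick.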
diff --git a/orchestrator/stage/fetchstorage.go b/orchestrator/stage/fetchstorage.go
index 61fdba561..b121782f2 100644
--- a/orchestrator/stage/fetchstorage.go
+++ b/orchestrator/stage/fetchstorage.go
@@ -32,10 +32,9 @@ func (s *Stages) FetchStoresState(
 		mapperName = lastStage.storeModuleStates[0].name
 		conf := execoutConfigs.ConfigMap[mapperName]
 
-		// TODO: OPTIMIZATION: get the actual needed range for execOutputs to optimize lookup
 		if upToBlock != 0 {
-			files, err := conf.ListSnapshotFiles(ctx, bstream.NewInclusiveRange(0, upToBlock))
+			files, err := conf.ListSnapshotFiles(ctx, bstream.NewInclusiveRange(segmenter.InitialBlock(), upToBlock))
 			if err != nil {
 				return fmt.Errorf("fetching mapper storage state: %w", err)
 			}
diff --git a/orchestrator/stage/modstate.go b/orchestrator/stage/modstate.go
index 235dfc77d..bc2f9faf0 100644
--- a/orchestrator/stage/modstate.go
+++ b/orchestrator/stage/modstate.go
@@ -32,6 +32,9 @@ func NewModuleState(logger *zap.Logger, name string, segmenter *block.Segmenter,
 	}
 }
 
+func (s *StoreModuleState) Name() string {
+	return s.name
+}
 func (s *StoreModuleState) getStore(ctx context.Context, exclusiveEndBlock uint64) (*store.FullKV, error) {
 	if s.lastBlockInStore == exclusiveEndBlock && s.cachedStore != nil {
 		return s.cachedStore, nil
diff --git a/orchestrator/stage/segment.go b/orchestrator/stage/segment.go
index 170285dd7..985d62be6 100644
--- a/orchestrator/stage/segment.go
+++ b/orchestrator/stage/segment.go
@@ -11,6 +11,7 @@ const (
 	UnitPartialPresent
 	UnitScheduled // Means the job was scheduled for execution
 	UnitMerging   // A partial is being merged
+	UnitShadowed  // Will not be run directly; its outputs are created by the last stage of this segment
 	UnitCompleted // End state. A store has been snapshot for this segment, and we have gone over in the per-request squasher
 	UnitNoOp      // State given to a unit that does not need scheduling. Mostly for map segments where we know in advance we won't consume the output.
 )
@@ -34,6 +35,8 @@ func (s UnitState) String() string {
 		return "Merging"
 	case UnitCompleted:
 		return "Completed"
+	case UnitShadowed:
+		return "Shadowed"
 	case UnitNoOp:
 		return "NoOp"
 	default:
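Taken together with the scheduler change above, UnitShadowed adds a second path through a unit's lifecycle. A rough map, inferred from the transition set in transitions.go further down:

	// Regular unit:  Pending -> Scheduled -> PartialPresent -> Merging -> Completed
	// Shadowed unit: Pending -> Shadowed  -> PartialPresent -> Merging -> Completed

In the shadowed path the unit never gets a job of its own: the last stage's job for the same segment produces its partial output as a side effect, and MarkJobSuccess flips it to PartialPresent so the squasher can treat it like any other partial.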
diff --git a/orchestrator/stage/stages.go b/orchestrator/stage/stages.go
index 5e62639d5..bede88c48 100644
--- a/orchestrator/stage/stages.go
+++ b/orchestrator/stage/stages.go
@@ -12,7 +12,7 @@ import (
 	"github.com/streamingfast/substreams/orchestrator/loop"
 	"github.com/streamingfast/substreams/orchestrator/plan"
 	pbsubstreamsrpc "github.com/streamingfast/substreams/pb/sf/substreams/rpc/v2"
-	"github.com/streamingfast/substreams/pipeline/outputmodules"
+	"github.com/streamingfast/substreams/pipeline/exec"
 	"github.com/streamingfast/substreams/reqctx"
 	"github.com/streamingfast/substreams/storage/store"
 )
@@ -42,8 +42,9 @@ type Stages struct {
 	stages []*Stage
 
 	// segmentStates is a matrix of segment and stages
-	segmentStates  []stageStates // segmentStates[offsetSegment][StageIndex]
-	lastStatUpdate time.Time
+	segmentStates       []stageStates // segmentStates[offsetSegment][StageIndex]
+	lastStatUpdate      time.Time
+	outputModuleIsIndex bool
 
 	// If you're processing at 12M blocks, offset by 12,000 segments, so you don't need to allocate 12k empty elements.
 	// Any previous segment is assumed to have completed successfully, and any stores that we sync'd prior to this offset
@@ -54,7 +55,7 @@ type stageStates []UnitState
 
 func NewStages(
 	ctx context.Context,
-	outputGraph *outputmodules.Graph,
+	execGraph *exec.Graph,
 	reqPlan *plan.RequestPlan,
 	storeConfigs store.ConfigMap,
 ) (out *Stages) {
@@ -65,11 +66,12 @@ func NewStages(
 
 	logger := reqctx.Logger(ctx)
 
-	stagedModules := outputGraph.StagedUsedModules()
+	stagedModules := execGraph.StagedUsedModules()
 	out = &Stages{
-		ctx:             ctx,
-		logger:          reqctx.Logger(ctx),
-		globalSegmenter: reqPlan.BackprocessSegmenter(),
+		ctx:                 ctx,
+		logger:              reqctx.Logger(ctx),
+		globalSegmenter:     reqPlan.BackprocessSegmenter(),
+		outputModuleIsIndex: execGraph.OutputModule().GetKindBlockIndex() != nil,
 	}
 	if reqPlan.BuildStores != nil {
 		out.storeSegmenter = reqPlan.StoresSegmenter()
@@ -122,13 +124,30 @@ func NewStages(
 	return out
 }
 
-func layerKind(layer outputmodules.LayerModules) Kind {
+func layerKind(layer exec.LayerModules) Kind {
 	if layer.IsStoreLayer() {
 		return KindStore
 	}
 	return KindMap
 }
 
+func (s *Stages) OutputModuleIsIndex() bool {
+	return s.outputModuleIsIndex
+}
+
+func (s *Stages) LastStageCompleted() bool {
+	lastSegment := s.mapSegmenter.LastIndex()
+
+	idx := len(s.stages) - 1
+	for seg := s.mapSegmenter.FirstIndex(); seg <= lastSegment; seg++ {
+		state := s.getState(Unit{Segment: seg, Stage: idx})
+		if state != UnitCompleted && state != UnitPartialPresent && state != UnitNoOp {
+			return false
+		}
+	}
+	return true
+}
+
 func (s *Stages) AllStoresCompleted() bool {
 	if s.storeSegmenter == nil { // no store at all
 		return true
@@ -318,7 +337,7 @@ func (s *Stages) getState(u Unit) UnitState {
 	index := u.Segment - s.segmentOffset
 	if index >= len(s.segmentStates) {
 		return UnitPending
-	} else if index < 0 {
+	} else if index < 0 || (len(s.stages) != 0 && u.Segment < s.stages[u.Stage].segmenter.FirstIndex()) {
 		return UnitNoOp
 	} else {
 		return s.segmentStates[index][u.Stage]
@@ -351,17 +370,16 @@ func (s *Stages) NextJob() (Unit, *block.Range) {
 	// OPTIMIZATION: eventually, we can push `segmentsOffset`
 	// each time contiguous segments are completed for all stages.
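	// A sketch of the shadowed scheduling below, assuming three stages on a
	// fresh segment (StatesString notation, "Z" = Shadowed, "S" = Scheduled):
	// markShadowedUnits marks the earlier stages' units as Shadowed, and
	// NextJob hands out only the last stage's unit, whose tier2 job produces
	// the earlier stages' outputs as a side effect:
	//
	//	S:Z
	//	S:Z
	//	M:S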
+ lastStage := len(s.stages) - 1 for segmentIdx := s.globalSegmenter.FirstIndex(); segmentIdx <= s.globalSegmenter.LastIndex(); segmentIdx++ { - for stageIdx := len(s.stages) - 1; stageIdx >= 0; stageIdx-- { + someShadowed := s.markShadowedUnits(segmentIdx) + for stageIdx := lastStage; stageIdx >= 0; stageIdx-- { stage := s.stages[stageIdx] unit := Unit{Segment: segmentIdx, Stage: stageIdx} segmentState := s.getState(unit) if segmentState != UnitPending { continue } - if segmentState == UnitNoOp { - continue - } if segmentIdx < stage.segmenter.FirstIndex() { // Don't process stages where all modules' initial blocks are only later continue @@ -380,6 +398,16 @@ func (s *Stages) NextJob() (Unit, *block.Range) { continue } + if someShadowed && stageIdx == lastStage { + for i := 0; i < len(s.stages); i++ { + u := Unit{Segment: segmentIdx, Stage: i} + if st := s.getState(u); st == UnitPending { + s.markSegmentScheduled(u) + return u, r + } + } + } + s.markSegmentScheduled(unit) return unit, r } @@ -387,6 +415,36 @@ func (s *Stages) NextJob() (Unit, *block.Range) { return Unit{}, nil } +func (s *Stages) shadowable(segmentIdx int) bool { + if len(s.stages) < 2 { + return false + } + return segmentIdx-s.segmentOffset <= len(s.stages)-1 +} + +func (s *Stages) markShadowedUnits(segmentIdx int) (someShadowed bool) { + if !s.shadowable(segmentIdx) { + return + } + + relSegmentOrdinal := segmentIdx - s.segmentOffset + s.allocSegments(segmentIdx) + + lastStage := len(s.stages) - 1 + for stageIdx := lastStage - 1; stageIdx >= relSegmentOrdinal; stageIdx-- { // skip the last stage + unit := Unit{Segment: segmentIdx, Stage: stageIdx} + segmentState := s.getState(unit) + if segmentState != UnitCompleted && segmentState != UnitNoOp { + nextState := s.getState(Unit{Segment: segmentIdx, Stage: stageIdx + 1}) + if nextState == UnitPending || nextState == UnitScheduled || nextState == UnitMerging || nextState == UnitShadowed { + s.setState(unit, UnitShadowed) + someShadowed = true + } + } + } + return +} + func (s *Stages) allocSegments(segmentIdx int) { segmentsNeeded := segmentIdx - s.segmentOffset if len(s.segmentStates) > segmentsNeeded { @@ -405,9 +463,19 @@ func (s *Stages) dependenciesCompleted(u Unit) bool { if u.Stage == 0 { return true } + + previousSegmentParent := s.getState(Unit{Segment: u.Segment - 1, Stage: u.Stage - 1}) + for i := u.Stage - 1; i >= 0; i-- { - state := s.getState(Unit{Segment: u.Segment - 1, Stage: i}) - if !(state == UnitCompleted || state == UnitNoOp) { + state := s.getState(Unit{Segment: u.Segment, Stage: i}) + switch state { + case UnitCompleted, UnitNoOp: + case UnitShadowed, UnitPartialPresent: + // if the direct parent stage is shadowed or UnitPartialPresent, we need the previous segment's previous stage to be completed + if previousSegmentParent != UnitCompleted && previousSegmentParent != UnitNoOp { + return false + } + default: return false } } @@ -452,6 +520,7 @@ func (s *Stages) StatesString() string { UnitMerging: "M", UnitCompleted: "C", UnitNoOp: "N", + UnitShadowed: "Z", }[segment[i]]) } out.WriteString("\n") diff --git a/orchestrator/stage/stages_test.go b/orchestrator/stage/stages_test.go index c67721046..350b99bc8 100644 --- a/orchestrator/stage/stages_test.go +++ b/orchestrator/stage/stages_test.go @@ -10,7 +10,7 @@ import ( "github.com/streamingfast/substreams/block" "github.com/streamingfast/substreams/orchestrator/plan" - "github.com/streamingfast/substreams/pipeline/outputmodules" + "github.com/streamingfast/substreams/pipeline/exec" ) func TestNewStages(t 
*testing.T) { @@ -20,7 +20,7 @@ func TestNewStages(t *testing.T) { stages := NewStages( context.Background(), - outputmodules.TestGraphStagedModules(5, 7, 12, 22, 25), + exec.TestGraphStagedModules(5, 7, 12, 22, 25), reqPlan, nil, ) @@ -38,155 +38,165 @@ func TestNewStages(t *testing.T) { assert.Equal(t, block.ParseRange("70-75"), stages.globalSegmenter.Range(7)) } -func TestNewStagesNextJobs(t *testing.T) { - //seg := block.NewSegmenter(10, 5, 50) - reqPlan, err := plan.BuildTier1RequestPlan(true, 10, 5, 5, 40, 40, true) +func unit(seg, stage int) Unit { + return Unit{Segment: seg, Stage: stage} +} + +func TestNewStageNextJobs(t *testing.T) { + reqPlan, err := plan.BuildTier1RequestPlan(true, 10, 5, 5, 50, 50, true) assert.NoError(t, err) - assert.Equal(t, "interval=10, stores=[5, 40), map_write=[5, 40), map_read=[5, 40), linear=[nil)", reqPlan.String()) + assert.Equal(t, "interval=10, stores=[5, 50), map_write=[5, 50), map_read=[5, 50), linear=[nil)", reqPlan.String()) stages := NewStages( context.Background(), - outputmodules.TestGraphStagedModules(5, 5, 5, 5, 5), + exec.TestGraphStagedModules(5, 5, 5, 5, 5), reqPlan, nil, ) - stages.allocSegments(0) - stages.setState(Unit{Stage: 2, Segment: 0}, UnitNoOp) - - segmentStateEquals(t, stages, ` -S:. -S:. -M:N`) - - j1, _ := stages.NextJob() - assert.Equal(t, 1, j1.Stage) - assert.Equal(t, 0, j1.Segment) - - segmentStateEquals(t, stages, ` -S:. -S:S -M:N`) + noNextJob := func() { + _, r := stages.NextJob() + assert.Nil(t, r) + } - stages.forceTransition(0, 1, UnitCompleted) + nextJob := func() Unit { + j, r := stages.NextJob() + if r == nil { + t.Error("no next job") + } + return j + } - segmentStateEquals(t, stages, ` -S:. -S:C -M:N`) + merge := func(u Unit) { + stages.forceTransition(u.Segment, u.Stage, UnitMerging) + stages.MergeCompleted(u) + } - stages.NextJob() + assert.Equal(t, unit(0, 2), nextJob()) segmentStateEquals(t, stages, ` -S:S -S:C -M:N`) - - stages.NextJob() + S:Z + S:Z + M:S`) + assert.Equal(t, unit(1, 0), nextJob()) segmentStateEquals(t, stages, ` -S:SS -S:C. -M:N.`) - - stages.forceTransition(0, 0, UnitCompleted) - stages.NextJob() + S:ZS + S:ZZ + M:S.`) + assert.Equal(t, unit(2, 0), nextJob()) segmentStateEquals(t, stages, ` -S:CS -S:C. -M:NS`) - - stages.forceTransition(1, 0, UnitCompleted) - stages.NextJob() + S:ZSS + S:ZZ. + M:S..`) + stages.MarkJobSuccess(unit(0, 2)) segmentStateEquals(t, stages, ` -S:CC -S:CS -M:NS`) + S:PSS + S:PZ. + M:P..`) - stages.NextJob() + merge(unit(0, 0)) + merge(unit(0, 1)) + assert.Equal(t, unit(3, 0), nextJob()) segmentStateEquals(t, stages, ` -S:CC. -S:CSS -M:NS.`) - - stages.MarkSegmentPartialPresent(id(1, 2)) + S:CSSS + S:CZ.. + M:P...`) + stages.MarkJobSuccess(unit(1, 0)) + merge(unit(1, 0)) segmentStateEquals(t, stages, ` -S:CC. -S:CSS -M:NP.`) - - stages.MarkSegmentMerging(id(1, 2)) + S:CCSS + S:CZ.. + M:P...`) + assert.Equal(t, unit(1, 2), nextJob()) segmentStateEquals(t, stages, ` -S:CC. -S:CSS -M:NM.`) - - stages.markSegmentCompleted(id(1, 2)) - stages.NextJob() + S:CCSS + S:CZ.. + M:PS..`) + assert.Equal(t, unit(4, 0), nextJob()) segmentStateEquals(t, stages, ` -S:CCS -S:CSS -M:NC.`) + S:CCSSS + S:CZ... + M:PS...`) - stages.NextJob() + noNextJob() - _, r := stages.NextJob() - assert.Nil(t, r) + stages.MarkJobSuccess(unit(2, 0)) + merge(unit(2, 0)) + stages.MarkJobSuccess(unit(3, 0)) + merge(unit(3, 0)) segmentStateEquals(t, stages, ` -S:CCSS -S:CSS. -M:NC..`) + S:CCCCS + S:CZ... 
+ M:PS...`) - stages.MarkSegmentPartialPresent(id(2, 0)) + assert.Equal(t, unit(2, 1), nextJob()) + assert.Equal(t, unit(3, 1), nextJob()) + segmentStateEquals(t, stages, ` + S:CCCCS + S:CZSS. + M:PS...`) + stages.MarkJobSuccess(unit(2, 1)) segmentStateEquals(t, stages, ` -S:CCPS -S:CSS. -M:NC..`) + S:CCCCS + S:CZPS. + M:PS...`) - stages.MarkSegmentMerging(id(2, 0)) + noNextJob() - _, r = stages.NextJob() - assert.Nil(t, r) + stages.MarkJobSuccess(unit(1, 2)) segmentStateEquals(t, stages, ` -S:CCMS -S:CSS. -M:NC..`) + S:CCCCS + S:CPPS. + M:PP...`) - _, r = stages.NextJob() - assert.Nil(t, r) - stages.markSegmentCompleted(id(2, 0)) + noNextJob() + stages.MarkJobSuccess(unit(4, 0)) + + assert.Equal(t, unit(4, 1), nextJob()) segmentStateEquals(t, stages, ` -S:CCCS -S:CSS. -M:NC..`) + S:CCCCP + S:CPPSS + M:PP...`) - stages.NextJob() + merge(unit(1, 1)) + merge(unit(2, 1)) + segmentStateEquals(t, stages, ` + S:CCCCP + S:CCCSS + M:PP...`) + assert.Equal(t, unit(2, 2), nextJob()) segmentStateEquals(t, stages, ` -S:CCCS -S:CSSS -M:NC..`) + S:CCCCP + S:CCCSS + M:PPS..`) - stages.forceTransition(1, 1, UnitCompleted) - stages.NextJob() + stages.MarkJobSuccess(unit(3, 1)) + stages.MarkJobSuccess(unit(4, 1)) + assert.Equal(t, unit(3, 2), nextJob()) segmentStateEquals(t, stages, ` -S:CCCS -S:CCSS -M:NCS.`) + S:CCCCP + S:CCCPP + M:PPSS.`) -} + noNextJob() -func id(segment, stage int) Unit { - return Unit{Stage: stage, Segment: segment} + merge(unit(3, 1)) + assert.Equal(t, unit(4, 2), nextJob()) + segmentStateEquals(t, stages, ` + S:CCCCP + S:CCCCP + M:PPSSS`) } func segmentStateEquals(t *testing.T, s *Stages, segments string) { @@ -194,7 +204,13 @@ func segmentStateEquals(t *testing.T, s *Stages, segments string) { out := s.StatesString() - assert.Equal(t, strings.TrimSpace(segments), strings.TrimSpace(out)) + lines := strings.FieldsFunc(segments, func(c rune) bool { return c == '\n' || c == '\r' }) + for i := range lines { + lines[i] = strings.TrimSpace(lines[i]) + } + canon := strings.Join(lines, "\n") + + assert.Equal(t, canon, strings.TrimSpace(out)) } func TestStages_previousUnitComplete(t *testing.T) { diff --git a/orchestrator/stage/transitions.go b/orchestrator/stage/transitions.go index 410650434..2f0ff413b 100644 --- a/orchestrator/stage/transitions.go +++ b/orchestrator/stage/transitions.go @@ -1,5 +1,7 @@ package stage +import "fmt" + /* Transitions: @@ -107,6 +109,21 @@ func (s *Stages) MarkSegmentPending(u Unit) { ) } +func (s *Stages) MarkJobSuccess(u Unit) (shadowedUnits []Unit) { + s.MarkSegmentPartialPresent(u) + + if s.shadowable(u.Segment) { + for i := u.Stage - 1; i >= 0; i-- { + u2 := Unit{Segment: u.Segment, Stage: i} + if s.getState(u2) == UnitShadowed { + s.transition(u2, UnitPartialPresent, UnitShadowed) // we let the squasher pretend it is partial, because squashing can occur from full or from partial + shadowedUnits = append(shadowedUnits, u2) + } + } + } + return +} + func (s *Stages) MarkSegmentPartialPresent(u Unit) { s.transition(u, UnitPartialPresent, UnitScheduled, // reported by working completing its generation of a partial @@ -124,6 +141,10 @@ func (s *Stages) markSegmentCompleted(u Unit) { s.transition(u, UnitCompleted, UnitPending, // from an initial storage state snapshot UnitMerging, // from the Squasher's merge operations completing + UnitScheduled, + UnitShadowed, + UnitNoOp, + UnitCompleted, // in case it got completed indirectly ) } @@ -136,11 +157,11 @@ func (s *Stages) transition(u Unit, to UnitState, allowedPreviousStates ...UnitS return } } - 
invalidTransition(prev, to) + invalidTransition(prev, to, u) } -func invalidTransition(prev, next UnitState) { - panic("invalid transition from " + prev.String() + " to " + next.String()) +func invalidTransition(prev, next UnitState, u Unit) { + panic(fmt.Sprintf("invalid transition from %q to %q on unit [stage: %d, segment: %d]", prev.String(), next.String(), u.Stage, u.Segment)) } func (s *Stages) forceTransition(segment int, stage int, to UnitState) { diff --git a/orchestrator/work/worker.go b/orchestrator/work/worker.go index 5878bc103..bd1d18c23 100644 --- a/orchestrator/work/worker.go +++ b/orchestrator/work/worker.go @@ -93,20 +93,20 @@ func NewRequest(ctx context.Context, req *reqctx.RequestDetails, stageIndex int, panic("unable to get tier2 request parameters") } - return &pbssinternal.ProcessRangeRequest{ - StartBlockNum: workRange.StartBlock, - StopBlockNum: workRange.ExclusiveEndBlock, - Modules: req.Modules, - OutputModule: req.OutputModule, - Stage: uint32(stageIndex), + segment := uint64(workRange.StartBlock) / tier2ReqParams.StateBundleSize + return &pbssinternal.ProcessRangeRequest{ + Modules: req.Modules, + OutputModule: req.OutputModule, + Stage: uint32(stageIndex), MeteringConfig: tier2ReqParams.MeteringConfig, FirstStreamableBlock: tier2ReqParams.FirstStreamableBlock, MergedBlocksStore: tier2ReqParams.MergedBlockStoreURL, StateStore: tier2ReqParams.StateStoreURL, - StateBundleSize: tier2ReqParams.StateBundleSize, + SegmentSize: tier2ReqParams.StateBundleSize, + SegmentNumber: segment, StateStoreDefaultTag: tier2ReqParams.StateStoreDefaultTag, - WasmModules: tier2ReqParams.WASMModules, + WasmExtensionConfigs: tier2ReqParams.WASMModules, BlockType: tier2ReqParams.BlockType, } } @@ -123,8 +123,7 @@ func (w *RemoteWorker) Work(ctx context.Context, unit stage.Unit, workRange *blo var previousError error err := derr.RetryContext(ctx, uint64(maxRetries), func(ctx context.Context) error { w.logger.Info("launching remote worker", - zap.Int64("start_block_num", int64(request.StartBlockNum)), - zap.Uint64("stop_block_num", request.StopBlockNum), + zap.Uint64("segment", request.SegmentNumber), zap.Uint32("stage", request.Stage), zap.String("output_module", request.OutputModule), zap.Int("attempt", retryIdx+1), @@ -164,7 +163,7 @@ func (w *RemoteWorker) Work(ctx context.Context, unit stage.Unit, workRange *blo zap.Int("number_of_tries", retryIdx), zap.Strings("module_name", moduleNames), zap.Duration("duration", timeTook), - zap.Float64("num_of_blocks_per_sec", float64(request.StopBlockNum-request.StartBlockNum)/timeTook.Seconds()), + zap.Float64("num_of_blocks_per_sec", float64(request.SegmentSize)/timeTook.Seconds()), zap.Error(err), ) return MsgJobFailed{Unit: unit, Error: err} @@ -182,7 +181,7 @@ func (w *RemoteWorker) Work(ctx context.Context, unit stage.Unit, workRange *blo zap.Int("number_of_tries", retryIdx), zap.Strings("module_name", moduleNames), zap.Float64("duration", timeTook.Seconds()), - zap.Float64("processing_time_per_block", timeTook.Seconds()/float64(request.StopBlockNum-request.StartBlockNum)), + zap.Float64("processing_time_per_block", timeTook.Seconds()/float64(request.SegmentSize)), ) return MsgJobSucceeded{ Unit: unit, @@ -196,12 +195,11 @@ func (w *RemoteWorker) Work(ctx context.Context, unit stage.Unit, workRange *blo func (w *RemoteWorker) work(ctx context.Context, request *pbssinternal.ProcessRangeRequest, moduleNames []string, upstream *response.Stream) *Result { var err error - ctx, span := reqctx.WithSpan(ctx, 
fmt.Sprintf("substreams/tier1/schedule/%s/%d-%d", request.OutputModule, request.StartBlockNum, request.StopBlockNum)) + ctx, span := reqctx.WithSpan(ctx, fmt.Sprintf("substreams/tier1/schedule/%s/%d", request.OutputModule, request.SegmentNumber)) defer span.EndWithErr(&err) span.SetAttributes( attribute.String("substreams.output_module", request.OutputModule), - attribute.Int64("substreams.start_block", int64(request.StartBlockNum)), - attribute.Int64("substreams.stop_block", int64(request.StopBlockNum)), + attribute.Int64("substreams.segment_number", int64(request.SegmentNumber)), attribute.Int64("substreams.worker_id", int64(w.id)), ) logger := w.logger @@ -212,7 +210,8 @@ func (w *RemoteWorker) work(ctx context.Context, request *pbssinternal.ProcessRa } stats := reqctx.ReqStats(ctx) - jobIdx := stats.RecordNewSubrequest(request.Stage, request.StartBlockNum, request.StopBlockNum) + startBlock := request.SegmentNumber * request.SegmentSize + jobIdx := stats.RecordNewSubrequest(request.Stage, startBlock, startBlock+request.SegmentSize) defer stats.RecordEndSubrequest(jobIdx) ctx = dauth.FromContext(ctx).ToOutgoingGRPCContext(ctx) diff --git a/pb/last_generate.txt b/pb/last_generate.txt index b1b69175c..084151401 100644 --- a/pb/last_generate.txt +++ b/pb/last_generate.txt @@ -1,2 +1,2 @@ -generate.sh - Tue Apr 30 14:01:29 EDT 2024 - cbillett -streamingfast/proto revision: 6c1189f2d3f481b1838e11cc4e7a86336a926dea +generate.sh - Wed 1 May 2024 15:44:36 EDT - stepd +streamingfast/proto revision: 5be91f4920622fe974e7e71bec4180d0ab98074f diff --git a/pb/sf/substreams/index/v1/keys.pb.go b/pb/sf/substreams/index/v1/keys.pb.go index e213f690f..5eceed6be 100644 --- a/pb/sf/substreams/index/v1/keys.pb.go +++ b/pb/sf/substreams/index/v1/keys.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/index/v1/keys.proto diff --git a/pb/sf/substreams/intern/v2/deltas.pb.go b/pb/sf/substreams/intern/v2/deltas.pb.go index eedd5ed6f..a9c29242a 100644 --- a/pb/sf/substreams/intern/v2/deltas.pb.go +++ b/pb/sf/substreams/intern/v2/deltas.pb.go @@ -1,12 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/intern/v2/deltas.proto package pbssinternal import ( + _ "github.com/streamingfast/substreams/pb/sf/substreams/index/v1" v1 "github.com/streamingfast/substreams/pb/sf/substreams/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -357,66 +358,68 @@ var file_sf_substreams_intern_v2_deltas_proto_rawDesc = []byte{ 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x64, - 0x65, 0x6c, 0x74, 0x61, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x02, 0x0a, 0x0c, - 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, - 0x0a, 0x6d, 0x61, 0x70, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x70, 0x4f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x65, - 0x6c, 0x74, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x66, 0x2e, - 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, - 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x14, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x62, 0x75, - 0x67, 0x4c, 0x6f, 0x67, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x52, - 0x0a, 0x0a, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x44, 0x0a, 0x0a, - 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, - 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0xea, 0x03, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, - 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x10, 0x0a, 0x03, 
0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6f, 0x72, - 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, - 0x45, 0x54, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x45, - 0x54, 0x5f, 0x49, 0x46, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, - 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x45, 0x54, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x5f, 0x49, - 0x46, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x03, 0x12, 0x0a, - 0x0a, 0x06, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x45, - 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x10, 0x05, 0x12, 0x13, 0x0a, - 0x0f, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x49, 0x47, 0x5f, 0x49, 0x4e, 0x54, - 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x41, 0x58, - 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, - 0x54, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x49, 0x47, 0x5f, 0x44, 0x45, 0x43, 0x49, 0x4d, 0x41, - 0x4c, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x49, 0x4e, 0x5f, 0x42, - 0x49, 0x47, 0x5f, 0x49, 0x4e, 0x54, 0x10, 0x0a, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x45, 0x54, 0x5f, - 0x4d, 0x49, 0x4e, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x0b, 0x12, 0x13, 0x0a, 0x0f, 0x53, - 0x45, 0x54, 0x5f, 0x4d, 0x49, 0x4e, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, 0x0c, - 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x47, 0x5f, - 0x44, 0x45, 0x43, 0x49, 0x4d, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x55, 0x4d, - 0x5f, 0x42, 0x49, 0x47, 0x5f, 0x49, 0x4e, 0x54, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, - 0x4d, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x0f, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x55, 0x4d, - 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, 0x10, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x55, - 0x4d, 0x5f, 0x42, 0x49, 0x47, 0x5f, 0x44, 0x45, 0x43, 0x49, 0x4d, 0x41, 0x4c, 0x10, 0x11, 0x42, - 0x4d, 0x5a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x73, 0x75, 0x62, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x2f, 0x76, - 0x32, 0x3b, 0x70, 0x62, 0x73, 0x73, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x6c, 0x74, 0x61, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x73, 0x66, 0x2f, + 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, + 0x02, 0x0a, 0x0c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, + 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x35, 0x0a, 0x0a, 0x6d, 0x61, 0x70, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x61, + 0x70, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x48, 0x00, 0x52, 0x0b, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6c, + 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, + 0x30, 0x0a, 0x14, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x74, 0x72, + 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x4c, 0x6f, 0x67, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x22, 0x52, 0x0a, 0x0a, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x44, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xea, 0x03, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x03, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x02, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x0d, + 0x0a, 0x09, 0x53, 0x45, 0x54, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, + 0x11, 0x53, 0x45, 0x54, 0x5f, 0x49, 0x46, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x58, 0x49, 0x53, + 0x54, 0x53, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x45, 0x54, 0x5f, 0x42, 0x59, 0x54, 0x45, + 0x53, 0x5f, 0x49, 0x46, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, + 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x11, 0x0a, + 0x0d, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x10, 0x05, + 0x12, 0x13, 0x0a, 0x0f, 0x53, 
0x45, 0x54, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x49, 0x47, 0x5f, + 0x49, 0x4e, 0x54, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x41, 0x58, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x54, 0x5f, + 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, 0x08, 0x12, 0x17, 0x0a, + 0x13, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x49, 0x47, 0x5f, 0x44, 0x45, 0x43, + 0x49, 0x4d, 0x41, 0x4c, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x49, + 0x4e, 0x5f, 0x42, 0x49, 0x47, 0x5f, 0x49, 0x4e, 0x54, 0x10, 0x0a, 0x12, 0x11, 0x0a, 0x0d, 0x53, + 0x45, 0x54, 0x5f, 0x4d, 0x49, 0x4e, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x0b, 0x12, 0x13, + 0x0a, 0x0f, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x49, 0x4e, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, + 0x34, 0x10, 0x0c, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x54, 0x5f, 0x4d, 0x49, 0x4e, 0x5f, 0x42, + 0x49, 0x47, 0x5f, 0x44, 0x45, 0x43, 0x49, 0x4d, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, + 0x53, 0x55, 0x4d, 0x5f, 0x42, 0x49, 0x47, 0x5f, 0x49, 0x4e, 0x54, 0x10, 0x0e, 0x12, 0x0d, 0x0a, + 0x09, 0x53, 0x55, 0x4d, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x0f, 0x12, 0x0f, 0x0a, 0x0b, + 0x53, 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, 0x10, 0x12, 0x13, 0x0a, + 0x0f, 0x53, 0x55, 0x4d, 0x5f, 0x42, 0x49, 0x47, 0x5f, 0x44, 0x45, 0x43, 0x49, 0x4d, 0x41, 0x4c, + 0x10, 0x11, 0x42, 0x4d, 0x5a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x73, + 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x66, 0x2f, + 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x70, 0x62, 0x73, 0x73, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pb/sf/substreams/intern/v2/service.go b/pb/sf/substreams/intern/v2/service.go index 7ca8e7834..03ea6abe6 100644 --- a/pb/sf/substreams/intern/v2/service.go +++ b/pb/sf/substreams/intern/v2/service.go @@ -3,7 +3,7 @@ package pbssinternal import "encoding/json" func (x *ProcessRangeRequest) WasmConfig(module string) string { - return x.WasmModules[module] + return x.WasmExtensionConfigs[module] } type RPCCallWasmModuleConfiguration struct { @@ -29,3 +29,11 @@ func ToWasmModuleTypeArray(s []string) []WASMModuleType { } return result } + +func (r *ProcessRangeRequest) StartBlock() uint64 { + return r.SegmentNumber * r.SegmentSize +} + +func (r *ProcessRangeRequest) StopBlock() uint64 { + return r.SegmentNumber*r.SegmentSize + r.SegmentSize +} diff --git a/pb/sf/substreams/intern/v2/service.pb.go b/pb/sf/substreams/intern/v2/service.pb.go index 210de28b2..ee42e1fae 100644 --- a/pb/sf/substreams/intern/v2/service.pb.go +++ b/pb/sf/substreams/intern/v2/service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/intern/v2/service.proto @@ -74,20 +74,20 @@ type ProcessRangeRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - StartBlockNum uint64 `protobuf:"varint,1,opt,name=start_block_num,json=startBlockNum,proto3" json:"start_block_num,omitempty"` + // Deprecated: Do not use. 
	StopBlockNum  uint64       `protobuf:"varint,2,opt,name=stop_block_num,json=stopBlockNum,proto3" json:"stop_block_num,omitempty"`
 	OutputModule   string      `protobuf:"bytes,3,opt,name=output_module,json=outputModule,proto3" json:"output_module,omitempty"`
 	Modules        *v1.Modules `protobuf:"bytes,4,opt,name=modules,proto3" json:"modules,omitempty"`
 	Stage          uint32      `protobuf:"varint,5,opt,name=stage,proto3" json:"stage,omitempty"` // 0-based index of stage to execute up to
 	MeteringConfig string      `protobuf:"bytes,6,opt,name=metering_config,json=meteringConfig,proto3" json:"metering_config,omitempty"`
-	FirstStreamableBlock uint64 `protobuf:"varint,7,opt,name=first_streamable_block,json=firstStreamableBlock,proto3" json:"first_streamable_block,omitempty"` // first block that can be streamed
-	LastStreamableBlock  uint64 `protobuf:"varint,8,opt,name=last_streamable_block,json=lastStreamableBlock,proto3" json:"last_streamable_block,omitempty"` // last block that can be streamed
-	WasmModules map[string]string `protobuf:"bytes,9,rep,name=wasm_modules,json=wasmModules,proto3" json:"wasm_modules,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // TODO: rename to `wasm_extension_configs`
-	MergedBlocksStore string `protobuf:"bytes,10,opt,name=merged_blocks_store,json=mergedBlocksStore,proto3" json:"merged_blocks_store,omitempty"` // store to use for merged blocks
-	StateStore string `protobuf:"bytes,11,opt,name=state_store,json=stateStore,proto3" json:"state_store,omitempty"` // store to use for substreams state
-	StateStoreDefaultTag string `protobuf:"bytes,12,opt,name=state_store_default_tag,json=stateStoreDefaultTag,proto3" json:"state_store_default_tag,omitempty"` // default tag to use for state store
-	StateBundleSize uint64 `protobuf:"varint,13,opt,name=state_bundle_size,json=stateBundleSize,proto3" json:"state_bundle_size,omitempty"` // number of blocks to process in a single batch
-	BlockType string `protobuf:"bytes,14,opt,name=block_type,json=blockType,proto3" json:"block_type,omitempty"` // block type to process
+	FirstStreamableBlock uint64 `protobuf:"varint,7,opt,name=first_streamable_block,json=firstStreamableBlock,proto3" json:"first_streamable_block,omitempty"` // first block that can be streamed on that chain
+	WasmExtensionConfigs map[string]string `protobuf:"bytes,9,rep,name=wasm_extension_configs,json=wasmExtensionConfigs,proto3" json:"wasm_extension_configs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	MergedBlocksStore string `protobuf:"bytes,10,opt,name=merged_blocks_store,json=mergedBlocksStore,proto3" json:"merged_blocks_store,omitempty"` // store to use for merged blocks
+	StateStore string `protobuf:"bytes,11,opt,name=state_store,json=stateStore,proto3" json:"state_store,omitempty"` // store to use for substreams state
+	StateStoreDefaultTag string `protobuf:"bytes,12,opt,name=state_store_default_tag,json=stateStoreDefaultTag,proto3" json:"state_store_default_tag,omitempty"` // default tag to use for state store
+	SegmentSize uint64 `protobuf:"varint,13,opt,name=segment_size,json=segmentSize,proto3" json:"segment_size,omitempty"` // number of blocks to process in a single batch
+	BlockType string `protobuf:"bytes,14,opt,name=block_type,json=blockType,proto3" json:"block_type,omitempty"` // block type to process
+	SegmentNumber uint64 `protobuf:"varint,15,opt,name=segment_number,json=segmentNumber,proto3" json:"segment_number,omitempty"` // 
segment_number * segment_size = start_block_num } func (x *ProcessRangeRequest) Reset() { @@ -122,13 +122,7 @@ func (*ProcessRangeRequest) Descriptor() ([]byte, []int) { return file_sf_substreams_intern_v2_service_proto_rawDescGZIP(), []int{0} } -func (x *ProcessRangeRequest) GetStartBlockNum() uint64 { - if x != nil { - return x.StartBlockNum - } - return 0 -} - +// Deprecated: Do not use. func (x *ProcessRangeRequest) GetStopBlockNum() uint64 { if x != nil { return x.StopBlockNum @@ -171,16 +165,9 @@ func (x *ProcessRangeRequest) GetFirstStreamableBlock() uint64 { return 0 } -func (x *ProcessRangeRequest) GetLastStreamableBlock() uint64 { +func (x *ProcessRangeRequest) GetWasmExtensionConfigs() map[string]string { if x != nil { - return x.LastStreamableBlock - } - return 0 -} - -func (x *ProcessRangeRequest) GetWasmModules() map[string]string { - if x != nil { - return x.WasmModules + return x.WasmExtensionConfigs } return nil } @@ -206,9 +193,9 @@ func (x *ProcessRangeRequest) GetStateStoreDefaultTag() string { return "" } -func (x *ProcessRangeRequest) GetStateBundleSize() uint64 { +func (x *ProcessRangeRequest) GetSegmentSize() uint64 { if x != nil { - return x.StateBundleSize + return x.SegmentSize } return 0 } @@ -220,6 +207,13 @@ func (x *ProcessRangeRequest) GetBlockType() string { return "" } +func (x *ProcessRangeRequest) GetSegmentNumber() uint64 { + if x != nil { + return x.SegmentNumber + } + return 0 +} + type ProcessRangeResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -754,154 +748,153 @@ var file_sf_substreams_intern_v2_service_proto_rawDesc = []byte{ 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdd, 0x05, + 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd4, 0x05, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x12, 0x24, 0x0a, - 0x0e, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x4e, 0x75, 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x6f, - 0x64, 0x75, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6d, 0x6f, 0x64, 0x75, - 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x66, 0x2e, 0x73, - 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, - 0x75, 0x6c, 0x65, 0x73, 0x52, 0x07, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x0e, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x16, - 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x66, 0x69, - 0x72, 0x73, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x61, 0x62, 0x6c, - 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x62, 0x0a, 0x0c, 0x77, 0x61, 0x73, 0x6d, 0x5f, 0x6d, - 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x73, - 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x57, 0x61, 0x73, - 0x6d, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x77, - 0x61, 0x73, 0x6d, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x65, - 0x72, 0x67, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, - 0x61, 0x67, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x62, 0x75, 0x6e, 0x64, - 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x3e, 0x0a, - 0x10, 0x57, 0x61, 0x73, 0x6d, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x01, - 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x76, 0x32, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x48, 0x00, 0x52, 0x06, 0x66, 0x61, 0x69, - 
0x6c, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x09, - 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x06, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x66, 0x2e, 0x73, - 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x06, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, - 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, - 0x22, 0xfb, 0x01, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x29, 0x0a, 0x10, - 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, - 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x61, - 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, - 0x6e, 0x12, 0x4b, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, - 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xa3, - 0x03, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, - 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x73, - 0x12, 0x35, 0x0a, 0x17, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0e, 0x73, 
0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x61, 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, - 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, - 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c, 0x6c, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x38, 0x0a, 0x18, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x22, 0x57, 0x0a, 0x12, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x43, 0x61, 0x6c, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x74, 0x69, 0x6d, 0x65, 0x4d, 0x73, 0x22, 0x7f, 0x0a, - 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x57, 0x0a, 0x14, 0x61, 0x6c, - 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, - 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, - 0x12, 0x61, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0x5b, - 0x0a, 0x06, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, - 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x74, 0x72, 0x75, - 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6c, 0x6f, - 0x67, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0a, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x61, 0x6e, 0x67, 
0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, - 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x65, - 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2a, 0x51, 0x0a, 0x0e, 0x57, 0x41, 0x53, 0x4d, 0x4d, - 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x57, 0x41, 0x53, - 0x4d, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x57, - 0x41, 0x53, 0x4d, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x52, 0x50, 0x43, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x32, 0x7f, 0x0a, 0x0a, 0x53, 0x75, - 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x71, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, - 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, - 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x4d, 0x5a, 0x4b, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x70, 0x62, - 0x73, 0x73, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0e, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x12, + 0x23, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, + 0x52, 0x07, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, + 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x16, 0x66, 0x69, 0x72, 0x73, + 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x61, 0x62, 
0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x66, 0x69, 0x72, 0x73, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x7e, + 0x0a, 0x16, 0x77, 0x61, 0x73, 0x6d, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, + 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x57, + 0x61, 0x73, 0x6d, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x77, 0x61, 0x73, 0x6d, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x2e, + 0x0a, 0x13, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6d, 0x65, 0x72, + 0x67, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, + 0x35, 0x0a, 0x17, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x54, 0x61, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x65, + 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0d, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x1a, + 0x47, 0x0a, 0x19, 0x57, 0x61, 0x73, 0x6d, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x22, 0xf0, 0x01, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, + 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x48, 0x00, 0x52, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x09, 
0x63, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x12, 0x3b, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x06, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, + 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0xfb, 0x01, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, + 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x28, + 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, + 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x61, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x4b, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xa3, 0x03, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, + 0x67, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x73, 0x12, 0x28, + 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, + 0x65, 0x61, 
0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x61, 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c, 0x6c, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x43, 0x61, 0x6c, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x18, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x57, 0x0a, 0x12, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x74, 0x69, + 0x6d, 0x65, 0x4d, 0x73, 0x22, 0x7f, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x12, 0x57, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0x5b, 0x0a, 0x06, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, + 0x6f, 0x67, 0x73, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0a, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 
0x61, 0x72, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2a, 0x51, + 0x0a, 0x0e, 0x57, 0x41, 0x53, 0x4d, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x20, 0x0a, 0x1c, 0x57, 0x41, 0x53, 0x4d, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x57, 0x41, 0x53, 0x4d, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, + 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x50, 0x43, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, + 0x01, 0x32, 0x7f, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, + 0x71, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x2e, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2f, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x42, 0x4d, 0x5a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x73, + 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x66, 0x2f, + 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x70, 0x62, 0x73, 0x73, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -928,12 +921,12 @@ var file_sf_substreams_intern_v2_service_proto_goTypes = []interface{}{ (*Completed)(nil), // 6: sf.substreams.internal.v2.Completed (*Failed)(nil), // 7: sf.substreams.internal.v2.Failed (*BlockRange)(nil), // 8: sf.substreams.internal.v2.BlockRange - nil, // 9: sf.substreams.internal.v2.ProcessRangeRequest.WasmModulesEntry + nil, // 9: sf.substreams.internal.v2.ProcessRangeRequest.WasmExtensionConfigsEntry (*v1.Modules)(nil), // 10: sf.substreams.v1.Modules } var file_sf_substreams_intern_v2_service_proto_depIdxs = []int32{ 10, // 0: sf.substreams.internal.v2.ProcessRangeRequest.modules:type_name -> sf.substreams.v1.Modules - 9, // 1: sf.substreams.internal.v2.ProcessRangeRequest.wasm_modules:type_name -> sf.substreams.internal.v2.ProcessRangeRequest.WasmModulesEntry + 9, // 1: sf.substreams.internal.v2.ProcessRangeRequest.wasm_extension_configs:type_name -> sf.substreams.internal.v2.ProcessRangeRequest.WasmExtensionConfigsEntry 7, // 2: sf.substreams.internal.v2.ProcessRangeResponse.failed:type_name -> sf.substreams.internal.v2.Failed 6, // 3: sf.substreams.internal.v2.ProcessRangeResponse.completed:type_name -> sf.substreams.internal.v2.Completed 3, // 4: sf.substreams.internal.v2.ProcessRangeResponse.update:type_name -> sf.substreams.internal.v2.Update diff --git a/pb/sf/substreams/intern/v2/validate.go 
b/pb/sf/substreams/intern/v2/validate.go index 14f8dd4dd..0e99cd8e4 100644 --- a/pb/sf/substreams/intern/v2/validate.go +++ b/pb/sf/substreams/intern/v2/validate.go @@ -7,16 +7,23 @@ import ( ) func (r *ProcessRangeRequest) Validate() error { - if r.StartBlockNum >= r.StopBlockNum { - return fmt.Errorf("stop block %d should be higher than start block %d", r.StopBlockNum, r.StartBlockNum) - } - - if r.Modules == nil { + switch { + case r.StopBlockNum != 0: + return fmt.Errorf("invalid protocol: update your tier1") + case r.Modules == nil: return fmt.Errorf("no modules found in request") - } - - if r.OutputModule == "" { + case r.OutputModule == "": return fmt.Errorf("no output module defined in request") + case r.MeteringConfig == "": + return fmt.Errorf("metering config is required in request") + case r.BlockType == "": + return fmt.Errorf("block type is required in request") + case r.StateStore == "": + return fmt.Errorf("state store is required in request") + case r.MergedBlocksStore == "": + return fmt.Errorf("merged blocks store is required in request") + case r.SegmentSize == 0: + return fmt.Errorf("a non-zero state bundle size is required in request") } seenStores := map[string]bool{} diff --git a/pb/sf/substreams/options.pb.go b/pb/sf/substreams/options.pb.go index 920eb9c7a..6a00c93db 100644 --- a/pb/sf/substreams/options.pb.go +++ b/pb/sf/substreams/options.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/options.proto diff --git a/pb/sf/substreams/rpc/v2/service.pb.go b/pb/sf/substreams/rpc/v2/service.pb.go index 189e7d1bf..65ee68819 100644 --- a/pb/sf/substreams/rpc/v2/service.pb.go +++ b/pb/sf/substreams/rpc/v2/service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/rpc/v2/service.proto diff --git a/pb/sf/substreams/sink/service/v1/service.pb.go b/pb/sf/substreams/sink/service/v1/service.pb.go index ad7a72799..d5e2d311e 100644 --- a/pb/sf/substreams/sink/service/v1/service.pb.go +++ b/pb/sf/substreams/sink/service/v1/service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/sink/service/v1/service.proto diff --git a/pb/sf/substreams/v1/clock.pb.go b/pb/sf/substreams/v1/clock.pb.go index d4edd2ccd..7adc67b4e 100644 --- a/pb/sf/substreams/v1/clock.pb.go +++ b/pb/sf/substreams/v1/clock.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/v1/clock.proto diff --git a/pb/sf/substreams/v1/deltas.pb.go b/pb/sf/substreams/v1/deltas.pb.go index a4ee41356..bfd987498 100644 --- a/pb/sf/substreams/v1/deltas.pb.go +++ b/pb/sf/substreams/v1/deltas.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/v1/deltas.proto diff --git a/pb/sf/substreams/v1/modules.go b/pb/sf/substreams/v1/modules.go index 62486b65d..4ef27357e 100644 --- a/pb/sf/substreams/v1/modules.go +++ b/pb/sf/substreams/v1/modules.go @@ -1,6 +1,9 @@ package pbsubstreams -import "strings" +import ( + "fmt" + "strings" +) type ModuleKind int @@ -10,6 +13,25 @@ const ( ModuleKindBlockIndex ) +func (m *Module) BlockFilterQueryString() (string, error) { + if m.BlockFilter == nil { + return "", nil + } + switch q := m.BlockFilter.Query.(type) { + case *Module_BlockFilter_QueryString: + return q.QueryString, nil + case *Module_BlockFilter_QueryFromParams: + for _, input := range m.Inputs { + if p := input.GetParams(); p != nil { + return p.Value, nil + } + } + return "", fmt.Errorf("getting blockFilterQueryString: no params input") + default: + return "", fmt.Errorf("getting blockFilterQueryString: unsupported query type") + } +} + func (x *Module) ModuleKind() ModuleKind { switch x.Kind.(type) { case *Module_KindMap_: diff --git a/pb/sf/substreams/v1/modules.pb.go b/pb/sf/substreams/v1/modules.pb.go index d4c34e170..8d98bc303 100644 --- a/pb/sf/substreams/v1/modules.pb.go +++ b/pb/sf/substreams/v1/modules.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/v1/modules.proto @@ -84,7 +84,7 @@ func (x Module_KindStore_UpdatePolicy) Number() protoreflect.EnumNumber { // Deprecated: Use Module_KindStore_UpdatePolicy.Descriptor instead. func (Module_KindStore_UpdatePolicy) EnumDescriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 2, 0} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 3, 0} } type Module_Input_Store_Mode int32 @@ -133,7 +133,7 @@ func (x Module_Input_Store_Mode) Number() protoreflect.EnumNumber { // Deprecated: Use Module_Input_Store_Mode.Descriptor instead. 
func (Module_Input_Store_Mode) EnumDescriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 4, 2, 0} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 5, 2, 0} } type Modules struct { @@ -404,7 +404,11 @@ type Module_BlockFilter struct { unknownFields protoimpl.UnknownFields Module string `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` - Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` + // Types that are assignable to Query: + // + // *Module_BlockFilter_QueryString + // *Module_BlockFilter_QueryFromParams + Query isModule_BlockFilter_Query `protobuf_oneof:"query"` } func (x *Module_BlockFilter) Reset() { @@ -446,13 +450,81 @@ func (x *Module_BlockFilter) GetModule() string { return "" } -func (x *Module_BlockFilter) GetQuery() string { - if x != nil { - return x.Query +func (m *Module_BlockFilter) GetQuery() isModule_BlockFilter_Query { + if m != nil { + return m.Query + } + return nil +} + +func (x *Module_BlockFilter) GetQueryString() string { + if x, ok := x.GetQuery().(*Module_BlockFilter_QueryString); ok { + return x.QueryString } return "" } +func (x *Module_BlockFilter) GetQueryFromParams() *Module_QueryFromParams { + if x, ok := x.GetQuery().(*Module_BlockFilter_QueryFromParams); ok { + return x.QueryFromParams + } + return nil +} + +type isModule_BlockFilter_Query interface { + isModule_BlockFilter_Query() +} + +type Module_BlockFilter_QueryString struct { + QueryString string `protobuf:"bytes,2,opt,name=query_string,json=queryString,proto3,oneof"` +} + +type Module_BlockFilter_QueryFromParams struct { + QueryFromParams *Module_QueryFromParams `protobuf:"bytes,3,opt,name=query_from_params,json=queryFromParams,proto3,oneof"` // QueryFromStore query_from_store_keys = 3; +} + +func (*Module_BlockFilter_QueryString) isModule_BlockFilter_Query() {} + +func (*Module_BlockFilter_QueryFromParams) isModule_BlockFilter_Query() {} + +type Module_QueryFromParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Module_QueryFromParams) Reset() { + *x = Module_QueryFromParams{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_substreams_v1_modules_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Module_QueryFromParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Module_QueryFromParams) ProtoMessage() {} + +func (x *Module_QueryFromParams) ProtoReflect() protoreflect.Message { + mi := &file_sf_substreams_v1_modules_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Module_QueryFromParams.ProtoReflect.Descriptor instead. 
+func (*Module_QueryFromParams) Descriptor() ([]byte, []int) { + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 1} +} + type Module_KindMap struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -464,7 +536,7 @@ type Module_KindMap struct { func (x *Module_KindMap) Reset() { *x = Module_KindMap{} if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[4] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -477,7 +549,7 @@ func (x *Module_KindMap) String() string { func (*Module_KindMap) ProtoMessage() {} func (x *Module_KindMap) ProtoReflect() protoreflect.Message { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[4] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -490,7 +562,7 @@ func (x *Module_KindMap) ProtoReflect() protoreflect.Message { // Deprecated: Use Module_KindMap.ProtoReflect.Descriptor instead. func (*Module_KindMap) Descriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 1} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 2} } func (x *Module_KindMap) GetOutputType() string { @@ -520,7 +592,7 @@ type Module_KindStore struct { func (x *Module_KindStore) Reset() { *x = Module_KindStore{} if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[5] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -533,7 +605,7 @@ func (x *Module_KindStore) String() string { func (*Module_KindStore) ProtoMessage() {} func (x *Module_KindStore) ProtoReflect() protoreflect.Message { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[5] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -546,7 +618,7 @@ func (x *Module_KindStore) ProtoReflect() protoreflect.Message { // Deprecated: Use Module_KindStore.ProtoReflect.Descriptor instead. func (*Module_KindStore) Descriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 2} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 3} } func (x *Module_KindStore) GetUpdatePolicy() Module_KindStore_UpdatePolicy { @@ -574,7 +646,7 @@ type Module_KindBlockIndex struct { func (x *Module_KindBlockIndex) Reset() { *x = Module_KindBlockIndex{} if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[6] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -587,7 +659,7 @@ func (x *Module_KindBlockIndex) String() string { func (*Module_KindBlockIndex) ProtoMessage() {} func (x *Module_KindBlockIndex) ProtoReflect() protoreflect.Message { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[6] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -600,7 +672,7 @@ func (x *Module_KindBlockIndex) ProtoReflect() protoreflect.Message { // Deprecated: Use Module_KindBlockIndex.ProtoReflect.Descriptor instead. 
func (*Module_KindBlockIndex) Descriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 3} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 4} } func (x *Module_KindBlockIndex) GetOutputType() string { @@ -627,7 +699,7 @@ type Module_Input struct { func (x *Module_Input) Reset() { *x = Module_Input{} if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[7] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -640,7 +712,7 @@ func (x *Module_Input) String() string { func (*Module_Input) ProtoMessage() {} func (x *Module_Input) ProtoReflect() protoreflect.Message { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[7] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -653,7 +725,7 @@ func (x *Module_Input) ProtoReflect() protoreflect.Message { // Deprecated: Use Module_Input.ProtoReflect.Descriptor instead. func (*Module_Input) Descriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 4} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 5} } func (m *Module_Input) GetInput() isModule_Input_Input { @@ -730,7 +802,7 @@ type Module_Output struct { func (x *Module_Output) Reset() { *x = Module_Output{} if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[8] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -743,7 +815,7 @@ func (x *Module_Output) String() string { func (*Module_Output) ProtoMessage() {} func (x *Module_Output) ProtoReflect() protoreflect.Message { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[8] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -756,7 +828,7 @@ func (x *Module_Output) ProtoReflect() protoreflect.Message { // Deprecated: Use Module_Output.ProtoReflect.Descriptor instead. func (*Module_Output) Descriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 5} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 6} } func (x *Module_Output) GetType() string { @@ -777,7 +849,7 @@ type Module_Input_Source struct { func (x *Module_Input_Source) Reset() { *x = Module_Input_Source{} if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[9] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -790,7 +862,7 @@ func (x *Module_Input_Source) String() string { func (*Module_Input_Source) ProtoMessage() {} func (x *Module_Input_Source) ProtoReflect() protoreflect.Message { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[9] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -803,7 +875,7 @@ func (x *Module_Input_Source) ProtoReflect() protoreflect.Message { // Deprecated: Use Module_Input_Source.ProtoReflect.Descriptor instead. 
func (*Module_Input_Source) Descriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 4, 0} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 5, 0} } func (x *Module_Input_Source) GetType() string { @@ -824,7 +896,7 @@ type Module_Input_Map struct { func (x *Module_Input_Map) Reset() { *x = Module_Input_Map{} if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[10] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -837,7 +909,7 @@ func (x *Module_Input_Map) String() string { func (*Module_Input_Map) ProtoMessage() {} func (x *Module_Input_Map) ProtoReflect() protoreflect.Message { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[10] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -850,7 +922,7 @@ func (x *Module_Input_Map) ProtoReflect() protoreflect.Message { // Deprecated: Use Module_Input_Map.ProtoReflect.Descriptor instead. func (*Module_Input_Map) Descriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 4, 1} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 5, 1} } func (x *Module_Input_Map) GetModuleName() string { @@ -872,7 +944,7 @@ type Module_Input_Store struct { func (x *Module_Input_Store) Reset() { *x = Module_Input_Store{} if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[11] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -885,7 +957,7 @@ func (x *Module_Input_Store) String() string { func (*Module_Input_Store) ProtoMessage() {} func (x *Module_Input_Store) ProtoReflect() protoreflect.Message { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[11] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -898,7 +970,7 @@ func (x *Module_Input_Store) ProtoReflect() protoreflect.Message { // Deprecated: Use Module_Input_Store.ProtoReflect.Descriptor instead. 
func (*Module_Input_Store) Descriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 4, 2} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 5, 2} } func (x *Module_Input_Store) GetModuleName() string { @@ -926,7 +998,7 @@ type Module_Input_Params struct { func (x *Module_Input_Params) Reset() { *x = Module_Input_Params{} if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[12] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -939,7 +1011,7 @@ func (x *Module_Input_Params) String() string { func (*Module_Input_Params) ProtoMessage() {} func (x *Module_Input_Params) ProtoReflect() protoreflect.Message { - mi := &file_sf_substreams_v1_modules_proto_msgTypes[12] + mi := &file_sf_substreams_v1_modules_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -952,7 +1024,7 @@ func (x *Module_Input_Params) ProtoReflect() protoreflect.Message { // Deprecated: Use Module_Input_Params.ProtoReflect.Descriptor instead. func (*Module_Input_Params) Descriptor() ([]byte, []int) { - return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 4, 3} + return file_sf_substreams_v1_modules_proto_rawDescGZIP(), []int{2, 5, 3} } func (x *Module_Input_Params) GetValue() string { @@ -979,7 +1051,7 @@ var file_sf_substreams_v1_modules_proto_rawDesc = []byte{ 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, - 0xb1, 0x0c, 0x0a, 0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0xb5, 0x0d, 0x0a, 0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x08, 0x6b, 0x69, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, @@ -1013,77 +1085,85 @@ var file_sf_substreams_v1_modules_proto_rawDesc = []byte{ 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x1a, 0x3b, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x1a, 0x2a, 0x0a, 0x07, 0x4b, 0x69, 0x6e, 0x64, 0x4d, 0x61, 0x70, 0x12, 0x1f, - 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, - 0xc5, 0x02, 0x0a, 0x09, 0x4b, 0x69, 0x6e, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x54, 0x0a, - 0x0d, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 
0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x4b, - 0x69, 0x6e, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, - 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, - 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x53, 0x45, - 0x54, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, - 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x53, 0x45, 0x54, 0x5f, 0x49, 0x46, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x50, 0x44, 0x41, - 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x03, 0x12, - 0x15, 0x0a, 0x11, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, - 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, - 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x05, 0x12, 0x18, 0x0a, - 0x14, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x41, - 0x50, 0x50, 0x45, 0x4e, 0x44, 0x10, 0x06, 0x1a, 0x31, 0x0a, 0x0e, 0x4b, 0x69, 0x6e, 0x64, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x80, 0x04, 0x0a, 0x05, 0x49, - 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x49, - 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x70, - 0x75, 0x74, 0x2e, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x12, 0x3c, 0x0a, - 0x05, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, - 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x53, 0x74, 0x6f, - 0x72, 0x65, 0x48, 0x00, 0x52, 0x05, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x66, - 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, - 0x6f, 0x64, 
0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x48, 0x00, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x0a, 0x06, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x26, 0x0a, 0x03, 0x4d, 0x61, - 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x1a, 0x8f, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x73, 0x66, - 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, - 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x72, - 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0x26, 0x0a, 0x04, - 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, - 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x54, - 0x41, 0x53, 0x10, 0x02, 0x1a, 0x1e, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x1c, 0x0a, - 0x06, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, - 0x69, 0x6e, 0x64, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, - 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x66, - 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x70, - 0x62, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x1a, 0xab, 0x01, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x23, + 0x0a, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x12, 0x56, 0x0a, 0x11, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, + 0x6d, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x46, 0x72, + 0x6f, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x71, + 0x75, 0x65, 0x72, 
0x79, 0x1a, 0x11, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x46, 0x72, 0x6f, + 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x2a, 0x0a, 0x07, 0x4b, 0x69, 0x6e, 0x64, 0x4d, + 0x61, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x1a, 0xc5, 0x02, 0x0a, 0x09, 0x4b, 0x69, 0x6e, 0x64, 0x53, 0x74, 0x6f, 0x72, + 0x65, 0x12, 0x54, 0x0a, 0x0d, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, + 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, + 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, + 0x12, 0x15, 0x0a, 0x11, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, + 0x59, 0x5f, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x55, 0x50, 0x44, 0x41, 0x54, + 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x53, 0x45, 0x54, 0x5f, 0x49, 0x46, 0x5f, + 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, + 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x41, 0x44, + 0x44, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, + 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x50, + 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x4d, 0x41, 0x58, 0x10, + 0x05, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x49, + 0x43, 0x59, 0x5f, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x10, 0x06, 0x1a, 0x31, 0x0a, 0x0e, 0x4b, + 0x69, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1f, 0x0a, + 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x80, + 0x04, 0x0a, 0x05, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, + 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, + 0x00, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x03, 0x6d, 0x61, 0x70, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, + 0x70, 0x12, 0x3c, 0x0a, 0x05, 0x73, 0x74, 
0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x48, 0x00, 0x52, 0x05, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, + 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x1a, 0x1c, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x26, + 0x0a, 0x03, 0x4d, 0x61, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x8f, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x3d, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x2e, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, + 0x22, 0x26, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x4e, 0x53, 0x45, + 0x54, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, + 0x44, 0x45, 0x4c, 0x54, 0x41, 0x53, 0x10, 0x02, 0x1a, 0x1e, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x1a, 0x1c, 0x0a, 0x06, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, + 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, + 0x61, 0x73, 0x74, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x70, + 0x62, 0x2f, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, + 0x76, 0x31, 0x3b, 0x70, 0x62, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1099,7 +1179,7 @@ func file_sf_substreams_v1_modules_proto_rawDescGZIP() []byte { } var file_sf_substreams_v1_modules_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_sf_substreams_v1_modules_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_sf_substreams_v1_modules_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_sf_substreams_v1_modules_proto_goTypes = []interface{}{ (Module_KindStore_UpdatePolicy)(0), // 0: sf.substreams.v1.Module.KindStore.UpdatePolicy 
(Module_Input_Store_Mode)(0), // 1: sf.substreams.v1.Module.Input.Store.Mode @@ -1107,36 +1187,38 @@ var file_sf_substreams_v1_modules_proto_goTypes = []interface{}{ (*Binary)(nil), // 3: sf.substreams.v1.Binary (*Module)(nil), // 4: sf.substreams.v1.Module (*Module_BlockFilter)(nil), // 5: sf.substreams.v1.Module.BlockFilter - (*Module_KindMap)(nil), // 6: sf.substreams.v1.Module.KindMap - (*Module_KindStore)(nil), // 7: sf.substreams.v1.Module.KindStore - (*Module_KindBlockIndex)(nil), // 8: sf.substreams.v1.Module.KindBlockIndex - (*Module_Input)(nil), // 9: sf.substreams.v1.Module.Input - (*Module_Output)(nil), // 10: sf.substreams.v1.Module.Output - (*Module_Input_Source)(nil), // 11: sf.substreams.v1.Module.Input.Source - (*Module_Input_Map)(nil), // 12: sf.substreams.v1.Module.Input.Map - (*Module_Input_Store)(nil), // 13: sf.substreams.v1.Module.Input.Store - (*Module_Input_Params)(nil), // 14: sf.substreams.v1.Module.Input.Params + (*Module_QueryFromParams)(nil), // 6: sf.substreams.v1.Module.QueryFromParams + (*Module_KindMap)(nil), // 7: sf.substreams.v1.Module.KindMap + (*Module_KindStore)(nil), // 8: sf.substreams.v1.Module.KindStore + (*Module_KindBlockIndex)(nil), // 9: sf.substreams.v1.Module.KindBlockIndex + (*Module_Input)(nil), // 10: sf.substreams.v1.Module.Input + (*Module_Output)(nil), // 11: sf.substreams.v1.Module.Output + (*Module_Input_Source)(nil), // 12: sf.substreams.v1.Module.Input.Source + (*Module_Input_Map)(nil), // 13: sf.substreams.v1.Module.Input.Map + (*Module_Input_Store)(nil), // 14: sf.substreams.v1.Module.Input.Store + (*Module_Input_Params)(nil), // 15: sf.substreams.v1.Module.Input.Params } var file_sf_substreams_v1_modules_proto_depIdxs = []int32{ 4, // 0: sf.substreams.v1.Modules.modules:type_name -> sf.substreams.v1.Module 3, // 1: sf.substreams.v1.Modules.binaries:type_name -> sf.substreams.v1.Binary - 6, // 2: sf.substreams.v1.Module.kind_map:type_name -> sf.substreams.v1.Module.KindMap - 7, // 3: sf.substreams.v1.Module.kind_store:type_name -> sf.substreams.v1.Module.KindStore - 8, // 4: sf.substreams.v1.Module.kind_block_index:type_name -> sf.substreams.v1.Module.KindBlockIndex - 9, // 5: sf.substreams.v1.Module.inputs:type_name -> sf.substreams.v1.Module.Input - 10, // 6: sf.substreams.v1.Module.output:type_name -> sf.substreams.v1.Module.Output + 7, // 2: sf.substreams.v1.Module.kind_map:type_name -> sf.substreams.v1.Module.KindMap + 8, // 3: sf.substreams.v1.Module.kind_store:type_name -> sf.substreams.v1.Module.KindStore + 9, // 4: sf.substreams.v1.Module.kind_block_index:type_name -> sf.substreams.v1.Module.KindBlockIndex + 10, // 5: sf.substreams.v1.Module.inputs:type_name -> sf.substreams.v1.Module.Input + 11, // 6: sf.substreams.v1.Module.output:type_name -> sf.substreams.v1.Module.Output 5, // 7: sf.substreams.v1.Module.block_filter:type_name -> sf.substreams.v1.Module.BlockFilter - 0, // 8: sf.substreams.v1.Module.KindStore.update_policy:type_name -> sf.substreams.v1.Module.KindStore.UpdatePolicy - 11, // 9: sf.substreams.v1.Module.Input.source:type_name -> sf.substreams.v1.Module.Input.Source - 12, // 10: sf.substreams.v1.Module.Input.map:type_name -> sf.substreams.v1.Module.Input.Map - 13, // 11: sf.substreams.v1.Module.Input.store:type_name -> sf.substreams.v1.Module.Input.Store - 14, // 12: sf.substreams.v1.Module.Input.params:type_name -> sf.substreams.v1.Module.Input.Params - 1, // 13: sf.substreams.v1.Module.Input.Store.mode:type_name -> sf.substreams.v1.Module.Input.Store.Mode - 14, // [14:14] is the sub-list for method 
output_type - 14, // [14:14] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name + 6, // 8: sf.substreams.v1.Module.BlockFilter.query_from_params:type_name -> sf.substreams.v1.Module.QueryFromParams + 0, // 9: sf.substreams.v1.Module.KindStore.update_policy:type_name -> sf.substreams.v1.Module.KindStore.UpdatePolicy + 12, // 10: sf.substreams.v1.Module.Input.source:type_name -> sf.substreams.v1.Module.Input.Source + 13, // 11: sf.substreams.v1.Module.Input.map:type_name -> sf.substreams.v1.Module.Input.Map + 14, // 12: sf.substreams.v1.Module.Input.store:type_name -> sf.substreams.v1.Module.Input.Store + 15, // 13: sf.substreams.v1.Module.Input.params:type_name -> sf.substreams.v1.Module.Input.Params + 1, // 14: sf.substreams.v1.Module.Input.Store.mode:type_name -> sf.substreams.v1.Module.Input.Store.Mode + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_sf_substreams_v1_modules_proto_init() } @@ -1194,7 +1276,7 @@ func file_sf_substreams_v1_modules_proto_init() { } } file_sf_substreams_v1_modules_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module_KindMap); i { + switch v := v.(*Module_QueryFromParams); i { case 0: return &v.state case 1: @@ -1206,7 +1288,7 @@ func file_sf_substreams_v1_modules_proto_init() { } } file_sf_substreams_v1_modules_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module_KindStore); i { + switch v := v.(*Module_KindMap); i { case 0: return &v.state case 1: @@ -1218,7 +1300,7 @@ func file_sf_substreams_v1_modules_proto_init() { } } file_sf_substreams_v1_modules_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module_KindBlockIndex); i { + switch v := v.(*Module_KindStore); i { case 0: return &v.state case 1: @@ -1230,7 +1312,7 @@ func file_sf_substreams_v1_modules_proto_init() { } } file_sf_substreams_v1_modules_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module_Input); i { + switch v := v.(*Module_KindBlockIndex); i { case 0: return &v.state case 1: @@ -1242,7 +1324,7 @@ func file_sf_substreams_v1_modules_proto_init() { } } file_sf_substreams_v1_modules_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module_Output); i { + switch v := v.(*Module_Input); i { case 0: return &v.state case 1: @@ -1254,7 +1336,7 @@ func file_sf_substreams_v1_modules_proto_init() { } } file_sf_substreams_v1_modules_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module_Input_Source); i { + switch v := v.(*Module_Output); i { case 0: return &v.state case 1: @@ -1266,7 +1348,7 @@ func file_sf_substreams_v1_modules_proto_init() { } } file_sf_substreams_v1_modules_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module_Input_Map); i { + switch v := v.(*Module_Input_Source); i { case 0: return &v.state case 1: @@ -1278,7 +1360,7 @@ func file_sf_substreams_v1_modules_proto_init() { } } file_sf_substreams_v1_modules_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module_Input_Store); 
i { + switch v := v.(*Module_Input_Map); i { case 0: return &v.state case 1: @@ -1290,6 +1372,18 @@ func file_sf_substreams_v1_modules_proto_init() { } } file_sf_substreams_v1_modules_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Module_Input_Store); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_substreams_v1_modules_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Module_Input_Params); i { case 0: return &v.state @@ -1307,7 +1401,11 @@ func file_sf_substreams_v1_modules_proto_init() { (*Module_KindStore_)(nil), (*Module_KindBlockIndex_)(nil), } - file_sf_substreams_v1_modules_proto_msgTypes[7].OneofWrappers = []interface{}{ + file_sf_substreams_v1_modules_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Module_BlockFilter_QueryString)(nil), + (*Module_BlockFilter_QueryFromParams)(nil), + } + file_sf_substreams_v1_modules_proto_msgTypes[8].OneofWrappers = []interface{}{ (*Module_Input_Source_)(nil), (*Module_Input_Map_)(nil), (*Module_Input_Store_)(nil), @@ -1319,7 +1417,7 @@ func file_sf_substreams_v1_modules_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_sf_substreams_v1_modules_proto_rawDesc, NumEnums: 2, - NumMessages: 13, + NumMessages: 14, NumExtensions: 0, NumServices: 0, }, diff --git a/pb/sf/substreams/v1/package.pb.go b/pb/sf/substreams/v1/package.pb.go index a665d9fca..bb6dee388 100644 --- a/pb/sf/substreams/v1/package.pb.go +++ b/pb/sf/substreams/v1/package.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sf/substreams/v1/package.proto diff --git a/pb/system/system.pb b/pb/system/system.pb index faf429fbb..c52142469 100644 Binary files a/pb/system/system.pb and b/pb/system/system.pb differ diff --git a/pipeline/cache/engine.go b/pipeline/cache/engine.go index 8e0ddb8c0..4382ea120 100644 --- a/pipeline/cache/engine.go +++ b/pipeline/cache/engine.go @@ -4,16 +4,18 @@ import ( "context" "fmt" - multierror "github.com/hashicorp/go-multierror" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" - - "github.com/streamingfast/substreams/reqctx" + pbindex "github.com/streamingfast/substreams/pb/sf/substreams/index/v1" + "github.com/RoaringBitmap/roaring/roaring64" + multierror "github.com/hashicorp/go-multierror" "github.com/streamingfast/bstream" - "go.uber.org/zap" - + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" + "github.com/streamingfast/substreams/reqctx" "github.com/streamingfast/substreams/storage/execout" + "github.com/streamingfast/substreams/storage/index" + "go.uber.org/zap" + "google.golang.org/protobuf/proto" ) // Engine manages the reversible segments and keeps track of @@ -28,17 +30,19 @@ type Engine struct { reversibleBuffers map[uint64]*execout.Buffer // block num to modules' outputs for that given block execOutputWriters map[string]*execout.Writer // moduleName => writer (single file) existingExecOuts map[string]*execout.File + indexWriters map[string]*index.Writer logger *zap.Logger } -func NewEngine(ctx context.Context, execOutWriters map[string]*execout.Writer, blockType string, existingExecOuts map[string]*execout.File) (*Engine, error) { +func NewEngine(ctx context.Context, execOutWriters map[string]*execout.Writer, blockType string, 
existingExecOuts map[string]*execout.File, indexWriters map[string]*index.Writer) (*Engine, error) { e := &Engine{ ctx: ctx, reversibleBuffers: map[uint64]*execout.Buffer{}, execOutputWriters: execOutWriters, logger: reqctx.Logger(ctx), blockType: blockType, + indexWriters: indexWriters, existingExecOuts: existingExecOuts, } return e, nil @@ -52,9 +56,16 @@ func (e *Engine) NewBuffer(optionalBlock *pbbstream.Block, clock *pbsubstreams.C e.reversibleBuffers[clock.Number] = out for moduleName, existingExecOut := range e.existingExecOuts { - if val, ok := existingExecOut.Get(clock); ok { - out.Set(moduleName, val) + val, ok := existingExecOut.Get(clock) + if !ok { + continue + } + + err = out.Set(moduleName, val) + if err != nil { + return nil, fmt.Errorf("setting existing exec output for %s: %w", moduleName, err) } + } return out, nil @@ -88,10 +99,42 @@ func (e *Engine) HandleStalled(clock *pbsubstreams.Clock) error { func (e *Engine) EndOfStream(lastFinalClock *pbsubstreams.Clock) error { var errs error + for _, writer := range e.execOutputWriters { if err := writer.Close(context.Background()); err != nil { errs = multierror.Append(errs, err) } + + currentFile := writer.CurrentFile + + if e.indexWriters != nil { + if indexWriter, ok := e.indexWriters[currentFile.ModuleName]; ok { + indexes := make(map[string]*roaring64.Bitmap) + for _, item := range currentFile.Kv { + blockIndexOutput := item.Payload + extractedKeys := &pbindex.Keys{} + err := proto.Unmarshal(blockIndexOutput, extractedKeys) + if err != nil { + return fmt.Errorf("unmarshalling index keys from %s outputs: %w", currentFile.ModuleName, err) + } + + for _, key := range extractedKeys.Keys { + if _, ok = indexes[key]; !ok { + indexes[key] = roaring64.New() + } + indexes[key].Add(item.BlockNum) + } + } + + indexWriter.Write(indexes) + + err := indexWriter.Close(context.Background()) + if err != nil { + errs = multierror.Append(errs, err) + } + } + } } + return errs } diff --git a/pipeline/cache/init_test.go b/pipeline/cache/init_test.go deleted file mode 100644 index c3d3fc920..000000000 --- a/pipeline/cache/init_test.go +++ /dev/null @@ -1,5 +0,0 @@ -package cache - -import "github.com/streamingfast/logging" - -var zlog, _ = logging.PackageLogger("test", "github.com/streamingfast/substreams/pipeline/cache") diff --git a/pipeline/exec/baseexec.go b/pipeline/exec/baseexec.go index 2a06a033d..b93c8a261 100644 --- a/pipeline/exec/baseexec.go +++ b/pipeline/exec/baseexec.go @@ -7,6 +7,7 @@ import ( "github.com/streamingfast/substreams/reqctx" "github.com/streamingfast/substreams/storage/execout" + "github.com/streamingfast/substreams/storage/index" "github.com/streamingfast/substreams/wasm" ttrace "go.opentelemetry.io/otel/trace" ) @@ -17,9 +18,11 @@ type BaseExecutor struct { ctx context.Context moduleName string + initialBlock uint64 wasmModule wasm.Module wasmArguments []wasm.Argument entrypoint string + blockIndex *index.BlockIndex tracer ttrace.Tracer instanceCacheEnabled bool @@ -31,9 +34,11 @@ type BaseExecutor struct { executionStack []string } -func NewBaseExecutor(ctx context.Context, moduleName string, wasmModule wasm.Module, cacheEnabled bool, wasmArguments []wasm.Argument, entrypoint string, tracer ttrace.Tracer) *BaseExecutor { +func NewBaseExecutor(ctx context.Context, moduleName string, initialBlock uint64, wasmModule wasm.Module, cacheEnabled bool, wasmArguments []wasm.Argument, blockIndex *index.BlockIndex, entrypoint string, tracer ttrace.Tracer) *BaseExecutor { return &BaseExecutor{ ctx: ctx, + initialBlock: 
initialBlock, + blockIndex: blockIndex, moduleName: moduleName, wasmModule: wasmModule, instanceCacheEnabled: cacheEnabled, @@ -43,7 +48,8 @@ func NewBaseExecutor(ctx context.Context, moduleName string, wasmModule wasm.Mod } } -//var Timer time.Duration +// var Timer time.Duration +var ErrNoInput = errors.New("no input") func (e *BaseExecutor) wasmCall(outputGetter execout.ExecutionOutputGetter) (call *wasm.Call, err error) { e.logs = nil @@ -51,7 +57,7 @@ func (e *BaseExecutor) wasmCall(outputGetter execout.ExecutionOutputGetter) (cal e.executionStack = nil hasInput := false - for _, input := range e.wasmArguments { + for i, input := range e.wasmArguments { switch v := input.(type) { case *wasm.StoreWriterOutput: case *wasm.StoreReaderInput: @@ -59,16 +65,27 @@ func (e *BaseExecutor) wasmCall(outputGetter execout.ExecutionOutputGetter) (cal case *wasm.ParamsInput: hasInput = true case wasm.ValueArgument: - hasInput = true + if !v.Active(outputGetter.Clock().Number) { + break // skipping input that is not active at this block + } + data, _, err := outputGetter.Get(v.Name()) if err != nil { - return nil, fmt.Errorf("input data for %q: %w", v.Name(), err) + if errors.Is(err, execout.ErrNotFound) { + break + } + return nil, fmt.Errorf("input data for %q, param %d: %w", v.Name(), i, err) } + hasInput = true v.SetValue(data) default: panic("unknown wasm argument type") } } + if !hasInput { + return nil, ErrNoInput + } + // This allows us to skip the execution of the VM if there are no inputs. // This assumption should either be configurable by the manifest, or clearly documented: // state builders will not be called if their input streams are 0 bytes length (and there is no @@ -112,6 +129,14 @@ func (e *BaseExecutor) wasmCall(outputGetter execout.ExecutionOutputGetter) (cal return } +func (e *BaseExecutor) BlockIndex() *index.BlockIndex { + return e.blockIndex +} + +func (e *BaseExecutor) RunsOnBlock(blockNum uint64) bool { + return blockNum >= e.initialBlock +} + func (e *BaseExecutor) Close(ctx context.Context) error { if e.cachedInstance != nil { return e.cachedInstance.Close(ctx) diff --git a/pipeline/outputmodules/graph.go b/pipeline/exec/graph.go similarity index 69% rename from pipeline/outputmodules/graph.go rename to pipeline/exec/graph.go index d57a630e4..be254b5b0 100644 --- a/pipeline/outputmodules/graph.go +++ b/pipeline/exec/graph.go @@ -1,21 +1,23 @@ -package outputmodules +package exec import ( "fmt" + "math" "github.com/streamingfast/substreams/manifest" pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" ) type Graph struct { - requestModules *pbsubstreams.Modules - usedModules []*pbsubstreams.Module // all modules that need to be processed (requested directly or a required module ancestor) - stagedUsedModules ExecutionStages // all modules that need to be processed (requested directly or a required module ancestor) - moduleHashes *manifest.ModuleHashes - stores []*pbsubstreams.Module // subset of allModules: only the stores - lowestInitBlock uint64 - - outputModule *pbsubstreams.Module + requestModules *pbsubstreams.Modules + usedModules []*pbsubstreams.Module // all modules that need to be processed (requested directly or a required module ancestor) + stagedUsedModules ExecutionStages // all modules that need to be processed (requested directly or a required module ancestor) + moduleHashes *manifest.ModuleHashes + stores []*pbsubstreams.Module // subset of allModules: only the stores + modulesInitBlocks map[string]uint64 + lowestInitBlock uint64 + 
lowestStoresInitBlock *uint64 + outputModule *pbsubstreams.Module schedulableModules []*pbsubstreams.Module // stores and output mappers needed to execute to produce output for all `output_modules`. schedulableAncestorsMap map[string][]string // modules that are ancestors (therefore dependencies) of a given module @@ -24,6 +26,16 @@ type Graph struct { func (g *Graph) OutputModule() *pbsubstreams.Module { return g.outputModule } func (g *Graph) Stores() []*pbsubstreams.Module { return g.stores } func (g *Graph) UsedModules() []*pbsubstreams.Module { return g.usedModules } +func (g *Graph) UsedIndexModules() []*pbsubstreams.Module { + indexModules := make([]*pbsubstreams.Module, 0) + for _, mod := range g.usedModules { + if mod.GetKindBlockIndex() != nil { + indexModules = append(indexModules, mod) + } + } + return indexModules +} + func (g *Graph) UsedModulesUpToStage(stage int) (out []*pbsubstreams.Module) { for i := 0; i <= int(stage); i++ { for _, layer := range g.StagedUsedModules()[i] { @@ -34,10 +46,25 @@ func (g *Graph) UsedModulesUpToStage(stage int) (out []*pbsubstreams.Module) { } return } + +func (g *Graph) UsedIndexesModulesUpToStage(stage int) (out []*pbsubstreams.Module) { + for i := 0; i <= stage; i++ { + for _, layer := range g.StagedUsedModules()[i] { + for _, mod := range layer { + if mod.GetKindBlockIndex() != nil { + out = append(out, mod) + } + } + } + } + return +} func (g *Graph) StagedUsedModules() ExecutionStages { return g.stagedUsedModules } func (g *Graph) IsOutputModule(name string) bool { return g.outputModule.Name == name } func (g *Graph) ModuleHashes() *manifest.ModuleHashes { return g.moduleHashes } func (g *Graph) LowestInitBlock() uint64 { return g.lowestInitBlock } +func (g *Graph) LowestStoresInitBlock() *uint64 { return g.lowestStoresInitBlock } +func (g *Graph) ModulesInitBlocks() map[string]uint64 { return g.modulesInitBlocks } func NewOutputModuleGraph(outputModule string, productionMode bool, modules *pbsubstreams.Modules) (out *Graph, err error) { out = &Graph{ @@ -62,9 +89,18 @@ func (g *Graph) computeGraph(outputModule string, productionMode bool, modules * return fmt.Errorf("building execution moduleGraph: %w", err) } g.usedModules = processModules - g.stagedUsedModules = computeStages(processModules) - g.lowestInitBlock = computeLowestInitBlock(processModules) + g.modulesInitBlocks = map[string]uint64{} + for _, mod := range g.usedModules { + g.modulesInitBlocks[mod.Name] = mod.InitialBlock + } + + g.stagedUsedModules, err = computeStages(g.usedModules, g.modulesInitBlocks) + if err != nil { + return err + } + g.lowestInitBlock = computeLowestInitBlock(processModules) + g.lowestStoresInitBlock = computeLowestStoresInitBlock(processModules) if err := g.hashModules(graph); err != nil { return fmt.Errorf("cannot hash module: %w", err) } @@ -88,13 +124,44 @@ func (g *Graph) computeGraph(outputModule string, productionMode bool, modules * return nil } +// computeLowestStoresInitBlock finds the lowest initial block of all store modules. +func computeLowestStoresInitBlock(modules []*pbsubstreams.Module) (out *uint64) { + lowest := uint64(math.MaxUint64) + countStores := 0 + for _, mod := range modules { + if mod.GetKindStore() != nil { + countStores += 1 + if mod.InitialBlock < lowest { + lowest = mod.InitialBlock + } + } + } + + // No stores in the modules + if countStores == 0 { + return nil + } + + return &lowest +} + +// computeLowestInitBlock finds the lowest initial block of all modules that are not block indexes. 
+// If only blockIndex modules are present, it returns 0, since blockIndex modules always start at block 0. func computeLowestInitBlock(modules []*pbsubstreams.Module) (out uint64) { - lowest := modules[0].InitialBlock + var atLeastOneModuleThatIsNotAnIndex bool + lowest := uint64(math.MaxUint64) for _, mod := range modules { + if mod.GetKindBlockIndex() != nil { + continue + } + atLeastOneModuleThatIsNotAnIndex = true if mod.InitialBlock < lowest { lowest = mod.InitialBlock } } + if !atLeastOneModuleThatIsNotAnIndex { + return 0 + } return lowest } @@ -126,7 +193,7 @@ func (l LayerModules) IsStoreLayer() bool { return l[0].GetKindStore() != nil } -func computeStages(mods []*pbsubstreams.Module) (stages ExecutionStages) { +func computeStages(mods []*pbsubstreams.Module, initBlocks map[string]uint64) (stages ExecutionStages, err error) { seen := map[string]bool{} // Layers pre-define the list of modules that have all of their dependencies @@ -146,7 +213,7 @@ func computeStages(mods []*pbsubstreams.Module) (stages ExecutionStages) { modLoop: for _, mod := range mods { switch mod.Kind.(type) { - case *pbsubstreams.Module_KindMap_: + case *pbsubstreams.Module_KindMap_, *pbsubstreams.Module_KindBlockIndex_: if i%2 == 0 { continue } @@ -161,17 +228,25 @@ func computeStages(mods []*pbsubstreams.Module) (stages ExecutionStages) { continue } + var validInputsAtInitialBlock bool for _, dep := range mod.Inputs { var depModName string switch input := dep.Input.(type) { case *pbsubstreams.Module_Input_Params_: continue case *pbsubstreams.Module_Input_Source_: + validInputsAtInitialBlock = true continue case *pbsubstreams.Module_Input_Map_: depModName = input.Map.ModuleName + if mod.InitialBlock >= initBlocks[depModName] { + validInputsAtInitialBlock = true + } case *pbsubstreams.Module_Input_Store_: depModName = input.Store.ModuleName + if mod.InitialBlock >= initBlocks[depModName] { + validInputsAtInitialBlock = true + } default: panic(fmt.Errorf("unsupported input type %T", dep.Input)) } @@ -184,6 +259,17 @@ func computeStages(mods []*pbsubstreams.Module) (stages ExecutionStages) { } } + if !validInputsAtInitialBlock { + return nil, fmt.Errorf("module %q has no input available at its initial block %d", mod.Name, mod.InitialBlock) + } + + // Check block index dependency + if mod.BlockFilter != nil { + if !seen[mod.BlockFilter.Module] { + continue modLoop + } + } + layer = append(layer, mod) } if len(layer) != 0 { @@ -207,7 +293,7 @@ func computeStages(mods []*pbsubstreams.Module) (stages ExecutionStages) { } } - return stages + return stages, nil } func computeOutputModule(mods []*pbsubstreams.Module, outputModule string) *pbsubstreams.Module { diff --git a/pipeline/outputmodules/graph_test.go b/pipeline/exec/graph_test.go similarity index 82% rename from pipeline/outputmodules/graph_test.go rename to pipeline/exec/graph_test.go index 642c203d9..04ed52b9f 100644 --- a/pipeline/outputmodules/graph_test.go +++ b/pipeline/exec/graph_test.go @@ -1,4 +1,4 @@ -package outputmodules +package exec import ( "fmt" @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" pbsubstreamsrpc "github.com/streamingfast/substreams/pb/sf/substreams/rpc/v2" pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" @@ -42,11 +43,22 @@ func TestGraph_computeStages(t *testing.T) { input: "Ma Mb:Ma Sc:Mb Md:Sc Se:Md,Sg Mf:Ma Sg:Mf Mh:Se,Ma", expect: "[[Ma] [Mb Mf] [Sc Sg]] [[Md] [Se]] [[Mh]]", }, + { + name: "sixth graph (block index impl)", + input: "Ia Mb
Md:Sc,Ia Sc:Mb", + expect: "[[Ia Mb] [Sc]] [[Md]]", + }, + { + name: "seventh graph (block index impl)", + input: "Ia Mb Md:Sc,Ia Sc:Mb Me:Sc Mf:Me,Sg,Ia Sg:Md,Me", + expect: "[[Ia Mb] [Sc]] [[Md Me] [Sg]] [[Mf]]", + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - out := computeStages(computeStagesInput(test.input)) + out, err := computeStages(computeStagesInput(test.input), nil) + require.NoError(t, err) assert.Equal(t, test.expect, computeStagesOutput(out)) }) } @@ -67,12 +79,20 @@ func computeStagesInput(in string) (out []*pbsubstreams.Module) { case 'M': newMod.Kind = &pbsubstreams.Module_KindMap_{KindMap: &pbsubstreams.Module_KindMap{}} newMod.Name = modName[1:] + case 'I': + newMod.Kind = &pbsubstreams.Module_KindBlockIndex_{KindBlockIndex: &pbsubstreams.Module_KindBlockIndex{}} + newMod.Name = modName[1:] default: panic("invalid prefix in word: " + modName) } + // we set at least one block source so it doesn't fail the validation + newMod.Inputs = []*pbsubstreams.Module_Input{ + {Input: &pbsubstreams.Module_Input_Source_{Source: &pbsubstreams.Module_Input_Source{Type: "test.block"}}}, + } if len(params) > 1 { for _, input := range strings.Split(params[1], ",") { inputName := input[1:] + switch input[0] { case 'S': newMod.Inputs = append(newMod.Inputs, &pbsubstreams.Module_Input{Input: &pbsubstreams.Module_Input_Store_{Store: &pbsubstreams.Module_Input_Store{ModuleName: inputName}}}) @@ -82,6 +102,11 @@ func computeStagesInput(in string) (out []*pbsubstreams.Module) { newMod.Inputs = append(newMod.Inputs, &pbsubstreams.Module_Input{Input: &pbsubstreams.Module_Input_Params_{}}) case 'R': newMod.Inputs = append(newMod.Inputs, &pbsubstreams.Module_Input{Input: &pbsubstreams.Module_Input_Source_{}}) + case 'I': + newMod.BlockFilter = &pbsubstreams.Module_BlockFilter{ + Module: inputName, + Query: &pbsubstreams.Module_BlockFilter_QueryString{QueryString: "test"}, + } default: panic("invalid input prefix: " + input) } @@ -103,6 +128,9 @@ func computeStagesOutput(in ExecutionStages) string { if l3.GetKindMap() != nil { modKind = "M" } + if l3.GetKindBlockIndex() != nil { + modKind = "I" + } level3 = append(level3, modKind+l3.Name) } level2 = append(level2, fmt.Sprintf("%v", level3)) diff --git a/pipeline/exec/indexexec.go b/pipeline/exec/indexexec.go new file mode 100644 index 000000000..1f0aa171b --- /dev/null +++ b/pipeline/exec/indexexec.go @@ -0,0 +1,72 @@ +package exec + +import ( + "context" + "fmt" + + "github.com/streamingfast/substreams/reqctx" + + pbindex "github.com/streamingfast/substreams/pb/sf/substreams/index/v1" + pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" + "github.com/streamingfast/substreams/storage/execout" + "github.com/streamingfast/substreams/wasm" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +type IndexModuleExecutor struct { + BaseExecutor +} + +func NewIndexModuleExecutor(baseExecutor *BaseExecutor) *IndexModuleExecutor { + return &IndexModuleExecutor{BaseExecutor: *baseExecutor} +} + +func (i *IndexModuleExecutor) Name() string { return i.moduleName } +func (i *IndexModuleExecutor) String() string { return i.Name() } + +func (i *IndexModuleExecutor) applyCachedOutput([]byte) error { + return nil +} + +func (i *IndexModuleExecutor) run(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, outForFiles []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { + _, span := reqctx.WithModuleExecutionSpan(ctx, "exec_index") + defer 
span.EndWithErr(&err) + + var call *wasm.Call + if call, err = i.wasmCall(reader); err != nil { + return nil, nil, nil, fmt.Errorf("index wasm call: %w", err) + } + + if call != nil { + out = call.Output() + } + + modOut, err := i.toModuleOutput(out) + if err != nil { + return nil, nil, nil, fmt.Errorf("converting back to module output: %w", err) + } + + return out, out, modOut, nil +} + +func (i *IndexModuleExecutor) toModuleOutput(data []byte) (*pbssinternal.ModuleOutput, error) { + var indexKeys pbindex.Keys + err := proto.Unmarshal(data, &indexKeys) + if err != nil { + return nil, fmt.Errorf("unmarshalling index keys: %w", err) + } + + return &pbssinternal.ModuleOutput{ + Data: &pbssinternal.ModuleOutput_MapOutput{ + MapOutput: &anypb.Any{TypeUrl: "type.googleapis.com/sf.substreams.index.v1.Keys", Value: data}, + }, + }, nil +} + +func (i *IndexModuleExecutor) HasValidOutput() bool { + return true +} +func (i *IndexModuleExecutor) HasOutputForFiles() bool { + return true +} diff --git a/pipeline/exec/interface.go b/pipeline/exec/interface.go index e41fef13b..cb0467602 100644 --- a/pipeline/exec/interface.go +++ b/pipeline/exec/interface.go @@ -5,6 +5,7 @@ import ( pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" "github.com/streamingfast/substreams/storage/execout" + "github.com/streamingfast/substreams/storage/index" ) type ModuleExecutor interface { @@ -12,10 +13,14 @@ type ModuleExecutor interface { Name() string String() string Close(ctx context.Context) error - run(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) + run(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, outForFiles []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) applyCachedOutput(value []byte) error toModuleOutput(data []byte) (*pbssinternal.ModuleOutput, error) HasValidOutput() bool + HasOutputForFiles() bool + + BlockIndex() *index.BlockIndex + RunsOnBlock(blockNum uint64) bool lastExecutionLogs() (logs []string, truncated bool) lastExecutionStack() []string diff --git a/pipeline/exec/mapexec.go b/pipeline/exec/mapexec.go index 791df350e..9e1bde15a 100644 --- a/pipeline/exec/mapexec.go +++ b/pipeline/exec/mapexec.go @@ -4,13 +4,11 @@ import ( "context" "fmt" - "google.golang.org/protobuf/types/known/anypb" - pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" - "github.com/streamingfast/substreams/storage/execout" - "github.com/streamingfast/substreams/reqctx" + "github.com/streamingfast/substreams/storage/execout" "github.com/streamingfast/substreams/wasm" + "google.golang.org/protobuf/types/known/anypb" ) type MapperModuleExecutor struct { @@ -33,13 +31,13 @@ func (e *MapperModuleExecutor) String() string { return e.Name() } // and in this case, we don't do anything func (e *MapperModuleExecutor) applyCachedOutput([]byte) error { return nil } -func (e *MapperModuleExecutor) run(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { - ctx, span := reqctx.WithModuleExecutionSpan(ctx, "exec_map") +func (e *MapperModuleExecutor) run(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, outForFiles []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { + _, span := reqctx.WithModuleExecutionSpan(ctx, "exec_map") defer span.EndWithErr(&err) var call *wasm.Call if call, err = e.wasmCall(reader); err != nil { - return nil, nil, fmt.Errorf("maps wasm
call: %w", err) + return nil, nil, nil, fmt.Errorf("maps wasm call: %w", err) } if call != nil { @@ -48,10 +46,10 @@ func (e *MapperModuleExecutor) run(ctx context.Context, reader execout.Execution modOut, err := e.toModuleOutput(out) if err != nil { - return nil, nil, fmt.Errorf("converting back to module output: %w", err) + return nil, nil, nil, fmt.Errorf("converting back to module output: %w", err) } - return out, modOut, nil + return out, out, modOut, nil // same output for files or for the module } func (e *MapperModuleExecutor) toModuleOutput(data []byte) (*pbssinternal.ModuleOutput, error) { @@ -65,3 +63,7 @@ func (e *MapperModuleExecutor) toModuleOutput(data []byte) (*pbssinternal.Module func (e *MapperModuleExecutor) HasValidOutput() bool { return true } + +func (e *MapperModuleExecutor) HasOutputForFiles() bool { + return true +} diff --git a/pipeline/exec/module_executor.go b/pipeline/exec/module_executor.go index 345b5720f..dee27fd69 100644 --- a/pipeline/exec/module_executor.go +++ b/pipeline/exec/module_executor.go @@ -2,9 +2,11 @@ package exec import ( "context" + "errors" "fmt" "github.com/streamingfast/substreams/storage/execout" + "github.com/streamingfast/substreams/storage/index" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel/attribute" @@ -15,7 +17,25 @@ import ( pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" ) -func RunModule(ctx context.Context, executor ModuleExecutor, execOutput execout.ExecutionOutputGetter) (*pbssinternal.ModuleOutput, []byte, error) { +func skipFromIndex(index *index.BlockIndex, execOutput execout.ExecutionOutputGetter) bool { + if index == nil { + return false + } + + if index.Precomputed() { + return index.Skip(uint64(execOutput.Clock().Number)) + } + + indexedKeys, _, err := execOutput.Get(index.IndexModule) + if err != nil { + panic(fmt.Errorf("getting index module output for keys: %w", err)) + } + + return index.SkipFromKeys(indexedKeys) + +} + +func RunModule(ctx context.Context, executor ModuleExecutor, execOutput execout.ExecutionOutputGetter) (*pbssinternal.ModuleOutput, []byte, []byte, bool, error) { logger := reqctx.Logger(ctx) modName := executor.Name() @@ -29,54 +49,62 @@ func RunModule(ctx context.Context, executor ModuleExecutor, execOutput execout. 
logger.Debug("running module") + if skipFromIndex(executor.BlockIndex(), execOutput) { + emptyOutput, _ := executor.toModuleOutput(nil) + return emptyOutput, nil, nil, true, nil + } + cached, outputBytes, err := getCachedOutput(execOutput, executor) if err != nil { - return nil, nil, fmt.Errorf("check cache output exists: %w", err) + return nil, nil, nil, false, fmt.Errorf("check cache output exists: %w", err) } span.SetAttributes(attribute.Bool("substreams.module.cached", cached)) if cached { if err = executor.applyCachedOutput(outputBytes); err != nil { - return nil, nil, fmt.Errorf("apply cached output: %w", err) + return nil, nil, nil, false, fmt.Errorf("apply cached output: %w", err) } moduleOutput, err := executor.toModuleOutput(outputBytes) if err != nil { - return moduleOutput, outputBytes, fmt.Errorf("converting output to module output: %w", err) + return moduleOutput, outputBytes, nil, false, fmt.Errorf("converting output to module output: %w", err) } if moduleOutput == nil { - return nil, nil, nil // output from PartialKV is always nil, we cannot use it + return nil, nil, nil, false, nil // output from PartialKV is always nil, we cannot use it } // For store modules, the output in cache is in "operations", but we get the proper store deltas in "toModuleOutput", so we need to transform back those deltas into outputBytes if storeDeltas := moduleOutput.GetStoreDeltas(); storeDeltas != nil { outputBytes, err = proto.Marshal(moduleOutput.GetStoreDeltas()) if err != nil { - return nil, nil, err + return nil, nil, nil, false, err } } fillModuleOutputMetadata(executor, moduleOutput) moduleOutput.Cached = true - return moduleOutput, outputBytes, nil + return moduleOutput, outputBytes, nil, false, nil } uid := reqctx.ReqStats(ctx).RecordModuleWasmBlockBegin(modName) - outputBytes, moduleOutput, err := executor.run(ctx, execOutput) + outputBytes, outputForFiles, moduleOutput, err := executor.run(ctx, execOutput) if err != nil { - return nil, nil, fmt.Errorf("execute: %w", err) + if errors.Is(err, ErrNoInput) { + return nil, nil, nil, true, nil + } + return nil, nil, nil, false, fmt.Errorf("execute: %w", err) } reqctx.ReqStats(ctx).RecordModuleWasmBlockEnd(modName, uid) fillModuleOutputMetadata(executor, moduleOutput) - return moduleOutput, outputBytes, nil + return moduleOutput, outputBytes, outputForFiles, false, nil } func getCachedOutput(execOutput execout.ExecutionOutputGetter, executor ModuleExecutor) (bool, []byte, error) { output, cached, err := execOutput.Get(executor.Name()) - if err != nil && err != execout.NotFound { + if err != nil && err != execout.ErrNotFound { return false, nil, fmt.Errorf("get cached output: %w", err) } return cached, output, nil diff --git a/pipeline/exec/module_executor_test.go b/pipeline/exec/module_executor_test.go index 0ee5afd8d..ad29f45f9 100644 --- a/pipeline/exec/module_executor_test.go +++ b/pipeline/exec/module_executor_test.go @@ -13,6 +13,7 @@ import ( pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" "github.com/streamingfast/substreams/reqctx" "github.com/streamingfast/substreams/storage/execout" + "github.com/streamingfast/substreams/storage/index" ) type MockExecOutput struct { @@ -32,7 +33,7 @@ func (t *MockExecOutput) Len() int { func (t *MockExecOutput) Get(name string) ([]byte, bool, error) { v, ok := t.cacheMap[name] if !ok { - return nil, false, execout.NotFound + return nil, false, execout.ErrNotFound } return v, true, nil } @@ -43,10 +44,9 @@ func (t *MockExecOutput) Set(name string, value []byte) (err error) { 
} type MockModuleExecutor struct { - name string - outputType string + name string - RunFunc func(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) + RunFunc func(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, outForFiles []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) ApplyFunc func(value []byte) error LogsFunc func() (logs []string, truncated bool) StackFunc func() []string @@ -56,17 +56,19 @@ type MockModuleExecutor struct { var _ ModuleExecutor = (*MockModuleExecutor)(nil) -func (t *MockModuleExecutor) Name() string { return t.name } -func (t *MockModuleExecutor) String() string { return fmt.Sprintf("TestModuleExecutor(%s)", t.name) } -func (t *MockModuleExecutor) Close(ctx context.Context) error { return nil } -func (t *MockModuleExecutor) HasValidOutput() bool { return t.cacheable } - -func (t *MockModuleExecutor) run(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { +func (t *MockModuleExecutor) run(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, outForFiles []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { if t.RunFunc != nil { return t.RunFunc(ctx, reader) } - return nil, nil, fmt.Errorf("not implemented") + return nil, nil, nil, fmt.Errorf("not implemented") } +func (t *MockModuleExecutor) BlockIndex() *index.BlockIndex { return nil } +func (t *MockModuleExecutor) RunsOnBlock(_ uint64) bool { return true } +func (t *MockModuleExecutor) Name() string { return t.name } +func (t *MockModuleExecutor) String() string { return fmt.Sprintf("TestModuleExecutor(%s)", t.name) } +func (t *MockModuleExecutor) Close(ctx context.Context) error { return nil } +func (t *MockModuleExecutor) HasValidOutput() bool { return t.cacheable } +func (t *MockModuleExecutor) HasOutputForFiles() bool { return false } func (t *MockModuleExecutor) applyCachedOutput(value []byte) error { if t.ApplyFunc != nil { @@ -102,8 +104,8 @@ func TestModuleExecutorRunner_Run_HappyPath(t *testing.T) { ctx = reqctx.WithReqStats(ctx, metrics.NewReqStats(&metrics.Config{}, zap.NewNop())) executor := &MockModuleExecutor{ name: "test", - RunFunc: func(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { - return []byte("test"), &pbssinternal.ModuleOutput{ + RunFunc: func(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, outForFiles []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { + return []byte("test"), nil, &pbssinternal.ModuleOutput{ Data: &pbssinternal.ModuleOutput_MapOutput{ MapOutput: nil, }, @@ -117,7 +119,7 @@ func TestModuleExecutorRunner_Run_HappyPath(t *testing.T) { cacheMap: make(map[string][]byte), } - moduleOutput, _, err := RunModule(ctx, executor, output) + moduleOutput, _, _, _, err := RunModule(ctx, executor, output) if err != nil { t.Fatal(err) } @@ -133,8 +135,8 @@ func TestModuleExecutorRunner_Run_CachedOutput(t *testing.T) { executor := &MockModuleExecutor{ name: "test", - RunFunc: func(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { - return []byte("test"), &pbssinternal.ModuleOutput{ + RunFunc: func(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, outForFiles []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { + return []byte("test"), nil, 
&pbssinternal.ModuleOutput{ Data: &pbssinternal.ModuleOutput_MapOutput{ MapOutput: nil, }, @@ -161,7 +163,7 @@ func TestModuleExecutorRunner_Run_CachedOutput(t *testing.T) { }, } - moduleOutput, _, err := RunModule(ctx, executor, output) + moduleOutput, _, _, _, err := RunModule(ctx, executor, output) if err != nil { t.Fatal(err) } diff --git a/pipeline/exec/storeexec.go b/pipeline/exec/storeexec.go index 2034895bc..cfb2e9174 100644 --- a/pipeline/exec/storeexec.go +++ b/pipeline/exec/storeexec.go @@ -32,25 +32,28 @@ func (e *StoreModuleExecutor) applyCachedOutput(value []byte) error { return e.outputStore.ApplyOps(value) } -func (e *StoreModuleExecutor) run(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { - ctx, span := reqctx.WithModuleExecutionSpan(ctx, "exec_store") +func (e *StoreModuleExecutor) run(ctx context.Context, reader execout.ExecutionOutputGetter) (out []byte, outForFiles []byte, moduleOutputData *pbssinternal.ModuleOutput, err error) { + _, span := reqctx.WithModuleExecutionSpan(ctx, "exec_store") defer span.EndWithErr(&err) if _, err := e.wasmCall(reader); err != nil { - return nil, nil, fmt.Errorf("store wasm call: %w", err) + return nil, nil, nil, fmt.Errorf("store wasm call: %w", err) } - return e.wrapDeltas() + return e.wrapDeltasAndOps() } func (e *StoreModuleExecutor) HasValidOutput() bool { _, ok := e.outputStore.(*store.FullKV) return ok } +func (e *StoreModuleExecutor) HasOutputForFiles() bool { + return true +} -func (e *StoreModuleExecutor) wrapDeltas() ([]byte, *pbssinternal.ModuleOutput, error) { +func (e *StoreModuleExecutor) wrapDeltasAndOps() ([]byte, []byte, *pbssinternal.ModuleOutput, error) { if err := e.outputStore.Flush(); err != nil { - return nil, nil, err + return nil, nil, nil, err } deltas := &pbsubstreams.StoreDeltas{ @@ -59,15 +62,16 @@ func (e *StoreModuleExecutor) wrapDeltas() ([]byte, *pbssinternal.ModuleOutput, data, err := proto.Marshal(deltas) if err != nil { - return nil, nil, fmt.Errorf("caching: marshalling delta: %w", err) + return nil, nil, nil, fmt.Errorf("caching: marshalling delta: %w", err) } + dataForFiles := e.outputStore.ReadOps() moduleOutput := &pbssinternal.ModuleOutput{ Data: &pbssinternal.ModuleOutput_StoreDeltas{ StoreDeltas: deltas, }, } - return data, moduleOutput, nil + return data, dataForFiles, moduleOutput, nil } // toModuleOutput returns nil,nil on partialKV, because we never use the outputs of a partial store directly diff --git a/pipeline/outputmodules/testing.go b/pipeline/exec/testing.go similarity index 98% rename from pipeline/outputmodules/testing.go rename to pipeline/exec/testing.go index b19fa82cd..beeaa2d59 100644 --- a/pipeline/outputmodules/testing.go +++ b/pipeline/exec/testing.go @@ -1,4 +1,4 @@ -package outputmodules +package exec import ( pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" diff --git a/pipeline/forkhandler.go b/pipeline/forkhandler.go index 0e2909626..df463cbd7 100644 --- a/pipeline/forkhandler.go +++ b/pipeline/forkhandler.go @@ -1,7 +1,6 @@ package pipeline import ( - "github.com/streamingfast/bstream" "sync" pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" @@ -38,7 +37,6 @@ func (f *ForkHandler) registerUndoHandler(handler UndoHandler) { func (f *ForkHandler) handleUndo( clock *pbsubstreams.Clock, - cursor *bstream.Cursor, ) error { f.mu.RLock() defer f.mu.RUnlock() diff --git a/pipeline/init_test.go b/pipeline/init_test.go index 721ad034e..412509814 
100644 --- a/pipeline/init_test.go +++ b/pipeline/init_test.go @@ -45,8 +45,9 @@ func assertProtoEqual(t *testing.T, expected proto.Message, actual proto.Message } type ExecOutputTesting struct { - Values map[string][]byte - clock *pbsubstreams.Clock + Values map[string][]byte + ValuesForFiles map[string][]byte + clock *pbsubstreams.Clock } func NewExecOutputTesting(t *testing.T, block *pbbstream.Block, clock *pbsubstreams.Clock) *ExecOutputTesting { @@ -61,6 +62,7 @@ func NewExecOutputTesting(t *testing.T, block *pbbstream.Block, clock *pbsubstre "sf.substreams.v1.test.Block": blkBytes, "sf.substreams.v1.Clock": clockBytes, }, + ValuesForFiles: map[string][]byte{}, } } @@ -74,7 +76,7 @@ func (i *ExecOutputTesting) Len() (out int) { func (i *ExecOutputTesting) Get(moduleName string) (value []byte, cached bool, err error) { val, found := i.Values[moduleName] if !found { - return nil, false, execout.NotFound + return nil, false, execout.ErrNotFound } return val, false, nil } @@ -84,6 +86,11 @@ func (i *ExecOutputTesting) Set(moduleName string, value []byte) (err error) { return nil } +func (i *ExecOutputTesting) SetFileOutput(moduleName string, value []byte) (err error) { + i.ValuesForFiles[moduleName] = value + return nil +} + func (i *ExecOutputTesting) Clock() *pbsubstreams.Clock { return i.clock } diff --git a/pipeline/pipeline.go b/pipeline/pipeline.go index d5f3c63e6..8d43fea7e 100644 --- a/pipeline/pipeline.go +++ b/pipeline/pipeline.go @@ -6,11 +6,9 @@ import ( "strings" "time" + "github.com/RoaringBitmap/roaring/roaring64" "github.com/streamingfast/bstream" "github.com/streamingfast/dmetering" - "go.opentelemetry.io/otel" - "go.uber.org/zap" - "github.com/streamingfast/substreams" "github.com/streamingfast/substreams/orchestrator" "github.com/streamingfast/substreams/orchestrator/plan" @@ -21,11 +19,14 @@ import ( pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" "github.com/streamingfast/substreams/pipeline/cache" "github.com/streamingfast/substreams/pipeline/exec" - "github.com/streamingfast/substreams/pipeline/outputmodules" "github.com/streamingfast/substreams/reqctx" + "github.com/streamingfast/substreams/sqe" "github.com/streamingfast/substreams/storage/execout" + "github.com/streamingfast/substreams/storage/index" "github.com/streamingfast/substreams/storage/store" "github.com/streamingfast/substreams/wasm" + "go.opentelemetry.io/otel" + "go.uber.org/zap" ) type processingModule struct { @@ -44,20 +45,20 @@ type Pipeline struct { postJobHooks []substreams.PostJobHook wasmRuntime *wasm.Registry - outputGraph *outputmodules.Graph + execGraph *exec.Graph loadedModules map[uint32]wasm.Module - moduleExecutors [][]exec.ModuleExecutor // Staged module executors - executionStages outputmodules.ExecutionStages + ModuleExecutors [][]exec.ModuleExecutor // Staged module executors + executionStages exec.ExecutionStages mapModuleOutput *pbsubstreamsrpc.MapModuleOutput extraMapModuleOutputs []*pbsubstreamsrpc.MapModuleOutput extraStoreModuleOutputs []*pbsubstreamsrpc.StoreModuleOutput + preexistingBlockIndices map[string]map[string]*roaring64.Bitmap respFunc substreams.ResponseFunc lastProgressSent time.Time startTime time.Time - modulesStats map[string]*pbssinternal.ModuleStats stores *Stores execoutStorage *execout.Configs @@ -81,8 +82,9 @@ type Pipeline struct { func New( ctx context.Context, - outputGraph *outputmodules.Graph, + execGraph *exec.Graph, stores *Stores, + indices map[string]map[string]*roaring64.Bitmap, execoutStorage *execout.Configs, wasmRuntime 
*wasm.Registry, execOutputCache *cache.Engine, @@ -92,19 +94,20 @@ func New( opts ...Option, ) *Pipeline { pipe := &Pipeline{ - ctx: ctx, - gate: newGate(ctx), - execOutputCache: execOutputCache, - stateBundleSize: stateBundleSize, - workerFactory: workerFactory, - outputGraph: outputGraph, - wasmRuntime: wasmRuntime, - respFunc: respFunc, - stores: stores, - execoutStorage: execoutStorage, - forkHandler: NewForkHandler(), - blockStepMap: make(map[bstream.StepType]uint64), - startTime: time.Now(), + ctx: ctx, + gate: newGate(ctx), + execOutputCache: execOutputCache, + stateBundleSize: stateBundleSize, + workerFactory: workerFactory, + preexistingBlockIndices: indices, + execGraph: execGraph, + wasmRuntime: wasmRuntime, + respFunc: respFunc, + stores: stores, + execoutStorage: execoutStorage, + forkHandler: NewForkHandler(), + blockStepMap: make(map[bstream.StepType]uint64), + startTime: time.Now(), } for _, opt := range opts { opt(pipe) @@ -123,7 +126,7 @@ func (p *Pipeline) Init(ctx context.Context) (err error) { p.setupProcessingModule(reqDetails) - stagedModules := p.outputGraph.StagedUsedModules() + stagedModules := p.execGraph.StagedUsedModules() // truncate stages to highest scheduled stage if highest := p.highestStage; highest != nil { @@ -206,13 +209,20 @@ func (p *Pipeline) setupSubrequestStores(ctx context.Context) (storeMap store.Ma storeConfig := p.stores.configs[mod.Name] if isLastStage { - partialStore := storeConfig.NewPartialKV(reqDetails.ResolvedStartBlockNum, logger) + initialBlock := reqDetails.ResolvedStartBlockNum + if storeConfig.ModuleInitialBlock() > reqDetails.ResolvedStartBlockNum { + if storeConfig.ModuleInitialBlock() > reqDetails.StopBlockNum { + continue + } + initialBlock = storeConfig.ModuleInitialBlock() + } + partialStore := storeConfig.NewPartialKV(initialBlock, logger) storeMap.Set(partialStore) } else { fullStore := storeConfig.NewFullKV(logger) - if fullStore.InitialBlock() != reqDetails.ResolvedStartBlockNum { + if fullStore.InitialBlock() < reqDetails.ResolvedStartBlockNum { file := store.NewCompleteFileInfo(fullStore.Name(), fullStore.InitialBlock(), reqDetails.ResolvedStartBlockNum) // FIXME: run debugging session with conditional breakpoint // `request.Stage == 1 && request.StartBlockNum == 20` @@ -258,7 +268,7 @@ func (p *Pipeline) runParallelProcess(ctx context.Context, reqPlan *plan.Request reqPlan, p.workerFactory, int(reqDetails.MaxParallelJobs), - p.outputGraph, + p.execGraph, p.execoutStorage, p.respFunc, p.stores.configs, @@ -301,7 +311,7 @@ func (p *Pipeline) runParallelProcess(ctx context.Context, reqPlan *plan.Request } func (p *Pipeline) isOutputModule(name string) bool { - return p.outputGraph.IsOutputModule(name) + return p.execGraph.IsOutputModule(name) } func (p *Pipeline) runPostJobHooks(ctx context.Context, clock *pbsubstreams.Clock) { @@ -374,6 +384,7 @@ func toRPCMapModuleOutputs(in *pbssinternal.ModuleOutput) (out *pbsubstreamsrpc. if data == nil { return nil } + return &pbsubstreamsrpc.MapModuleOutput{ Name: in.ModuleName, MapOutput: data, @@ -385,7 +396,7 @@ func toRPCMapModuleOutputs(in *pbssinternal.ModuleOutput) (out *pbsubstreamsrpc. 
} } -func (p *Pipeline) returnRPCModuleProgressOutputs(clock *pbsubstreams.Clock, forceOutput bool) error { +func (p *Pipeline) returnRPCModuleProgressOutputs(forceOutput bool) error { if time.Since(p.lastProgressSent) < progressMessageInterval && !forceOutput { return nil } @@ -406,13 +417,17 @@ func (p *Pipeline) returnRPCModuleProgressOutputs(clock *pbsubstreams.Clock, for func (p *Pipeline) toInternalUpdate(clock *pbsubstreams.Clock) *pbssinternal.Update { meter := dmetering.GetBytesMeter(p.ctx) - return &pbssinternal.Update{ - ProcessedBlocks: clock.Number - p.processingModule.initialBlockNum, + out := &pbssinternal.Update{ DurationMs: uint64(time.Since(p.startTime).Milliseconds()), TotalBytesRead: meter.BytesRead(), TotalBytesWritten: meter.BytesWritten(), ModulesStats: reqctx.ReqStats(p.ctx).LocalModulesStats(), } + + if clock != nil { + out.ProcessedBlocks = clock.Number - p.processingModule.initialBlockNum + } + return out } func (p *Pipeline) returnInternalModuleProgressOutputs(clock *pbsubstreams.Clock, forceOutput bool) error { @@ -435,12 +450,12 @@ func (p *Pipeline) returnInternalModuleProgressOutputs(clock *pbsubstreams.Clock return nil } -// buildModuleExecutors builds the moduleExecutors, and the loadedModules. -func (p *Pipeline) buildModuleExecutors(ctx context.Context) ([][]exec.ModuleExecutor, error) { - if p.moduleExecutors != nil { +// BuildModuleExecutors builds the ModuleExecutors, and the loadedModules. +func (p *Pipeline) BuildModuleExecutors(ctx context.Context) error { + if p.ModuleExecutors != nil { // Eventually, we can invalidate our cache to accommodate the PATCH // and rebuild all the modules, and tear down the previously loaded ones. - return p.moduleExecutors, nil + return nil } reqModules := reqctx.Details(ctx).Modules @@ -456,7 +471,7 @@ func (p *Pipeline) buildModuleExecutors(ctx context.Context) ([][]exec.ModuleExe code := reqModules.Binaries[module.BinaryIndex] m, err := p.wasmRuntime.NewModule(ctx, code.Content, code.Type) if err != nil { - return nil, fmt.Errorf("new wasm module: %w", err) + return fmt.Errorf("new wasm module: %w", err) } loadedModules[module.BinaryIndex] = m } @@ -472,7 +487,24 @@ func (p *Pipeline) buildModuleExecutors(ctx context.Context) ([][]exec.ModuleExe for _, module := range layer { inputs, err := p.renderWasmInputs(module) if err != nil { - return nil, fmt.Errorf("module %q: get wasm inputs: %w", module.Name, err) + return fmt.Errorf("module %q: get wasm inputs: %w", module.Name, err) + } + var moduleBlockIndex *index.BlockIndex + if module.BlockFilter != nil { + qs, err := module.BlockFilterQueryString() + if err != nil { + return err + } + expr, err := sqe.Parse(ctx, qs) + if err != nil { + return fmt.Errorf("parse block filter: %q: %w", qs, err) + } + var precomputedBitmap *roaring64.Bitmap + + if indices := p.preexistingBlockIndices[module.BlockFilter.Module]; indices != nil { + precomputedBitmap = sqe.RoaringBitmapsApply(expr, indices) + } + moduleBlockIndex = index.NewBlockIndex(expr, module.BlockFilter.Module, precomputedBitmap) } entrypoint := module.BinaryEntrypoint @@ -484,9 +516,11 @@ func (p *Pipeline) buildModuleExecutors(ctx context.Context) ([][]exec.ModuleExe baseExecutor := exec.NewBaseExecutor( ctx, module.Name, + module.InitialBlock, mod, p.wasmRuntime.InstanceCacheEnabled(), inputs, + moduleBlockIndex, entrypoint, tracer, ) @@ -499,22 +533,43 @@ func (p *Pipeline) buildModuleExecutors(ctx context.Context) ([][]exec.ModuleExe outputStore, found := p.stores.StoreMap.Get(module.Name)
if !found { - return nil, fmt.Errorf("store %q not found", module.Name) + return fmt.Errorf("store %q not found", module.Name) } inputs = append(inputs, wasm.NewStoreWriterOutput(module.Name, outputStore, updatePolicy, valueType)) baseExecutor := exec.NewBaseExecutor( ctx, module.Name, + module.InitialBlock, mod, p.wasmRuntime.InstanceCacheEnabled(), inputs, + moduleBlockIndex, entrypoint, tracer, ) executor := exec.NewStoreModuleExecutor(baseExecutor, outputStore) moduleExecutors = append(moduleExecutors, executor) + case *pbsubstreams.Module_KindBlockIndex_: + if indices := p.preexistingBlockIndices[module.Name]; indices != nil { + break // skip index modules whose indexes already exist + } + baseExecutor := exec.NewBaseExecutor( + ctx, + module.Name, + module.InitialBlock, + mod, + p.wasmRuntime.InstanceCacheEnabled(), + inputs, + moduleBlockIndex, + entrypoint, + tracer, + ) + + executor := exec.NewIndexModuleExecutor(baseExecutor) + moduleExecutors = append(moduleExecutors, executor) + default: panic(fmt.Errorf("invalid kind %q input module %q", module.Kind, module.Name)) } @@ -523,12 +578,12 @@ func (p *Pipeline) buildModuleExecutors(ctx context.Context) ([][]exec.ModuleExe } } - p.moduleExecutors = stagedModuleExecutors - return stagedModuleExecutors, nil + p.ModuleExecutors = stagedModuleExecutors + return nil } func (p *Pipeline) cleanUpModuleExecutors(ctx context.Context) error { - for _, stage := range p.moduleExecutors { + for _, stage := range p.ModuleExecutors { for _, executor := range stage { if err := executor.Close(ctx); err != nil { return fmt.Errorf("closing module executor %q: %w", executor.Name(), err) @@ -551,6 +606,10 @@ func returnModuleDataOutputs( extraStoreModuleOutputs []*pbsubstreamsrpc.StoreModuleOutput, respFunc substreams.ResponseFunc, ) error { + if mapModuleOutput == nil { + return nil + } + out := &pbsubstreamsrpc.BlockScopedData{ Clock: clock, Output: mapModuleOutput, @@ -574,22 +633,22 @@ func (p *Pipeline) renderWasmInputs(module *pbsubstreams.Module) (out []wasm.Arg case *pbsubstreams.Module_Input_Params_: out = append(out, wasm.NewParamsInput(input.GetParams().GetValue())) case *pbsubstreams.Module_Input_Map_: - out = append(out, wasm.NewMapInput(in.Map.ModuleName)) + out = append(out, wasm.NewMapInput(in.Map.ModuleName, p.execGraph.ModulesInitBlocks()[in.Map.ModuleName])) case *pbsubstreams.Module_Input_Store_: inputName := input.GetStore().ModuleName if input.GetStore().Mode == pbsubstreams.Module_Input_Store_DELTAS { - out = append(out, wasm.NewMapInput(inputName)) + out = append(out, wasm.NewMapInput(inputName, p.execGraph.ModulesInitBlocks()[inputName])) } else { inputStore, found := storeAccessor.Get(inputName) if !found { return nil, fmt.Errorf("store %q not found", inputName) } - out = append(out, wasm.NewStoreReaderInput(inputName, inputStore)) + out = append(out, wasm.NewStoreReaderInput(inputName, inputStore, p.execGraph.ModulesInitBlocks()[inputName])) } case *pbsubstreams.Module_Input_Source_: // in.Source.Type checking against `blockType` is already done // upfront in `validateGraph`.
- out = append(out, wasm.NewSourceInput(in.Source.Type)) + out = append(out, wasm.NewSourceInput(in.Source.Type, 0)) default: return nil, fmt.Errorf("invalid input struct for module %q", module.Name) } diff --git a/pipeline/pipeline_test.go b/pipeline/pipeline_test.go index caeb33142..212182eee 100644 --- a/pipeline/pipeline_test.go +++ b/pipeline/pipeline_test.go @@ -21,7 +21,6 @@ import ( pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" pbsubstreamstest "github.com/streamingfast/substreams/pb/sf/substreams/v1/test" "github.com/streamingfast/substreams/pipeline/exec" - "github.com/streamingfast/substreams/pipeline/outputmodules" "github.com/streamingfast/substreams/reqctx" store2 "github.com/streamingfast/substreams/storage/store" "github.com/streamingfast/substreams/wasm" @@ -59,7 +58,7 @@ func TestPipeline_runExecutor(t *testing.T) { ctx = reqctx.WithReqStats(ctx, metrics.NewReqStats(&metrics.Config{}, zap.NewNop())) pipe := &Pipeline{ forkHandler: NewForkHandler(), - outputGraph: outputmodules.TestNew(), + execGraph: exec.TestNew(), } clock := &pbsubstreams.Clock{Id: test.block.Id, Number: test.block.Number} execOutput := NewExecOutputTesting(t, bstreamBlk(t, test.block), clock) @@ -75,7 +74,7 @@ func TestPipeline_runExecutor(t *testing.T) { } func mapTestExecutor(t *testing.T, ctx context.Context, name string) *exec.MapperModuleExecutor { - pkg := manifest.TestReadManifest(t, "../test/testdata/substreams-test-v0.1.0.spkg") + pkg := manifest.TestReadManifest(t, "../test/testdata/simple_substreams/substreams-test-v0.1.0.spkg") binaryIndex := uint32(0) for _, module := range pkg.Modules.Modules { @@ -94,12 +93,14 @@ func mapTestExecutor(t *testing.T, ctx context.Context, name string) *exec.Mappe exec.NewBaseExecutor( ctx, name, + 0, module, false, // could exercise with cache enabled too []wasm.Argument{ wasm.NewParamsInput("my test params"), - wasm.NewSourceInput("sf.substreams.v1.test.Block"), + wasm.NewSourceInput("sf.substreams.v1.test.Block", 0), }, + nil, name, otel.GetTracerProvider().Tracer("test"), ), @@ -137,19 +138,19 @@ func TestSetupSubrequestStores(t *testing.T) { storeModuleKind := &pbsubstreams.Module_KindStore_{KindStore: &pbsubstreams.Module_KindStore{}} p := Pipeline{ stores: &Stores{configs: confMap}, - executionStages: outputmodules.ExecutionStages{ - outputmodules.StageLayers{ - outputmodules.LayerModules{ + executionStages: exec.ExecutionStages{ + exec.StageLayers{ + exec.LayerModules{ &pbsubstreams.Module{Name: "mod1", Kind: storeModuleKind}, }, }, - outputmodules.StageLayers{ - outputmodules.LayerModules{ + exec.StageLayers{ + exec.LayerModules{ &pbsubstreams.Module{Name: "mod2", Kind: storeModuleKind}, }, }, - outputmodules.StageLayers{ - outputmodules.LayerModules{ + exec.StageLayers{ + exec.LayerModules{ &pbsubstreams.Module{Name: "mod3", Kind: storeModuleKind}, }, }, @@ -219,6 +220,7 @@ func withTestRequest(t *testing.T, outputModule string, startBlock uint64) conte func() (uint64, error) { return 0, nil }, newTestCursorResolver().resolveCursor, func() (uint64, error) { return 0, nil }, + 100, ) require.NoError(t, err) return reqctx.WithRequest(context.Background(), req) diff --git a/pipeline/process_block.go b/pipeline/process_block.go index 007a4b60e..ddcfdeeb0 100644 --- a/pipeline/process_block.go +++ b/pipeline/process_block.go @@ -8,13 +8,9 @@ import ( "runtime/debug" "sync" + "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dmetering" - -
"github.com/streamingfast/bstream" - "go.uber.org/zap" - "google.golang.org/protobuf/types/known/timestamppb" - "github.com/streamingfast/substreams/metrics" pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" pbsubstreamsrpc "github.com/streamingfast/substreams/pb/sf/substreams/rpc/v2" @@ -22,7 +18,8 @@ import ( "github.com/streamingfast/substreams/pipeline/exec" "github.com/streamingfast/substreams/reqctx" "github.com/streamingfast/substreams/storage/execout" - "github.com/streamingfast/substreams/storage/store" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/timestamppb" ) func (p *Pipeline) ProcessFromExecOutput( @@ -117,7 +114,7 @@ func (p *Pipeline) processBlock( switch step { case bstream.StepUndo: p.blockStepMap[bstream.StepUndo]++ - if err = p.handleStepUndo(ctx, clock, cursor, reorgJunctionBlock); err != nil { + if err = p.handleStepUndo(clock, cursor, reorgJunctionBlock); err != nil { return fmt.Errorf("step undo: %w", err) } case bstream.StepStalled: @@ -180,9 +177,9 @@ func (p *Pipeline) handleStepStalled(clock *pbsubstreams.Clock) error { return nil } -func (p *Pipeline) handleStepUndo(ctx context.Context, clock *pbsubstreams.Clock, cursor *bstream.Cursor, reorgJunctionBlock bstream.BlockRef) error { +func (p *Pipeline) handleStepUndo(clock *pbsubstreams.Clock, cursor *bstream.Cursor, reorgJunctionBlock bstream.BlockRef) error { - if err := p.forkHandler.handleUndo(clock, cursor); err != nil { + if err := p.forkHandler.handleUndo(clock); err != nil { return fmt.Errorf("reverting outputs: %w", err) } @@ -232,7 +229,7 @@ func (p *Pipeline) handleStepNew(ctx context.Context, clock *pbsubstreams.Clock, if reqDetails.IsTier2Request { sendError = p.returnInternalModuleProgressOutputs(clock, forceSend) } else { - sendError = p.returnRPCModuleProgressOutputs(clock, forceSend) + sendError = p.returnRPCModuleProgressOutputs(forceSend) } if err == nil { err = sendError @@ -244,19 +241,6 @@ func (p *Pipeline) handleStepNew(ctx context.Context, clock *pbsubstreams.Clock, return io.EOF } - // FIXME: when handling the real-time segment, it's dangerous - // to save the stores, as they might have components that get - // reverted, and we won't go change the stores then. - // So we _shouldn't_ save the stores unless we're in irreversible-only - // mode. Basically, tier1 shouldn't save unless it's a StepNewIrreversible - // (we're in a historical segment) - // When we're in the real-time segment, we shouldn't save anything. 
- if reqDetails.IsTier2Request { - if err := p.stores.flushStores(ctx, p.executionStages, clock.Number); err != nil { - return fmt.Errorf("step new irr: stores end of stream: %w", err) - } - } - // note: if we start on a forked cursor, the undo signal will appear BEFORE we send the snapshot if p.gate.shouldSendSnapshot() && !reqDetails.IsTier2Request { if err := p.sendSnapshots(p.stores.StoreMap, reqDetails.DebugInitialStoreSnapshotForModules); err != nil { @@ -300,19 +284,23 @@ func (p *Pipeline) executeModules(ctx context.Context, execOutput execout.Execut p.mapModuleOutput = nil p.extraMapModuleOutputs = nil p.extraStoreModuleOutputs = nil - moduleExecutors, err := p.buildModuleExecutors(ctx) - if err != nil { + blockNum := execOutput.Clock().Number + + // they may be already built, but we call this function every time to enable future dynamic changes + if err := p.BuildModuleExecutors(ctx); err != nil { return fmt.Errorf("building wasm module tree: %w", err) } - for _, stage := range moduleExecutors { + for _, stage := range p.ModuleExecutors { //t0 := time.Now() - if len(stage) < 2 { //fmt.Println("Linear stage", len(stage)) for _, executor := range stage { + if !executor.RunsOnBlock(blockNum) { + continue + } res := p.execute(ctx, executor, execOutput) if err := p.applyExecutionResult(ctx, executor, res, execOutput); err != nil { - return fmt.Errorf("applying executor results %q: %w", executor.Name(), res.err) + return fmt.Errorf("applying executor results %q on block %d: %w", executor.Name(), blockNum, res.err) } } } else { @@ -320,6 +308,10 @@ func (p *Pipeline) executeModules(ctx context.Context, execOutput execout.Execut wg := sync.WaitGroup{} //fmt.Println("Parallelized in stage", stageIdx, len(stage)) for i, executor := range stage { + if !executor.RunsOnBlock(execOutput.Clock().Number) { + results[i] = resultObj{skipped: true} + continue + } wg.Add(1) i := i executor := executor @@ -332,13 +324,16 @@ func (p *Pipeline) executeModules(ctx context.Context, execOutput execout.Execut wg.Wait() for i, result := range results { + if result.skipped { + continue + } executor := stage[i] if result.err != nil { //p.returnFailureProgress(ctx, err, executor) return fmt.Errorf("running executor %q: %w", executor.Name(), result.err) } if err := p.applyExecutionResult(ctx, executor, result, execOutput); err != nil { - return fmt.Errorf("applying executor results %q: %w", executor.Name(), result.err) + return fmt.Errorf("applying executor results %q on block %d: %w", executor.Name(), blockNum, result.err) } } } @@ -349,9 +344,12 @@ func (p *Pipeline) executeModules(ctx context.Context, execOutput execout.Execut } type resultObj struct { - output *pbssinternal.ModuleOutput - bytes []byte - err error + output *pbssinternal.ModuleOutput + bytes []byte + bytesForFiles []byte + err error + skipped bool + skippedFromIndex bool } func (p *Pipeline) execute(ctx context.Context, executor exec.ModuleExecutor, execOutput execout.ExecutionOutput) resultObj { @@ -360,43 +358,42 @@ func (p *Pipeline) execute(ctx context.Context, executor exec.ModuleExecutor, ex executorName := executor.Name() logger.Debug("executing", zap.Uint64("block", execOutput.Clock().Number), zap.String("module_name", executorName)) - moduleOutput, outputBytes, runError := exec.RunModule(ctx, executor, execOutput) - return resultObj{moduleOutput, outputBytes, runError} + moduleOutput, outputBytes, outputBytesFiles, skippedFromIndex, runError := exec.RunModule(ctx, executor, execOutput) + + return resultObj{moduleOutput, outputBytes, 
outputBytesFiles, runError, false, skippedFromIndex} } func (p *Pipeline) applyExecutionResult(ctx context.Context, executor exec.ModuleExecutor, res resultObj, execOutput execout.ExecutionOutput) (err error) { executorName := executor.Name() - hasValidOutput := executor.HasValidOutput() moduleOutput, outputBytes, runError := res.output, res.bytes, res.err if runError != nil { - if hasValidOutput { - p.saveModuleOutput(moduleOutput, executor.Name(), reqctx.Details(ctx).ProductionMode) - } return fmt.Errorf("execute module: %w", runError) } - if hasValidOutput { + if executor.HasValidOutput() { p.saveModuleOutput(moduleOutput, executor.Name(), reqctx.Details(ctx).ProductionMode) + } + + if !res.skippedFromIndex && executor.HasValidOutput() { if err := execOutput.Set(executorName, outputBytes); err != nil { return fmt.Errorf("set output cache: %w", err) } if moduleOutput != nil { p.forkHandler.addReversibleOutput(moduleOutput, execOutput.Clock().Id) } - } else { // we are in a partial store - if stor, ok := p.GetStoreMap().Get(executorName); ok { - if pkvs, ok := stor.(*store.PartialKV); ok { - if err := execOutput.Set(executorName, pkvs.ReadOps()); err != nil { - return fmt.Errorf("set output cache: %w", err) - } - } + } + if !res.skippedFromIndex && executor.HasOutputForFiles() { + if err := execOutput.SetFileOutput(executorName, res.bytesForFiles); err != nil { + return fmt.Errorf("set output cache: %w", err) } } + return nil } +// this will be sent to the requestor func (p *Pipeline) saveModuleOutput(output *pbssinternal.ModuleOutput, moduleName string, isProduction bool) { if p.isOutputModule(moduleName) { p.mapModuleOutput = toRPCMapModuleOutputs(output) diff --git a/pipeline/resolve.go b/pipeline/resolve.go index 8108a55dd..43baea1a1 100644 --- a/pipeline/resolve.go +++ b/pipeline/resolve.go @@ -7,17 +7,15 @@ import ( "sync/atomic" "connectrpc.com/connect" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" - "github.com/streamingfast/substreams/manifest" - "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/hub" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" - "go.uber.org/zap" - + "github.com/streamingfast/substreams/manifest" pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" pbsubstreamsrpc "github.com/streamingfast/substreams/pb/sf/substreams/rpc/v2" "github.com/streamingfast/substreams/reqctx" + "go.uber.org/zap" ) type getBlockFunc func() (uint64, error) @@ -27,7 +25,8 @@ func BuildRequestDetails( request *pbsubstreamsrpc.Request, getRecentFinalBlock getBlockFunc, resolveCursor CursorResolver, - getHeadBlock getBlockFunc) (req *reqctx.RequestDetails, undoSignal *pbsubstreamsrpc.BlockUndoSignal, err error) { + getHeadBlock getBlockFunc, + segmentSize uint64) (req *reqctx.RequestDetails, undoSignal *pbsubstreamsrpc.BlockUndoSignal, err error) { req = &reqctx.RequestDetails{ Modules: request.Modules, OutputModule: request.OutputModule, @@ -56,7 +55,7 @@ func BuildRequestDetails( } } - linearHandoff, err := computeLinearHandoffBlockNum(request.ProductionMode, req.ResolvedStartBlockNum, request.StopBlockNum, getRecentFinalBlock, moduleHasStatefulDependencies) + linearHandoff, err := computeLinearHandoffBlockNum(request.ProductionMode, req.ResolvedStartBlockNum, request.StopBlockNum, getRecentFinalBlock, moduleHasStatefulDependencies, segmentSize) if err != nil { return nil, nil, err } @@ -78,9 +77,9 @@ func BuildRequestDetailsFromSubrequest(request 
*pbssinternal.ProcessRangeRequest ProductionMode: true, IsTier2Request: true, Tier2Stage: int(request.Stage), - StopBlockNum: request.StopBlockNum, - LinearHandoffBlockNum: request.StopBlockNum, - ResolvedStartBlockNum: request.StartBlockNum, + StopBlockNum: request.StopBlock(), + LinearHandoffBlockNum: request.StopBlock(), + ResolvedStartBlockNum: request.StartBlock(), UniqueID: nextUniqueID(), } return req @@ -92,19 +91,28 @@ func nextUniqueID() uint64 { return uniqueRequestIDCounter.Add(1) } -func computeLinearHandoffBlockNum(productionMode bool, startBlock, stopBlock uint64, getRecentFinalBlockFunc func() (uint64, error), stateRequired bool) (uint64, error) { +func computeLinearHandoffBlockNum(productionMode bool, startBlock, stopBlock uint64, getRecentFinalBlockFunc func() (uint64, error), stateRequired bool, segmentSize uint64) (uint64, error) { + // get value of the next boundary after stopBlock if productionMode { - maxHandoff, err := getRecentFinalBlockFunc() + nextBoundary := stopBlock + if remainder := (stopBlock % segmentSize); remainder != 0 { + nextBoundary = nextBoundary - remainder + segmentSize + } + + libHandoff, err := getRecentFinalBlockFunc() if err != nil { if stopBlock == 0 { return 0, fmt.Errorf("cannot determine a recent finalized block: %w", err) } - return stopBlock, nil + return nextBoundary, nil } - if stopBlock == 0 { - return maxHandoff, nil + libHandoffBoundary := libHandoff - (libHandoff % segmentSize) + + if stopBlock == 0 || libHandoff < stopBlock { + return libHandoffBoundary, nil } - return min(stopBlock, maxHandoff), nil + + return nextBoundary, nil } //if no state required, we don't need to ever back-process blocks. we can start flowing blocks right away from the start block @@ -112,12 +120,15 @@ func computeLinearHandoffBlockNum(productionMode bool, startBlock, stopBlock uin return startBlock, nil } - maxHandoff, err := getRecentFinalBlockFunc() + prevBoundary := startBlock - (startBlock % segmentSize) + + libHandoff, err := getRecentFinalBlockFunc() if err != nil { - return startBlock, nil + return prevBoundary, nil } + libHandoffBoundary := libHandoff - (libHandoff % segmentSize) - return min(startBlock, maxHandoff), nil + return min(prevBoundary, libHandoffBoundary), nil } // resolveStartBlockNum will occasionally modify or remove the cursor inside the request @@ -197,7 +208,7 @@ type junctionBlockGetter struct { currentHead bstream.BlockRef } -var Done = errors.New("done") +var ErrDone = errors.New("done") func (j *junctionBlockGetter) ProcessBlock(block *pbbstream.Block, obj interface{}) error { j.currentHead = obj.(bstream.Cursorable).Cursor().HeadBlock @@ -205,10 +216,10 @@ func (j *junctionBlockGetter) ProcessBlock(block *pbbstream.Block, obj interface stepable := obj.(bstream.Stepable) switch { case stepable.Step().Matches(bstream.StepNew): - return Done + return ErrDone case stepable.Step().Matches(bstream.StepUndo): j.reorgJunctionBlock = stepable.ReorgJunctionBlock() - return Done + return ErrDone } // ignoring other steps return nil @@ -230,7 +241,7 @@ func NewCursorResolver(hub *hub.ForkableHub, mergedBlocksStore, forkedBlocksStor case <-src.Terminated(): } - if !errors.Is(src.Err(), Done) { + if !errors.Is(src.Err(), ErrDone) { headBlock := cursor.HeadBlock if headNum, headID, _, _, err := hub.HeadInfo(); err == nil { headBlock = bstream.NewBlockRef(headID, headNum) diff --git a/pipeline/resolve_test.go b/pipeline/resolve_test.go index 199a4e47a..28b6f28e7 100644 --- a/pipeline/resolve_test.go +++ b/pipeline/resolve_test.go @@ -172,8
+172,9 @@ func Test_resolveStartBlockNum(t *testing.T) { } } -func Test_computeLiveHandoffBlockNum(t *testing.T) { +func Test_computeLinearHandoffBlockNum(t *testing.T) { tests := []struct { + name string liveHubAvailable bool recentBlockNum uint64 prodMode bool @@ -183,39 +184,23 @@ func Test_computeLiveHandoffBlockNum(t *testing.T) { expectError bool stateRequired bool }{ - // prod (start-block ignored) - {true, 100, true, 10, 0, 100, false, true}, - {true, 100, true, 10, 150, 100, false, true}, - {true, 100, true, 10, 50, 50, false, true}, - {false, 0, true, 10, 50, 50, false, true}, - {false, 0, true, 10, 0, 0, true, true}, + // development mode + {"g1_start_stop_same_boundary", true, 500, false, 138, 142, 100, false, true}, + {"g1_start_stop_same_boundary_livehub_fails", false, 500, false, 138, 142, 100, false, true}, + {"g2_start_stop_across_boundary", true, 500, false, 138, 242, 100, false, true}, + {"g2_start_stop_across_boundary_livehub_fails", true, 500, false, 138, 242, 100, false, true}, - // prod (start-block ignored) (state not required) - {true, 100, true, 10, 0, 100, false, false}, - {true, 100, true, 10, 150, 100, false, false}, - {true, 100, true, 10, 50, 50, false, false}, - {false, 0, true, 10, 50, 50, false, false}, - {false, 0, true, 10, 0, 0, true, false}, - - // non-prod (stop-block ignored) (state required) - {true, 100, false, 10, 0, 10, false, true}, - {true, 100, false, 10, 9999, 10, false, true}, - {true, 100, false, 150, 0, 100, false, true}, - {true, 100, false, 150, 9999, 100, false, true}, - {false, 0, false, 150, 0, 150, false, true}, - {false, 0, false, 150, 9999, 150, false, true}, - - // non-prod (stop-block ignored) (state not required) - {true, 100, false, 10, 0, 10, false, false}, - {true, 100, false, 10, 9999, 10, false, false}, - {true, 100, false, 150, 0, 150, false, false}, - {true, 100, false, 150, 9999, 150, false, false}, - {false, 0, false, 150, 0, 150, false, false}, - {false, 0, false, 150, 9999, 150, false, false}, + // production mode + {"g4_start_stop_same_boundary", true, 500, true, 138, 142, 200, false, true}, + {"g5_start_stop_across_boundary", true, 500, true, 138, 242, 300, false, true}, + {"g6_lib_between_start_and_stop", true, 342, true, 121, 498, 300, false, true}, + {"g6_lib_between_start_and_stop_livehub_fails", false, 342, true, 121, 498, 500, false, true}, + {"g7_stop_block_infinity", true, 342, true, 121, 0, 300, false, true}, + {"g7_stop_block_infinity_livehub_fails", false, 342, true, 121, 0, 300, true, true}, } for _, test := range tests { - t.Run("", func(t *testing.T) { + t.Run(test.name, func(t *testing.T) { got, err := computeLinearHandoffBlockNum( test.prodMode, test.startBlockNum, @@ -225,7 +210,7 @@ func Test_computeLiveHandoffBlockNum(t *testing.T) { return 0, fmt.Errorf("live not available") } return test.recentBlockNum, nil - }, test.stateRequired) + }, test.stateRequired, 100) if test.expectError { assert.Error(t, err) } else { @@ -253,10 +238,11 @@ func TestBuildRequestDetails(t *testing.T) { t.Error("should not pass here") return 0, nil }, + 100, ) require.NoError(t, err) - assert.Equal(t, 10, int(req.ResolvedStartBlockNum)) - assert.Equal(t, 10, int(req.LinearHandoffBlockNum)) + assert.Equal(t, 10, int(req.ResolvedStartBlockNum), "resolved start block") + assert.Equal(t, 0, int(req.LinearHandoffBlockNum), "linear handoff blocknum") req, _, err = BuildRequestDetails( context.Background(), @@ -273,8 +259,9 @@ func TestBuildRequestDetails(t *testing.T) { t.Error("should not pass here") return 0, nil }, + 100, )
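The rewritten computeLinearHandoffBlockNum snaps both the stop block and the LIB to segment boundaries. A worked example of the rounding, with segmentSize fixed at 100 as in the tests above (sketch, not library code):

package main

import "fmt"

func nextBoundary(block, segmentSize uint64) uint64 {
	if r := block % segmentSize; r != 0 {
		return block - r + segmentSize
	}
	return block
}

func prevBoundary(block, segmentSize uint64) uint64 {
	return block - block%segmentSize
}

func main() {
	fmt.Println(nextBoundary(142, 100)) // 200, matching "g4_start_stop_same_boundary"
	fmt.Println(prevBoundary(342, 100)) // 300, the LIB boundary in "g6_lib_between_start_and_stop"
	fmt.Println(prevBoundary(138, 100)) // 100, the development-mode handoff in "g1_start_stop_same_boundary"
}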
require.NoError(t, err) assert.Equal(t, 10, int(req.ResolvedStartBlockNum)) - assert.Equal(t, 999, int(req.LinearHandoffBlockNum)) + assert.Equal(t, 900, int(req.LinearHandoffBlockNum)) } diff --git a/pipeline/storeboundary.go b/pipeline/storeboundary.go index f0c13625c..06e439587 100644 --- a/pipeline/storeboundary.go +++ b/pipeline/storeboundary.go @@ -5,7 +5,6 @@ import "sort" type storeBoundary struct { nextBoundary uint64 interval uint64 - isSubRequest bool requestStopBlock uint64 stopBlockReached bool } diff --git a/pipeline/stores.go b/pipeline/stores.go index 39dae02de..641ba5d9c 100644 --- a/pipeline/stores.go +++ b/pipeline/stores.go @@ -9,7 +9,7 @@ import ( "github.com/streamingfast/substreams/block" pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" - "github.com/streamingfast/substreams/pipeline/outputmodules" + "github.com/streamingfast/substreams/pipeline/exec" "github.com/streamingfast/substreams/reqctx" "github.com/streamingfast/substreams/storage/store" ) @@ -24,9 +24,10 @@ type Stores struct { // tier1 to tier2. partialsWritten block.Ranges // when backprocessing, to report back to orchestrator tier string + storesToWrite map[string]struct{} } -func NewStores(ctx context.Context, storeConfigs store.ConfigMap, storeSnapshotSaveInterval, requestStartBlockNum, stopBlockNum uint64, isTier2Request bool) *Stores { +func NewStores(ctx context.Context, storeConfigs store.ConfigMap, storeSnapshotSaveInterval, requestStartBlockNum, stopBlockNum uint64, isTier2Request bool, storesToWrite map[string]struct{}) *Stores { // FIXME(abourget): a StoreBoundary should exist for EACH Store // because the module's Initial Block could change the range of each // store. @@ -41,6 +42,7 @@ func NewStores(ctx context.Context, storeConfigs store.ConfigMap, storeSnapshotS bounder: bounder, tier: tier, logger: reqctx.Logger(ctx), + storesToWrite: storesToWrite, } } @@ -57,35 +59,32 @@ func (s *Stores) resetStores() { } // flushStores is called only for Tier2 requests, so as not to save reversible stores.
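The rewrite of flushStores/saveStoresSnapshots below replaces the last-layer lookup with an explicit storesToWrite set. A simplified sketch of that shape, with hypothetical callbacks standing in for the dstore-backed existence check and snapshot write:

package main

import "fmt"

// flush snapshots only the whitelisted stores, skipping any store that
// already has a FullKV file at this boundary (simplified sketch).
func flush(storesToWrite map[string]struct{}, existsFullKV func(name string, boundary uint64) bool, save func(name string, boundary uint64) error, boundary uint64) error {
	for name := range storesToWrite {
		if existsFullKV(name, boundary) {
			continue // produced by a previous run, nothing to do
		}
		if err := save(name, boundary); err != nil {
			return fmt.Errorf("save store snapshot %q: %w", name, err)
		}
	}
	return nil
}

func main() {
	toWrite := map[string]struct{}{"store_a": {}, "store_b": {}}
	_ = flush(toWrite,
		func(name string, _ uint64) bool { return name == "store_a" }, // store_a already cached
		func(name string, b uint64) error { fmt.Println("saving", name, "at", b); return nil },
		1000,
	)
}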
-func (s *Stores) flushStores(ctx context.Context, executionStages outputmodules.ExecutionStages, blockNum uint64) (err error) { +func (s *Stores) flushStores(ctx context.Context, executionStages exec.ExecutionStages, blockNum uint64) (err error) { if s.StoreMap == nil { return // fast exit for cases without stores or no linear processing } - lastLayer := executionStages.LastStage().LastLayer() - if !lastLayer.IsStoreLayer() { - return nil - } boundaryIntervals := s.bounder.GetStoreFlushRanges(s.isTier2Request, s.bounder.requestStopBlock, blockNum) for _, boundaryBlock := range boundaryIntervals { - if err := s.saveStoresSnapshots(ctx, lastLayer, len(executionStages)-1, boundaryBlock); err != nil { + if err := s.saveStoresSnapshots(ctx, len(executionStages)-1, boundaryBlock); err != nil { return fmt.Errorf("saving stores snapshot at bound %d: %w", boundaryBlock, err) } } return nil } -func (s *Stores) saveStoresSnapshots(ctx context.Context, lastLayer outputmodules.LayerModules, stage int, boundaryBlock uint64) (err error) { - for _, mod := range lastLayer { - store := s.StoreMap[mod.Name] - s.logger.Info("flushing store at boundary", zap.Uint64("boundary", boundaryBlock), zap.String("store", mod.Name), zap.Int("stage", stage)) +func (s *Stores) saveStoresSnapshots(ctx context.Context, stage int, boundaryBlock uint64) (err error) { + for mod := range s.storesToWrite { + store := s.StoreMap[mod] + s.logger.Info("flushing store at boundary", zap.Uint64("boundary", boundaryBlock), zap.String("store", mod), zap.Int("stage", stage)) // TODO when partials are generic again, we can also check if PartialKV exists and skip if it does. - exists, _ := s.configs[mod.Name].ExistsFullKV(ctx, boundaryBlock) - if exists { + existsFullKv, _ := s.configs[mod].ExistsFullKV(ctx, boundaryBlock) + if existsFullKv { continue } + if err := s.saveStoreSnapshot(ctx, store, boundaryBlock); err != nil { - return fmt.Errorf("save store snapshot %q: %w", mod.Name, err) + return fmt.Errorf("save store snapshot %q: %w", mod, err) } } return nil diff --git a/pipeline/terminate.go b/pipeline/terminate.go index 627ee8c2e..6c0881c2e 100644 --- a/pipeline/terminate.go +++ b/pipeline/terminate.go @@ -10,8 +10,6 @@ import ( "github.com/streamingfast/bstream/stream" "go.uber.org/zap" - "github.com/streamingfast/substreams/block" - pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" "github.com/streamingfast/substreams/reqctx" ) @@ -50,24 +48,20 @@ func (p *Pipeline) OnStreamTerminated(ctx context.Context, err error) error { return fmt.Errorf("end of stream: %w", err) } - // WARN/FIXME: calling flushStores once at the end of a process - // is super risky, as this function was made to be called at each - // block to flush stores supporting holes in chains. - // And it will write multiple stores with the same content - // when presented with multiple boundaries / ranges.
if err := p.stores.flushStores(ctx, p.executionStages, reqDetails.StopBlockNum); err != nil { return fmt.Errorf("step new irr: stores end of stream: %w", err) } - return nil -} - -func toPBInternalBlockRanges(in block.Ranges) (out []*pbssinternal.BlockRange) { - for _, r := range in { - out = append(out, &pbssinternal.BlockRange{ - StartBlock: r.StartBlock, - EndBlock: r.ExclusiveEndBlock, - }) + if reqctx.Details(ctx).IsTier2Request { + err := p.returnInternalModuleProgressOutputs(p.lastFinalClock, true) + if err != nil { + logger.Error("returning internal module progress outputs", zap.Error(err)) + } + } else { + err := p.returnRPCModuleProgressOutputs(true) + if err != nil { + logger.Error("returning rpc module progress outputs", zap.Error(err)) + } } - return + return nil } diff --git a/proto/sf/substreams/intern/v2/deltas.proto b/proto/sf/substreams/intern/v2/deltas.proto index bef041ea1..e06890172 100644 --- a/proto/sf/substreams/intern/v2/deltas.proto +++ b/proto/sf/substreams/intern/v2/deltas.proto @@ -4,6 +4,7 @@ package sf.substreams.internal.v2; import "google/protobuf/any.proto"; import "sf/substreams/v1/deltas.proto"; +import "sf/substreams/index/v1/keys.proto"; option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2;pbssinternal"; diff --git a/proto/sf/substreams/intern/v2/service.proto b/proto/sf/substreams/intern/v2/service.proto index 3462f53a6..b18f7045c 100644 --- a/proto/sf/substreams/intern/v2/service.proto +++ b/proto/sf/substreams/intern/v2/service.proto @@ -17,24 +17,26 @@ enum WASMModuleType { } message ProcessRangeRequest { - uint64 start_block_num = 1; - uint64 stop_block_num = 2; + reserved 1; + uint64 stop_block_num = 2 [deprecated = true]; + string output_module = 3; sf.substreams.v1.Modules modules = 4; uint32 stage = 5; // 0-based index of stage to execute up to string metering_config = 6; - uint64 first_streamable_block = 7; // first block that can be streamed - uint64 last_streamable_block = 8; // last block that can be streamed + uint64 first_streamable_block = 7; // first block that can be streamed on that chain + reserved 8; - map<string, string> wasm_modules = 9; // TODO: rename to `wasm_extension_configs` + map<string, string> wasm_extension_configs = 9; string merged_blocks_store = 10; // store to use for merged blocks string state_store = 11; // store to use for substreams state string state_store_default_tag = 12; // default tag to use for state store - uint64 state_bundle_size = 13; // number of blocks to process in a single batch + uint64 segment_size = 13; // number of blocks to process in a single batch string block_type = 14; // block type to process + uint64 segment_number = 15; // segment_number * segment_size = start_block_num } message ProcessRangeResponse { diff --git a/proto/sf/substreams/v1/modules.proto b/proto/sf/substreams/v1/modules.proto index 2f154ba7b..4d16adfc9 100644 --- a/proto/sf/substreams/v1/modules.proto +++ b/proto/sf/substreams/v1/modules.proto @@ -33,11 +33,17 @@ message Module { BlockFilter block_filter = 9; - message BlockFilter { string module = 1; - string query = 2; + oneof query { + string query_string = 2; + QueryFromParams query_from_params = 3; + // QueryFromStore query_from_store_keys = 3; + }; } + + message QueryFromParams {} + message KindMap { string output_type = 1; } diff --git a/reqctx/context.go b/reqctx/context.go index 4d550d44f..58d915312 100644 --- a/reqctx/context.go +++ b/reqctx/context.go @@ -6,7 +6,9 @@ import ( "io"
"go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace/noop" + "github.com/streamingfast/dmetering" "github.com/streamingfast/logging" "github.com/streamingfast/substreams/metrics" "go.opentelemetry.io/otel/codes" @@ -33,7 +35,7 @@ func Tracer(ctx context.Context) ttrace.Tracer { if t, ok := tracer.(ttrace.Tracer); ok { return t } - return ttrace.NewNoopTracerProvider().Tracer("") + return noop.NewTracerProvider().Tracer("") } func WithTracer(ctx context.Context, tracer ttrace.Tracer) context.Context { @@ -71,6 +73,22 @@ func WithSpan(ctx context.Context, name string) (context.Context, ISpan) { return context.WithValue(ctx, spanKey, s), s } +type emitterKeyType struct{} + +var emitterKey = emitterKeyType{} + +func Emitter(ctx context.Context) dmetering.EventEmitter { + emitter := ctx.Value(emitterKey) + if t, ok := emitter.(dmetering.EventEmitter); ok { + return t + } + return nil +} + +func WithEmitter(ctx context.Context, emitter dmetering.EventEmitter) context.Context { + return context.WithValue(ctx, emitterKey, emitter) +} + type ISpan interface { // End completes the Span. The Span is considered complete and ready to be // delivered through the rest of the telemetry pipeline after this method diff --git a/reqctx/noopspan.go b/reqctx/noopspan.go index 9f70ac52a..94267694a 100644 --- a/reqctx/noopspan.go +++ b/reqctx/noopspan.go @@ -4,6 +4,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ttrace "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" ) // NoopSpan is an implementation of span that preforms no operations. @@ -39,4 +40,4 @@ func (n *NoopSpan) AddEvent(string, ...ttrace.EventOption) {} func (n *NoopSpan) SetName(string) {} // TracerProvider returns a no-op TracerProvider. -func (n *NoopSpan) TracerProvider() ttrace.TracerProvider { return ttrace.NewNoopTracerProvider() } +func (n *NoopSpan) TracerProvider() ttrace.TracerProvider { return noop.NewTracerProvider() } diff --git a/reqctx/request.go b/reqctx/request.go index 46abb4b19..e666c614f 100644 --- a/reqctx/request.go +++ b/reqctx/request.go @@ -21,7 +21,6 @@ type RequestDetails struct { LinearGateBlockNum uint64 StopBlockNum uint64 MaxParallelJobs uint64 - CacheTag string UniqueID uint64 ProductionMode bool diff --git a/service/config/runtimeconfig.go b/service/config/runtimeconfig.go index 202a9c105..99fbcf449 100644 --- a/service/config/runtimeconfig.go +++ b/service/config/runtimeconfig.go @@ -9,7 +9,7 @@ import ( // RuntimeConfig is a global configuration for the service. // It is passed down and should not be modified unless cloned. 
type RuntimeConfig struct { - StateBundleSize uint64 + SegmentSize uint64 MaxJobsAhead uint64 // limit execution of dependency jobs so they don't go too far ahead of the modules that depend on them (e.g. module X is 2 million blocks ahead of module Y that depends on it, we don't want to schedule more module X jobs until Y has caught up a little bit) DefaultParallelSubrequests uint64 // how many sub-jobs to launch for a given user @@ -23,7 +23,7 @@ type RuntimeConfig struct { } func NewTier1RuntimeConfig( - stateBundleSize uint64, + segmentSize uint64, parallelSubrequests uint64, maxJobsAhead uint64, baseObjectStore dstore.Store, @@ -31,7 +31,7 @@ func NewTier1RuntimeConfig( workerFactory work.WorkerFactory, ) RuntimeConfig { return RuntimeConfig{ - StateBundleSize: stateBundleSize, + SegmentSize: segmentSize, DefaultParallelSubrequests: parallelSubrequests, MaxJobsAhead: maxJobsAhead, BaseObjectStore: baseObjectStore, diff --git a/service/logging.go b/service/logging.go index be48006e0..9b0e46887 100644 --- a/service/logging.go +++ b/service/logging.go @@ -4,4 +4,4 @@ import ( "github.com/streamingfast/logging" ) -var zlog, tracer = logging.PackageLogger("substreams-service", "github.com/streamingfast/substreams/service") +var zlog, _ = logging.PackageLogger("substreams-service", "github.com/streamingfast/substreams/service") diff --git a/service/metering.go b/service/metering.go index 6b417b7d3..abf39e8c4 100644 --- a/service/metering.go +++ b/service/metering.go @@ -4,13 +4,12 @@ import ( "context", "time" - "go.uber.org/zap" - "github.com/streamingfast/dmetering" + "github.com/streamingfast/substreams/reqctx" "google.golang.org/protobuf/proto" ) -func sendMetering(ctx context.Context, meter dmetering.Meter, userID, apiKeyID, ip, userMeta, endpoint string, resp proto.Message, logger *zap.Logger) { +func sendMetering(ctx context.Context, meter dmetering.Meter, userID, apiKeyID, ip, userMeta, endpoint string, resp proto.Message) { bytesRead := meter.BytesReadDelta() bytesWritten := meter.BytesWrittenDelta() egressBytes := proto.Size(resp) @@ -35,7 +34,7 @@ func sendMetering(ctx context.Context, meter dmetering.Meter, userID, apiKeyID, Timestamp: time.Now(), } - emitter := ctx.Value("event_emitter").(dmetering.EventEmitter) + emitter := reqctx.Emitter(ctx) if emitter == nil { dmetering.Emit(context.WithoutCancel(ctx), event) } else { diff --git a/service/stream.go b/service/stream.go index 08afc36d0..9225c06d8 100644 --- a/service/stream.go +++ b/service/stream.go @@ -88,8 +88,6 @@ func (s *StreamFactory) GetRecentFinalBlock() (uint64, error) { if finalBlockNum > bstream.GetProtocolFirstStreamableBlock+200 { finalBlockNum -= finalBlockNum % 100 finalBlockNum -= 100 - } else if finalBlockNum > bstream.GetProtocolFirstStreamableBlock+200 { - finalBlockNum -= finalBlockNum % 100 } return finalBlockNum, err diff --git a/service/testing.go b/service/testing.go index cc171c00d..b107e2307 100644 --- a/service/testing.go +++ b/service/testing.go @@ -13,7 +13,7 @@ import ( "github.com/streamingfast/substreams" pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2" pbsubstreamsrpc "github.com/streamingfast/substreams/pb/sf/substreams/rpc/v2" - "github.com/streamingfast/substreams/pipeline/outputmodules" + "github.com/streamingfast/substreams/pipeline/exec" "github.com/streamingfast/substreams/service/config" ) @@ -34,12 +34,12 @@ func TestNewService(runtimeConfig config.RuntimeConfig, linearHandoffBlockNum ui } func (s *Tier1Service) TestBlocks(ctx context.Context, isSubRequest bool,
request *pbsubstreamsrpc.Request, respFunc substreams.ResponseFunc) error { - outputGraph, err := outputmodules.NewOutputModuleGraph(request.OutputModule, request.ProductionMode, request.Modules) + execGraph, err := exec.NewOutputModuleGraph(request.OutputModule, request.ProductionMode, request.Modules) if err != nil { return stream.NewErrInvalidArg(err.Error()) } - return s.blocks(ctx, request, outputGraph, respFunc) + return s.blocks(ctx, request, execGraph, respFunc) } func TestNewServiceTier2(moduleExecutionTracing bool, streamFactoryFunc StreamFactoryFunc) *Tier2Service { diff --git a/service/tier1.go b/service/tier1.go index 01818b281..deb221c19 100644 --- a/service/tier1.go +++ b/service/tier1.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "path/filepath" "regexp" "strconv" "strings" @@ -38,7 +37,6 @@ import ( "github.com/streamingfast/substreams/pipeline" "github.com/streamingfast/substreams/pipeline/cache" "github.com/streamingfast/substreams/pipeline/exec" - "github.com/streamingfast/substreams/pipeline/outputmodules" "github.com/streamingfast/substreams/reqctx" "github.com/streamingfast/substreams/service/config" "github.com/streamingfast/substreams/storage/execout" @@ -71,10 +69,7 @@ type Tier1Service struct { resolveCursor pipeline.CursorResolver getHeadBlock func() (uint64, error) - maximumTier2Retries uint64 tier2RequestParameters reqctx.Tier2RequestParameters - - pipelineOptions []pipeline.Option } func getBlockTypeFromStreamFactory(sf *StreamFactory) (string, error) { @@ -162,7 +157,7 @@ func NewTier1( } tier2RequestParameters.BlockType = blockType - tier2RequestParameters.StateBundleSize = runtimeConfig.StateBundleSize + tier2RequestParameters.StateBundleSize = runtimeConfig.SegmentSize logger.Info("launching tier1 service", zap.Reflect("client_config", substreamsClientConfig), zap.String("block_type", blockType), zap.Bool("with_live", hub != nil)) s := &Tier1Service{ @@ -228,15 +223,15 @@ func (s *Tier1Service) Blocks( return connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("missing modules in request")) } - if err := outputmodules.ValidateTier1Request(request, s.blockType); err != nil { + if err := ValidateTier1Request(request, s.blockType); err != nil { return connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("validate request: %w", err)) } - outputGraph, err := outputmodules.NewOutputModuleGraph(request.OutputModule, request.ProductionMode, request.Modules) + execGraph, err := exec.NewOutputModuleGraph(request.OutputModule, request.ProductionMode, request.Modules) if err != nil { return bsstream.NewErrInvalidArg(err.Error()) } - outputModuleHash := outputGraph.ModuleHashes().Get(request.OutputModule) + outputModuleHash := execGraph.ModuleHashes().Get(request.OutputModule) moduleNames := make([]string, len(request.Modules.Modules)) for i := 0; i < len(moduleNames); i++ { @@ -301,7 +296,7 @@ func (s *Tier1Service) Blocks( } }() - err = s.blocks(runningContext, request, outputGraph, respFunc) + err = s.blocks(runningContext, request, execGraph, respFunc) if connectError := toConnectError(runningContext, err); connectError != nil { switch connect.CodeOf(connectError) { @@ -322,7 +317,7 @@ func (s *Tier1Service) Blocks( return nil } -func (s *Tier1Service) writePackage(ctx context.Context, request *pbsubstreamsrpc.Request, outputGraph *outputmodules.Graph) error { +func (s *Tier1Service) writePackage(ctx context.Context, request *pbsubstreamsrpc.Request, execGraph *exec.Graph, cacheStore dstore.Store) error { asPackage := &pbsubstreams.Package{ Modules: 
request.Modules, ModuleMeta: []*pbsubstreams.ModuleMetadata{}, @@ -333,8 +328,7 @@ func (s *Tier1Service) writePackage(ctx context.Context, request *pbsubstreamsrp return fmt.Errorf("marshalling package: %w", err) } - modulePath := filepath.Join(reqctx.Details(ctx).CacheTag, outputGraph.ModuleHashes().Get(request.OutputModule)) - moduleStore, err := s.runtimeConfig.BaseObjectStore.SubStore(modulePath) + moduleStore, err := cacheStore.SubStore(execGraph.ModuleHashes().Get(request.OutputModule)) if err != nil { return fmt.Errorf("getting substore: %w", err) } @@ -352,7 +346,7 @@ func (s *Tier1Service) writePackage(ctx context.Context, request *pbsubstreamsrp var IsValidCacheTag = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`).MatchString -func (s *Tier1Service) blocks(ctx context.Context, request *pbsubstreamsrpc.Request, outputGraph *outputmodules.Graph, respFunc substreams.ResponseFunc) error { +func (s *Tier1Service) blocks(ctx context.Context, request *pbsubstreamsrpc.Request, execGraph *exec.Graph, respFunc substreams.ResponseFunc) error { chainFirstStreamableBlock := bstream.GetProtocolFirstStreamableBlock if request.StartBlockNum > 0 && request.StartBlockNum < int64(chainFirstStreamableBlock) { return bsstream.NewErrInvalidArg("invalid start block %d, must be >= %d (the first streamable block of the chain)", request.StartBlockNum, chainFirstStreamableBlock) @@ -366,31 +360,31 @@ func (s *Tier1Service) blocks(ctx context.Context, request *pbsubstreamsrpc.Requ logger := reqctx.Logger(ctx) - requestDetails, undoSignal, err := pipeline.BuildRequestDetails(ctx, request, s.getRecentFinalBlock, s.resolveCursor, s.getHeadBlock) + requestDetails, undoSignal, err := pipeline.BuildRequestDetails(ctx, request, s.getRecentFinalBlock, s.resolveCursor, s.getHeadBlock, s.runtimeConfig.SegmentSize) if err != nil { return fmt.Errorf("build request details: %w", err) } requestDetails.MaxParallelJobs = s.runtimeConfig.DefaultParallelSubrequests - requestDetails.CacheTag = s.runtimeConfig.DefaultCacheTag + cacheTag := s.runtimeConfig.DefaultCacheTag if auth := dauth.FromContext(ctx); auth != nil { if parallelJobs := auth.Get("X-Sf-Substreams-Parallel-Jobs"); parallelJobs != "" { if ll, err := strconv.ParseUint(parallelJobs, 10, 64); err == nil { requestDetails.MaxParallelJobs = ll } } - if cacheTag := auth.Get("X-Sf-Substreams-Cache-Tag"); cacheTag != "" { - if IsValidCacheTag(cacheTag) { - requestDetails.CacheTag = cacheTag + if ct := auth.Get("X-Sf-Substreams-Cache-Tag"); ct != "" { + if IsValidCacheTag(ct) { + cacheTag = ct } else { - return fmt.Errorf("invalid value for X-Sf-Substreams-Cache-Tag %s, should only contain letters, numbers, hyphens and undescores", cacheTag) + return fmt.Errorf("invalid value for X-Sf-Substreams-Cache-Tag %s, should only contain letters, numbers, hyphens and underscores", ct) } } } var requestStats *metrics.Stats - ctx, requestStats = setupRequestStats(ctx, requestDetails, outputGraph, false) + ctx, requestStats = setupRequestStats(ctx, requestDetails, execGraph.ModuleHashes().Get(requestDetails.OutputModule), false) defer requestStats.LogAndClose() traceId := tracing.GetTraceID(ctx).String() @@ -410,17 +404,13 @@ func (s *Tier1Service) blocks(ctx context.Context, request *pbsubstreamsrpc.Requ ctx = reqctx.WithModuleExecutionTracing(ctx) } - if err := s.writePackage(ctx, request, outputGraph); err != nil { - logger.Warn("cannot write package", zap.Error(err)) - } - - if err := outputGraph.ValidateRequestStartBlock(requestDetails.ResolvedStartBlockNum); err != nil { + if err :=
execGraph.ValidateRequestStartBlock(requestDetails.ResolvedStartBlockNum); err != nil { return bsstream.NewErrInvalidArg(err.Error()) } wasmRuntime := wasm.NewRegistry(s.wasmExtensions) - cacheStore, err := s.runtimeConfig.BaseObjectStore.SubStore(requestDetails.CacheTag) + cacheStore, err := s.runtimeConfig.BaseObjectStore.SubStore(cacheTag) if err != nil { return fmt.Errorf("internal error setting store: %w", err) } @@ -434,19 +424,23 @@ func (s *Tier1Service) blocks(ctx context.Context, request *pbsubstreamsrpc.Requ cacheStore = cloned } - execOutputConfigs, err := execout.NewConfigs(cacheStore, outputGraph.UsedModules(), outputGraph.ModuleHashes(), s.runtimeConfig.StateBundleSize, logger) + if err := s.writePackage(ctx, request, execGraph, cacheStore); err != nil { + logger.Warn("cannot write package", zap.Error(err)) + } + + execOutputConfigs, err := execout.NewConfigs(cacheStore, execGraph.UsedModules(), execGraph.ModuleHashes(), s.runtimeConfig.SegmentSize, logger) if err != nil { return fmt.Errorf("new config map: %w", err) } - storeConfigs, err := store.NewConfigMap(cacheStore, outputGraph.Stores(), outputGraph.ModuleHashes()) + storeConfigs, err := store.NewConfigMap(cacheStore, execGraph.Stores(), execGraph.ModuleHashes()) if err != nil { return fmt.Errorf("configuring stores: %w", err) } - stores := pipeline.NewStores(ctx, storeConfigs, s.runtimeConfig.StateBundleSize, requestDetails.LinearHandoffBlockNum, request.StopBlockNum, false) + stores := pipeline.NewStores(ctx, storeConfigs, s.runtimeConfig.SegmentSize, requestDetails.LinearHandoffBlockNum, request.StopBlockNum, false, nil) - execOutputCacheEngine, err := cache.NewEngine(ctx, nil, s.blockType, nil) // we don't read or write ExecOuts on tier1 + execOutputCacheEngine, err := cache.NewEngine(ctx, nil, s.blockType, nil, nil) // we don't read or write ExecOuts on tier1 if err != nil { return fmt.Errorf("error building caching engine: %w", err) } @@ -467,12 +461,13 @@ func (s *Tier1Service) blocks(ctx context.Context, request *pbsubstreamsrpc.Requ pipe := pipeline.New( ctx, - outputGraph, + execGraph, stores, + nil, execOutputConfigs, wasmRuntime, execOutputCacheEngine, - s.runtimeConfig.StateBundleSize, + s.runtimeConfig.SegmentSize, s.runtimeConfig.WorkerFactory, respFunc, opts..., @@ -483,12 +478,12 @@ func (s *Tier1Service) blocks(ctx context.Context, request *pbsubstreamsrpc.Requ // needs to be produced. // But it seems a bit more involved in here. 
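With CacheTag dropped from RequestDetails, tier1 now resolves the tagged substore once and threads it down (note that writePackage now receives cacheStore and is called only after the store is cloned and metered). Judging from the SubStore calls in this diff, cached objects presumably land under <base>/<cacheTag>/<moduleHash>/...; a toy illustration of that layout with paths only, no dstore, and hypothetical example values:

package main

import (
	"fmt"
	"path"
)

func main() {
	base := "/data/substreams-states" // hypothetical BaseObjectStore root
	cacheTag := "v2"                  // DefaultCacheTag, or the X-Sf-Substreams-Cache-Tag override
	moduleHash := "a1b2c3d4e5f6"      // execGraph.ModuleHashes().Get(outputModule)

	cacheStore := path.Join(base, cacheTag)          // BaseObjectStore.SubStore(cacheTag)
	moduleStore := path.Join(cacheStore, moduleHash) // cacheStore.SubStore(moduleHash)
	fmt.Println(moduleStore)                         // /data/substreams-states/v2/a1b2c3d4e5f6
}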
- scheduleStores := outputGraph.StagedUsedModules()[0].LastLayer().IsStoreLayer() + scheduleStores := execGraph.StagedUsedModules()[0].LastLayer().IsStoreLayer() reqPlan, err := plan.BuildTier1RequestPlan( requestDetails.ProductionMode, - s.runtimeConfig.StateBundleSize, - outputGraph.LowestInitBlock(), + s.runtimeConfig.SegmentSize, + execGraph.LowestInitBlock(), requestDetails.ResolvedStartBlockNum, requestDetails.LinearHandoffBlockNum, requestDetails.StopBlockNum, @@ -563,7 +558,7 @@ func tier1ResponseHandler(ctx context.Context, mut *sync.Mutex, logger *zap.Logg userMeta := auth.Meta() ip := auth.RealIP() meter := dmetering.GetBytesMeter(ctx) - ctx = context.WithValue(ctx, "event_emitter", dmetering.GetDefaultEmitter()) + ctx = reqctx.WithEmitter(ctx, dmetering.GetDefaultEmitter()) return func(respAny substreams.ResponseFromAnyTier) error { resp := respAny.(*pbsubstreamsrpc.Response) @@ -579,12 +574,12 @@ func tier1ResponseHandler(ctx context.Context, mut *sync.Mutex, logger *zap.Logg return connect.NewError(connect.CodeUnavailable, err) } - sendMetering(ctx, meter, userID, apiKeyID, ip, userMeta, "sf.substreams.rpc.v2/Blocks", resp, logger) + sendMetering(ctx, meter, userID, apiKeyID, ip, userMeta, "sf.substreams.rpc.v2/Blocks", resp) return nil } } -func setupRequestStats(ctx context.Context, requestDetails *reqctx.RequestDetails, graph *outputmodules.Graph, tier2 bool) (context.Context, *metrics.Stats) { +func setupRequestStats(ctx context.Context, requestDetails *reqctx.RequestDetails, outputModuleHash string, tier2 bool) (context.Context, *metrics.Stats) { logger := reqctx.Logger(ctx) auth := dauth.FromContext(ctx) stats := metrics.NewReqStats(&metrics.Config{ @@ -592,7 +587,7 @@ func setupRequestStats(ctx context.Context, requestDetail ApiKeyID: auth.APIKeyID(), Tier2: tier2, OutputModule: requestDetails.OutputModule, - OutputModuleHash: graph.ModuleHashes().Get(requestDetails.OutputModule), + OutputModuleHash: outputModuleHash, ProductionMode: requestDetails.ProductionMode, }, logger) return reqctx.WithReqStats(ctx, stats), stats diff --git a/service/tier2.go b/service/tier2.go index d3f1b24df..04130af66 100644 --- a/service/tier2.go +++ b/service/tier2.go @@ -9,6 +9,7 @@ import ( "sync" "connectrpc.com/connect" + "github.com/RoaringBitmap/roaring/roaring64" "github.com/streamingfast/bstream/stream" "github.com/streamingfast/dauth" "github.com/streamingfast/dgrpc" @@ -24,9 +25,9 @@ import ( "github.com/streamingfast/substreams/pipeline" "github.com/streamingfast/substreams/pipeline/cache" "github.com/streamingfast/substreams/pipeline/exec" - "github.com/streamingfast/substreams/pipeline/outputmodules" "github.com/streamingfast/substreams/reqctx" "github.com/streamingfast/substreams/storage/execout" + "github.com/streamingfast/substreams/storage/index" "github.com/streamingfast/substreams/storage/store" "github.com/streamingfast/substreams/wasm" "go.opentelemetry.io/otel/attribute" @@ -37,6 +38,25 @@ import ( "google.golang.org/grpc/status" ) +type ModuleExecutionConfig struct { + name string + moduleHash string + objStore dstore.Store + + skipExecution bool + cachedOutputs map[string][]byte // ??
+ blockFilter *BlockFilter + + modKind pbsubstreams.ModuleKind + moduleInitialBlock uint64 + + logger *zap.Logger +} + +type BlockFilter struct { + preexistingExecOuts map[uint64]struct{} +} + type Tier2Service struct { wasmExtensions func(map[string]string) (map[string]map[string]wasm.WASMExtension, error) //todo: rename tracer ttrace.Tracer @@ -103,7 +123,7 @@ func (s *Tier2Service) setOverloaded() { s.setReadyFunc(!overloaded) } -func (s *Tier2Service) ProcessRange(request *pbssinternal.ProcessRangeRequest, streamSrv pbssinternal.Substreams_ProcessRangeServer) (grpcError error) { +func (s *Tier2Service) ProcessRange(request *pbssinternal.ProcessRangeRequest, streamSrv pbssinternal.Substreams_ProcessRangeServer) error { metrics.Tier2ActiveRequests.Inc() metrics.Tier2RequestCounter.Inc() defer metrics.Tier2ActiveRequests.Dec() @@ -122,16 +142,9 @@ func (s *Tier2Service) ProcessRange(request *pbssinternal.ProcessRangeRequest, s s.decrementConcurrentRequests() }() - // TODO: use stage and segment numbers when implemented stage := request.OutputModule - segment := fmt.Sprintf("%d:%d", - request.StartBlockNum, - request.StopBlockNum) - logger := reqctx.Logger(ctx).Named("tier2").With( - zap.String("stage", stage), - zap.String("segment", segment), - ) + logger := reqctx.Logger(ctx).Named("tier2").With(zap.String("stage", stage), zap.Uint64("segment_number", request.SegmentNumber)) ctx = logging.WithLogger(ctx, logger) ctx = dmetering.WithBytesMeter(ctx) @@ -154,8 +167,7 @@ func (s *Tier2Service) ProcessRange(request *pbssinternal.ProcessRangeRequest, s } fields := []zap.Field{ - zap.Uint64("start_block", request.StartBlockNum), - zap.Uint64("stop_block", request.StopBlockNum), + zap.Uint64("segment_size", request.SegmentSize), zap.Uint32("stage", request.Stage), zap.Strings("modules", moduleNames), zap.String("output_module", request.OutputModule), @@ -176,17 +188,8 @@ func (s *Tier2Service) ProcessRange(request *pbssinternal.ProcessRangeRequest, s logger.Info("incoming substreams ProcessRange request", fields...) 
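The tier2 request no longer carries explicit start/stop blocks; it carries segment_number and segment_size, and the proto comment above states segment_number * segment_size = start_block_num. The request.StartBlock()/StopBlock() helpers used throughout this diff presumably derive the range from that relation (sketch, not the generated code):

package main

import "fmt"

type processRangeRequest struct {
	SegmentNumber uint64
	SegmentSize   uint64
}

// Presumed behaviour, following the proto comment:
// segment_number * segment_size = start_block_num.
func (r processRangeRequest) StartBlock() uint64 { return r.SegmentNumber * r.SegmentSize }

// The segment is half-open, so the stop block is the next segment's start.
func (r processRangeRequest) StopBlock() uint64 { return (r.SegmentNumber + 1) * r.SegmentSize }

func main() {
	req := processRangeRequest{SegmentNumber: 3, SegmentSize: 100}
	fmt.Println(req.StartBlock(), req.StopBlock()) // 300 400
}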
- switch { - case request.MeteringConfig == "": - return fmt.Errorf("metering config is required in request") - case request.BlockType == "": - return fmt.Errorf("block type is required in request") - case request.StateStore == "": - return fmt.Errorf("state store is required in request") - case request.MergedBlocksStore == "": - return fmt.Errorf("merged blocks store is required in request") - case request.StateBundleSize == 0: - return fmt.Errorf("a non-zero state bundle size is required in request") + if err := ValidateTier2Request(request); err != nil { + return connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("validate request: %w", err)) } emitter, err := dmetering.New(request.MeteringConfig, logger) @@ -197,11 +200,11 @@ func (s *Tier2Service) ProcessRange(request *pbssinternal.ProcessRangeRequest, s emitter.Shutdown(nil) }() - ctx = context.WithValue(ctx, "event_emitter", emitter) + ctx = reqctx.WithEmitter(ctx, emitter) respFunc := tier2ResponseHandler(ctx, logger, streamSrv) err = s.processRange(ctx, request, respFunc) - grpcError = toGRPCError(ctx, err) + grpcError := toGRPCError(ctx, err) switch status.Code(grpcError) { case codes.Unknown, codes.Internal, codes.Unavailable: @@ -211,38 +214,27 @@ return grpcError } -func (s *Tier2Service) processRange(ctx context.Context, request *pbssinternal.ProcessRangeRequest, respFunc substreams.ResponseFunc) error { - logger := reqctx.Logger(ctx) - - mergedBlocksStore, err := dstore.NewDBinStore(request.MergedBlocksStore) - if err != nil { - return fmt.Errorf("setting up block store from url %q: %w", request.MergedBlocksStore, err) - } - - if cloned, ok := mergedBlocksStore.(dstore.Clonable); ok { - mergedBlocksStore, err = cloned.Clone(ctx) +func (s *Tier2Service) getWASMRegistry(wasmExtensionConfigs map[string]string) (*wasm.Registry, error) { + var exts map[string]map[string]wasm.WASMExtension + if s.wasmExtensions != nil { + x, err := s.wasmExtensions(wasmExtensionConfigs) // sets eth_call extensions to wasm machine, e.g., for Ethereum if err != nil { - return fmt.Errorf("cloning store: %w", err) + return nil, fmt.Errorf("loading wasm extensions: %w", err) } - mergedBlocksStore.SetMeter(dmetering.GetBytesMeter(ctx)) + exts = x } + return wasm.NewRegistry(exts), nil +} - stateStore, err := dstore.NewStore(request.StateStore, "zst", "zstd", false) - if cloned, ok := stateStore.(dstore.Clonable); ok { - stateStore, err = cloned.Clone(ctx) - if err != nil { - return fmt.Errorf("cloning store: %w", err) - } - stateStore.SetMeter(dmetering.GetBytesMeter(ctx)) - } +func (s *Tier2Service) processRange(ctx context.Context, request *pbssinternal.ProcessRangeRequest, respFunc substreams.ResponseFunc) error { + logger := reqctx.Logger(ctx) - if err := outputmodules.ValidateTier2Request(request); err != nil { - return stream.NewErrInvalidArg(fmt.Errorf("validate request: %w", err).Error()) + mergedBlocksStore, cacheStore, unmeteredCacheStore, err := s.getStores(ctx, request) + if err != nil { + return err } - // FIXME: here, we validate that we have only modules on the same - // stage, otherwise we fall back.
- outputGraph, err := outputmodules.NewOutputModuleGraph(request.OutputModule, true, request.Modules) + execGraph, err := exec.NewOutputModuleGraph(request.OutputModule, true, request.Modules) if err != nil { return stream.NewErrInvalidArg(err.Error()) } @@ -253,75 +245,52 @@ func (s *Tier2Service) processRange(ctx context.Context, request *pbssinternal.P ctx = reqctx.WithModuleExecutionTracing(ctx) } - requestDetails.CacheTag = request.StateStoreDefaultTag - if auth := dauth.FromContext(ctx); auth != nil { - if cacheTag := auth.Get("X-Sf-Substreams-Cache-Tag"); cacheTag != "" { - if IsValidCacheTag(cacheTag) { - requestDetails.CacheTag = cacheTag - } else { - return fmt.Errorf("invalid value for X-Sf-Substreams-Cache-Tag %s, should only contain letters, numbers, hyphens and undescores", cacheTag) - } - } - } - var requestStats *metrics.Stats - ctx, requestStats = setupRequestStats(ctx, requestDetails, outputGraph, true) + ctx, requestStats = setupRequestStats(ctx, requestDetails, execGraph.ModuleHashes().Get(requestDetails.OutputModule), true) defer requestStats.LogAndClose() - if err := outputGraph.ValidateRequestStartBlock(requestDetails.ResolvedStartBlockNum); err != nil { - return stream.NewErrInvalidArg(err.Error()) - } - - var exts map[string]map[string]wasm.WASMExtension - if s.wasmExtensions != nil { - x, err := s.wasmExtensions(request.WasmModules) - if err != nil { - return fmt.Errorf("loading wasm extensions: %w", err) - } - exts = x - } - wasmRuntime := wasm.NewRegistry(exts) - - cacheStore, err := stateStore.SubStore(requestDetails.CacheTag) + wasmRegistry, err := s.getWASMRegistry(request.WasmExtensionConfigs) if err != nil { - return fmt.Errorf("internal error setting store: %w", err) + return err } - if clonableStore, ok := cacheStore.(dstore.Clonable); ok { - cloned, err := clonableStore.Clone(ctx) - if err != nil { - return fmt.Errorf("cloning store: %w", err) - } - cloned.SetMeter(dmetering.GetBytesMeter(ctx)) - cacheStore = cloned - } + startBlock := request.StartBlock() + stopBlock := request.StopBlock() - execOutputConfigs, err := execout.NewConfigs(cacheStore, outputGraph.UsedModulesUpToStage(int(request.Stage)), outputGraph.ModuleHashes(), request.StateBundleSize, logger) + execOutputConfigs, err := execout.NewConfigs( + cacheStore, + execGraph.UsedModulesUpToStage(int(request.Stage)), + execGraph.ModuleHashes(), + request.SegmentSize, + logger) if err != nil { return fmt.Errorf("new config map: %w", err) } - storeConfigs, err := store.NewConfigMap(cacheStore, outputGraph.Stores(), outputGraph.ModuleHashes()) + storeConfigs, err := store.NewConfigMap(cacheStore, execGraph.Stores(), execGraph.ModuleHashes()) if err != nil { return fmt.Errorf("configuring stores: %w", err) } - stores := pipeline.NewStores(ctx, storeConfigs, request.StateBundleSize, requestDetails.ResolvedStartBlockNum, request.StopBlockNum, true) - isCompleteRange := request.StopBlockNum%request.StateBundleSize == 0 - // note all modules that are not in 'modulesRequiredToRun' are still iterated in 'pipeline.executeModules', but they will skip actual execution when they see that the cache provides the data - // This way, stores get updated at each block from the cached execouts without the actual execution of the module - modulesRequiredToRun, existingExecOuts, execOutWriters, err := evaluateModulesRequiredToRun(ctx, logger, outputGraph, request.Stage, request.StartBlockNum, request.StopBlockNum, isCompleteRange, request.OutputModule, execOutputConfigs, storeConfigs) + // indexes are not metered: we 
want users to use them as much as possible + indexConfigs, err := index.NewConfigs(unmeteredCacheStore, execGraph.UsedIndexesModulesUpToStage(int(request.Stage)), execGraph.ModuleHashes(), logger) + if err != nil { + return fmt.Errorf("configuring indexes: %w", err) + } + + executionPlan, err := GetExecutionPlan(ctx, logger, execGraph, request.Stage, startBlock, stopBlock, request.OutputModule, execOutputConfigs, indexConfigs, storeConfigs) if err != nil { - return fmt.Errorf("evaluating required modules: %w", err) + return fmt.Errorf("creating execution plan: %w", err) } - if len(modulesRequiredToRun) == 0 { + if executionPlan == nil || len(executionPlan.RequiredModules) == 0 { logger.Info("no modules required to run, skipping") return nil } + stores := pipeline.NewStores(ctx, storeConfigs, request.SegmentSize, requestDetails.ResolvedStartBlockNum, stopBlock, true, executionPlan.StoresToWrite) - // this engine will keep the existingExecOuts to optimize the execution (for inputs from modules that skip execution) - execOutputCacheEngine, err := cache.NewEngine(ctx, execOutWriters, request.BlockType, existingExecOuts) + // this engine will keep the ExistingExecOuts to optimize the execution (for inputs from modules that skip execution) + execOutputCacheEngine, err := cache.NewEngine(ctx, executionPlan.ExecoutWriters, request.BlockType, executionPlan.ExistingExecOuts, executionPlan.IndexWriters) if err != nil { return fmt.Errorf("error building caching engine: %w", err) } @@ -333,12 +302,13 @@ func (s *Tier2Service) processRange(ctx context.Context, request *pbssinternal.P pipe := pipeline.New( ctx, - outputGraph, + execGraph, stores, + executionPlan.ExistingIndices, execOutputConfigs, - wasmRuntime, + wasmRegistry, execOutputCacheEngine, - request.StateBundleSize, + request.SegmentSize, nil, respFunc, // This must always be the parent/global trace id, the one that comes from tier1 @@ -347,7 +317,6 @@ func (s *Tier2Service) processRange(ctx context.Context, request *pbssinternal.P logger.Debug("initializing tier2 pipeline", zap.Uint64("request_start_block", requestDetails.ResolvedStartBlockNum), - zap.Uint64("request_stop_block", request.StopBlockNum), zap.String("output_module", request.OutputModule), zap.Uint32("stage", request.Stage), ) @@ -355,25 +324,38 @@ func (s *Tier2Service) processRange(ctx context.Context, request *pbssinternal.P if err := pipe.Init(ctx); err != nil { return fmt.Errorf("error during pipeline init: %w", err) } + if err := pipe.InitTier2Stores(ctx); err != nil { return fmt.Errorf("error building pipeline: %w", err) } - var streamFactoryFunc StreamFactoryFunc - if s.streamFactoryFuncOverride != nil { //this is only for testing purposes. - streamFactoryFunc = s.streamFactoryFuncOverride - } else { - sf := &StreamFactory{ - mergedBlocksStore: mergedBlocksStore, + if err := pipe.BuildModuleExecutors(ctx); err != nil { + return fmt.Errorf("error building module executors: %w", err) + } + + allExecutorsExcludedByBlockIndex := true +excludable: + for _, stage := range pipe.ModuleExecutors { + for _, executor := range stage { + if executionPlan.ExistingExecOuts[executor.Name()] != nil { + continue + } + if !executor.BlockIndex().ExcludesAllBlocks() { + allExecutorsExcludedByBlockIndex = false + break excludable + } } - streamFactoryFunc = sf.New + } + if allExecutorsExcludedByBlockIndex { + logger.Info("all executors are excluded by block index. 
Skipping execution of segment") + return pipe.OnStreamTerminated(ctx, io.EOF) } var streamErr error - if canSkipBlockSource(existingExecOuts, modulesRequiredToRun, request.BlockType) { - maxDistributorLength := int(request.StopBlockNum - requestDetails.ResolvedStartBlockNum) + if canSkipBlockSource(executionPlan.ExistingExecOuts, executionPlan.RequiredModules, request.BlockType) { + maxDistributorLength := int(stopBlock - requestDetails.ResolvedStartBlockNum) clocksDistributor := make(map[uint64]*pbsubstreams.Clock) - for _, execOutput := range existingExecOuts { + for _, execOutput := range executionPlan.ExistingExecOuts { execOutput.ExtractClocks(clocksDistributor) if len(clocksDistributor) >= maxDistributorLength { break @@ -383,7 +365,7 @@ func (s *Tier2Service) processRange(ctx context.Context, request *pbssinternal.P sortedClocksDistributor := sortClocksDistributor(clocksDistributor) ctx, span := reqctx.WithSpan(ctx, "substreams/tier2/pipeline/mapper_stream") for _, clock := range sortedClocksDistributor { - if clock.Number < request.StartBlockNum || clock.Number >= request.StopBlockNum { + if clock.Number < startBlock || clock.Number >= stopBlock { panic("reading from mapper, block was out of range") // we don't want to have this case undetected } cursor := irreversibleCursorFromClock(clock) @@ -395,122 +377,84 @@ func (s *Tier2Service) processRange(ctx context.Context, request *pbssinternal.P } streamErr = io.EOF span.EndWithErr(&streamErr) - } else { - blockStream, err := streamFactoryFunc( - ctx, - pipe, - int64(requestDetails.ResolvedStartBlockNum), - request.StopBlockNum, - "", - true, - false, - logger.Named("stream"), - ) - if err != nil { - return fmt.Errorf("error getting stream: %w", err) - } + return pipe.OnStreamTerminated(ctx, streamErr) + } + sf := &StreamFactory{ + mergedBlocksStore: mergedBlocksStore, + } + streamFactoryFunc := sf.New - ctx, span := reqctx.WithSpan(ctx, "substreams/tier2/pipeline/blocks_stream") - streamErr = blockStream.Run(ctx) - span.EndWithErr(&streamErr) + if s.streamFactoryFuncOverride != nil { //this is only for testing purposes. 
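+		// when set, the override replaces sf.New entirely, so tests can drive the pipeline without reading mergedBlocksStore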
+ streamFactoryFunc = s.streamFactoryFuncOverride } + blockStream, err := streamFactoryFunc( + ctx, + pipe, + int64(requestDetails.ResolvedStartBlockNum), + stopBlock, + "", + true, + false, + logger.Named("stream"), + ) + if err != nil { + return fmt.Errorf("error getting stream: %w", err) + } + + ctx, span := reqctx.WithSpan(ctx, "substreams/tier2/pipeline/blocks_stream") + streamErr = blockStream.Run(ctx) + span.EndWithErr(&streamErr) + return pipe.OnStreamTerminated(ctx, streamErr) } -// evaluateModulesRequiredToRun will also load the existing execution outputs to be used as cache -// if it returns no modules at all, it means that we can skip the whole thing -func evaluateModulesRequiredToRun( - ctx context.Context, - logger *zap.Logger, - outputGraph *outputmodules.Graph, - stage uint32, - startBlock uint64, - stopBlock uint64, - isCompleteRange bool, - outputModule string, - execoutConfigs *execout.Configs, - storeConfigs store.ConfigMap, -) (requiredModules map[string]*pbsubstreams.Module, existingExecOuts map[string]*execout.File, execoutWriters map[string]*execout.Writer, err error) { - existingExecOuts = make(map[string]*execout.File) - requiredModules = make(map[string]*pbsubstreams.Module) - execoutWriters = make(map[string]*execout.Writer) - usedModules := make(map[string]*pbsubstreams.Module) - for _, module := range outputGraph.UsedModulesUpToStage(int(stage)) { - usedModules[module.Name] = module +func (s *Tier2Service) getStores(ctx context.Context, request *pbssinternal.ProcessRangeRequest) (mergedBlocksStore, cacheStore, unmeteredCacheStore dstore.Store, err error) { + + mergedBlocksStore, err = dstore.NewDBinStore(request.MergedBlocksStore) + if err != nil { + return nil, nil, nil, fmt.Errorf("setting up block store from url %q: %w", request.MergedBlocksStore, err) } - stageUsedModules := outputGraph.StagedUsedModules()[stage] - runningLastStage := stageUsedModules.IsLastStage() - stageUsedModulesName := make(map[string]bool) - for _, layer := range stageUsedModules { - for _, mod := range layer { - stageUsedModulesName[mod.Name] = true + if cloned, ok := mergedBlocksStore.(dstore.Clonable); ok { + mergedBlocksStore, err = cloned.Clone(ctx) + if err != nil { + return nil, nil, nil, fmt.Errorf("cloning store: %w", err) } + mergedBlocksStore.SetMeter(dmetering.GetBytesMeter(ctx)) } - for name, c := range execoutConfigs.ConfigMap { - if _, found := usedModules[name]; !found { // skip modules that are only present in later stages - continue - } - file, readErr := c.ReadFile(ctx, &block.Range{StartBlock: startBlock, ExclusiveEndBlock: stopBlock}) - if readErr != nil { - requiredModules[name] = usedModules[name] - continue - } - existingExecOuts[name] = file + stateStore, err := dstore.NewStore(request.StateStore, "zst", "zstd", false) + if err != nil { + return nil, nil, nil, fmt.Errorf("getting store: %w", err) + } - if c.ModuleKind() == pbsubstreams.ModuleKindMap { - if runningLastStage && name == outputModule { - // WARNING be careful, if we want to force producing module outputs/stores states for ALL STAGES on the first block range, - // this optimization will be in our way.. 
- logger.Info("found existing exec output for output_module, skipping run", zap.String("output_module", name)) - return nil, nil, nil, nil + cacheTag := request.StateStoreDefaultTag + if auth := dauth.FromContext(ctx); auth != nil { + if ct := auth.Get("X-Sf-Substreams-Cache-Tag"); ct != "" { + if IsValidCacheTag(ct) { + cacheTag = ct + } else { + return nil, nil, nil, fmt.Errorf("invalid value for X-Sf-Substreams-Cache-Tag %s, should only contain letters, numbers, hyphens and undescores", ct) } - continue } + } - // if either full or partial kv exists, we can skip the module - storeExists, err := storeConfigs[name].ExistsFullKV(ctx, stopBlock) - if err != nil { - return nil, nil, nil, fmt.Errorf("checking fullkv file existence: %w", err) - } - if !storeExists { - partialStoreExists, err := storeConfigs[name].ExistsPartialKV(ctx, startBlock, stopBlock) - if err != nil { - return nil, nil, nil, fmt.Errorf("checking partial file existence: %w", err) - } - if !partialStoreExists { - // some stores may already exist completely on this stage, but others do not, so we keep going but ignore those - requiredModules[name] = usedModules[name] - } - } + unmeteredCacheStore, err = stateStore.SubStore(cacheTag) + if err != nil { + return nil, nil, nil, fmt.Errorf("internal error setting store: %w", err) } - for name, module := range requiredModules { - if _, exists := existingExecOuts[name]; exists { - continue // for stores that need to be run for the partials, but already have cached execution outputs - } - if !isCompleteRange && name != outputModule { - // if we are not running a complete range, we can skip writing the outputs of every module except the requested outputModule if it's in our stage - continue - } - if module.ModuleKind() == pbsubstreams.ModuleKindStore { - if _, found := stageUsedModulesName[name]; !found { - continue - } + if clonableStore, ok := unmeteredCacheStore.(dstore.Clonable); ok { + cloned, err := clonableStore.Clone(ctx) + if err != nil { + return nil, nil, nil, fmt.Errorf("cloning store: %w", err) } - - execoutWriters[name] = execout.NewWriter( - startBlock, - stopBlock, - name, - execoutConfigs, - ) + cloned.SetMeter(dmetering.GetBytesMeter(ctx)) + cacheStore = cloned } return - } func canSkipBlockSource(existingExecOuts map[string]*execout.File, requiredModules map[string]*pbsubstreams.Module, blockType string) bool { @@ -553,7 +497,7 @@ func tier2ResponseHandler(ctx context.Context, logger *zap.Logger, streamSrv pbs return connect.NewError(connect.CodeUnavailable, err) } - sendMetering(ctx, meter, userID, apiKeyID, ip, userMeta, "sf.substreams.internal.v2/ProcessRange", resp, logger) + sendMetering(ctx, meter, userID, apiKeyID, ip, userMeta, "sf.substreams.internal.v2/ProcessRange", resp) return nil } } @@ -637,3 +581,147 @@ func toGRPCError(ctx context.Context, err error) error { } return status.Error(codes.Internal, err.Error()) } + +type ExecutionPlan struct { + ExistingExecOuts map[string]*execout.File + ExecoutWriters map[string]*execout.Writer + ExistingIndices map[string]map[string]*roaring64.Bitmap + IndexWriters map[string]*index.Writer + RequiredModules map[string]*pbsubstreams.Module + StoresToWrite map[string]struct{} +} + +func GetExecutionPlan( + ctx context.Context, + logger *zap.Logger, + execGraph *exec.Graph, + stage uint32, + startBlock uint64, + stopBlock uint64, + outputModule string, + execoutConfigs *execout.Configs, + indexConfigs *index.Configs, + storeConfigs store.ConfigMap, +) (*ExecutionPlan, error) { + storesToWrite := 
make(map[string]struct{}) + existingExecOuts := make(map[string]*execout.File) + existingIndices := make(map[string]map[string]*roaring64.Bitmap) + requiredModules := make(map[string]*pbsubstreams.Module) + execoutWriters := make(map[string]*execout.Writer) // this affects stores and mappers, per-block data + indexWriters := make(map[string]*index.Writer) // write the full index file + // storeWriters := .... // write the snapshots + usedModules := make(map[string]*pbsubstreams.Module) + for _, module := range execGraph.UsedModulesUpToStage(int(stage)) { + usedModules[module.Name] = module + } + + stageUsedModules := execGraph.StagedUsedModules()[stage] + runningLastStage := stageUsedModules.IsLastStage() + stageUsedModulesName := make(map[string]bool) + for _, layer := range stageUsedModules { + for _, mod := range layer { + stageUsedModulesName[mod.Name] = true + } + } + for _, mod := range usedModules { + if mod.InitialBlock >= stopBlock { + continue + } + + name := mod.Name + + c := execoutConfigs.ConfigMap[name] + + moduleStartBlock := startBlock + if mod.InitialBlock > startBlock { + moduleStartBlock = mod.InitialBlock + } + + switch mod.ModuleKind() { + case pbsubstreams.ModuleKindBlockIndex: + indexFile := indexConfigs.ConfigMap[name].NewFile(&block.Range{StartBlock: moduleStartBlock, ExclusiveEndBlock: stopBlock}) + err := indexFile.Load(ctx) + if err != nil { + requiredModules[name] = usedModules[name] + indexWriters[name] = index.NewWriter(indexFile) + break + } + + existingIndices[name] = indexFile.Indices + + case pbsubstreams.ModuleKindMap: + file, readErr := c.ReadFile(ctx, &block.Range{StartBlock: moduleStartBlock, ExclusiveEndBlock: stopBlock}) + if readErr != nil { + requiredModules[name] = usedModules[name] + break + } + existingExecOuts[name] = file + + if runningLastStage && name == outputModule { + logger.Info("found existing exec output for output_module, skipping run", zap.String("output_module", name)) + return nil, nil + } + + case pbsubstreams.ModuleKindStore: + file, readErr := c.ReadFile(ctx, &block.Range{StartBlock: moduleStartBlock, ExclusiveEndBlock: stopBlock}) + if readErr != nil { + requiredModules[name] = usedModules[name] + } else { + existingExecOuts[name] = file + } + + // if either full or partial kv exists, we can skip the module + // some stores may already exist completely on this stage, but others do not, so we keep going but ignore those + storeExists, err := storeConfigs[name].ExistsFullKV(ctx, stopBlock) + if err != nil { + return nil, fmt.Errorf("checking fullkv file existence: %w", err) + } + if !storeExists { + partialStoreExists, err := storeConfigs[name].ExistsPartialKV(ctx, moduleStartBlock, stopBlock) + if err != nil { + return nil, fmt.Errorf("checking partial file existence: %w", err) + } + if !partialStoreExists { + storesToWrite[name] = struct{}{} + requiredModules[name] = usedModules[name] + } + } + + } + + } + + for name, module := range requiredModules { + if _, exists := existingExecOuts[name]; exists { + continue // for stores that need to be run for the partials, but already have cached execution outputs + } + + writerStartBlock := startBlock + if module.InitialBlock > startBlock { + writerStartBlock = module.InitialBlock + } + + var isIndexWriter bool + if module.ModuleKind() == pbsubstreams.ModuleKindBlockIndex { + isIndexWriter = true + } + + execoutWriters[name] = execout.NewWriter( + writerStartBlock, + stopBlock, + name, + execoutConfigs, + isIndexWriter, + ) + + } + + return &ExecutionPlan{ + ExistingExecOuts: 
existingExecOuts, + ExecoutWriters: execoutWriters, + ExistingIndices: existingIndices, + IndexWriters: indexWriters, + RequiredModules: requiredModules, + StoresToWrite: storesToWrite, + }, nil +} diff --git a/pipeline/outputmodules/validate.go b/service/validate.go similarity index 85% rename from pipeline/outputmodules/validate.go rename to service/validate.go index 61ec17714..385e18e10 100644 --- a/pipeline/outputmodules/validate.go +++ b/service/validate.go @@ -1,4 +1,4 @@ -package outputmodules +package service import ( "fmt" @@ -52,21 +52,6 @@ func validateRequest(binaries []*pbsubstreams.Binary, modules *pbsubstreams.Modu return err } - if err := checkNotImplemented(modules.Modules); err != nil { - return fmt.Errorf("checking feature not implemented: %w", err) - } - return nil -} - -func checkNotImplemented(mods []*pbsubstreams.Module) error { - for _, mod := range mods { - if mod.ModuleKind() == pbsubstreams.ModuleKindBlockIndex { - return fmt.Errorf("block index module is not implemented") - } - if mod.GetBlockFilter() != nil { - return fmt.Errorf("block filter module is not implemented") - } - } return nil } diff --git a/pipeline/outputmodules/validate_test.go b/service/validate_test.go similarity index 99% rename from pipeline/outputmodules/validate_test.go rename to service/validate_test.go index 571c94802..4ce4084e9 100644 --- a/pipeline/outputmodules/validate_test.go +++ b/service/validate_test.go @@ -1,4 +1,4 @@ -package outputmodules +package service import ( "fmt" diff --git a/sink-server/docker/clickhouse.go b/sink-server/docker/clickhouse.go index 93b105715..50c6b2366 100644 --- a/sink-server/docker/clickhouse.go +++ b/sink-server/docker/clickhouse.go @@ -7,10 +7,9 @@ import ( "time" "github.com/docker/cli/cli/compose/types" - pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" ) -func (e *DockerEngine) newClickhouse(deploymentID string, pkg *pbsubstreams.Package) (types.ServiceConfig, string, error) { +func (e *DockerEngine) newClickhouse(deploymentID string) (types.ServiceConfig, string, error) { name := fmt.Sprintf("%s-clickhouse", deploymentID) dataFolder := filepath.Join(e.dir, deploymentID, "data", "clickhouse") diff --git a/sink-server/docker/docker.go b/sink-server/docker/docker.go index 8d1353cdf..57dcd3f65 100644 --- a/sink-server/docker/docker.go +++ b/sink-server/docker/docker.go @@ -95,7 +95,7 @@ func (e *DockerEngine) CheckVersion() error { return nil } - return fmt.Errorf("Cannot determine docker compose version %q. Upgrade your Docker engine here: https://docs.docker.com/engine/install/", ver) + return fmt.Errorf("cannot determine docker compose version %q. Upgrade your Docker engine here: https://docs.docker.com/engine/install/", ver) } func (e *DockerEngine) writeDeploymentInfo(deploymentID string, usedPorts []uint32, runMeFirst []string, svcInfo map[string]string, pkg *pbsubstreams.Package) error { @@ -144,7 +144,7 @@ func (e *DockerEngine) Create(ctx context.Context, deploymentID string, pkg *pbs return nil, fmt.Errorf("this substreams-sink engine only supports a single active deployment. 
Stop any active sink before launching another one or use `sink-update`") } - manifest, usedPorts, serviceInfo, runMeFirst, err := e.createManifest(ctx, deploymentID, e.token, pkg) + manifest, usedPorts, serviceInfo, runMeFirst, err := e.createManifest(ctx, deploymentID, pkg) if err != nil { return nil, fmt.Errorf("creating manifest from package: %w", err) } @@ -179,7 +179,7 @@ func (e *DockerEngine) Update(ctx context.Context, deploymentID string, pkg *pbs } } - manifest, usedPorts, serviceInfo, runMeFirst, err := e.createManifest(ctx, deploymentID, e.token, pkg) + manifest, usedPorts, serviceInfo, runMeFirst, err := e.createManifest(ctx, deploymentID, pkg) if err != nil { return fmt.Errorf("creating manifest from package: %w", err) } @@ -220,8 +220,6 @@ func (e *DockerEngine) otherDeploymentIsActive(ctx context.Context, deploymentID return false } -var reasonInternalError = "internal error" - func (e *DockerEngine) Info(ctx context.Context, deploymentID string, zlog *zap.Logger) (*pbsinksvc.InfoResponse, error) { cmd := exec.Command("docker", "compose", "ps", "--format", "json") cmd.Dir = filepath.Join(e.dir, deploymentID) @@ -414,8 +412,7 @@ func (e *DockerEngine) Resume(ctx context.Context, deploymentID string, _ pbsink } } - var cmd *exec.Cmd - cmd = exec.Command("docker", "compose", "up", "-d", "--wait") + cmd := exec.Command("docker", "compose", "up", "-d", "--wait") cmd.Dir = filepath.Join(e.dir, deploymentID) out, err := cmd.CombinedOutput() if err != nil { @@ -522,9 +519,9 @@ func (e *DockerEngine) Shutdown(ctx context.Context, _ error, zlog *zap.Logger) return err } -func (e *DockerEngine) createManifest(ctx context.Context, deploymentID string, token string, pkg *pbsubstreams.Package) (content []byte, usedPorts []uint32, servicesDesc map[string]string, runMeFirst []string, err error) { +func (e *DockerEngine) createManifest(ctx context.Context, deploymentID string, pkg *pbsubstreams.Package) (content []byte, usedPorts []uint32, servicesDesc map[string]string, runMeFirst []string, err error) { if pkg.SinkConfig.TypeUrl != "sf.substreams.sink.sql.v1.Service" { - return nil, nil, nil, nil, fmt.Errorf("invalid sinkconfig type: %q. 
Only sf.substreams.sink.sql.v1.Service is supported for now.", pkg.SinkConfig.TypeUrl) + return nil, nil, nil, nil, fmt.Errorf("invalid sinkconfig type: %q, only sf.substreams.sink.sql.v1.Service is supported for now", pkg.SinkConfig.TypeUrl) } sinkConfig := &pbsql.Service{} if err := pkg.SinkConfig.UnmarshalTo(sinkConfig); err != nil { @@ -539,7 +536,7 @@ func (e *DockerEngine) createManifest(ctx context.Context, deploymentID string, switch sinkConfig.Engine { case pbsql.Service_clickhouse: - db, dbMotd, err := e.newClickhouse(deploymentID, pkg) + db, dbMotd, err := e.newClickhouse(deploymentID) if err != nil { return nil, nil, nil, nil, fmt.Errorf("creating clickhouse deployment: %w", err) } @@ -550,7 +547,7 @@ func (e *DockerEngine) createManifest(ctx context.Context, deploymentID string, isClickhouse = true case pbsql.Service_postgres, pbsql.Service_unset: - pg, pgMotd, err := e.newPostgres(deploymentID, pkg) + pg, pgMotd, err := e.newPostgres(deploymentID) if err != nil { return nil, nil, nil, nil, fmt.Errorf("creating postgres deployment: %w", err) } @@ -602,9 +599,7 @@ func (e *DockerEngine) createManifest(ctx context.Context, deploymentID string, if dbt != nil { servicesDesc[dbt.Name] = motd - if dbt != nil { - services = append(services, *dbt) - } + services = append(services, *dbt) } else { servicesDesc["dbt"] = motd } diff --git a/sink-server/docker/postgres.go b/sink-server/docker/postgres.go index 6bae6bf46..d250152a6 100644 --- a/sink-server/docker/postgres.go +++ b/sink-server/docker/postgres.go @@ -7,10 +7,9 @@ import ( "time" "github.com/docker/cli/cli/compose/types" - pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" ) -func (e *DockerEngine) newPostgres(deploymentID string, pkg *pbsubstreams.Package) (types.ServiceConfig, string, error) { +func (e *DockerEngine) newPostgres(deploymentID string) (types.ServiceConfig, string, error) { name := fmt.Sprintf("%s-postgres", deploymentID) localPort := uint32(5432) // TODO: assign dynamically diff --git a/sqe/bitmap.go b/sqe/bitmap.go index 669a4f9c7..88d058f48 100644 --- a/sqe/bitmap.go +++ b/sqe/bitmap.go @@ -8,7 +8,11 @@ import ( ) func RoaringBitmapsApply(expr Expression, bitmaps map[string]*roaring64.Bitmap) *roaring64.Bitmap { - return roaringQuerier{bitmaps: bitmaps}.apply(expr) + out := roaringQuerier{bitmaps: bitmaps}.apply(expr) + if out == nil { + return roaring64.New() + } + return out } type roaringRange struct { @@ -29,10 +33,12 @@ type roaringQuerier struct { } func (q roaringQuerier) apply(expr Expression) *roaring64.Bitmap { - switch v := expr.(type) { case *KeyTerm: - return q.bitmaps[v.Value.Value] + if out, ok := q.bitmaps[v.Value.Value]; ok { + return out + } + return roaring64.New() case *AndExpression, *OrExpression: children := v.(HasChildrenExpression).GetChildren() diff --git a/sqe/bitmap_test.go b/sqe/bitmap_test.go index ca91e3c99..2d655479a 100644 --- a/sqe/bitmap_test.go +++ b/sqe/bitmap_test.go @@ -42,14 +42,6 @@ func TestApplyRoaringBitmap(t *testing.T) { expr: "(alice || bob) (delegate || mint)", result: []uint64{4, 5}, }, - { - expr: "-bob", - result: []uint64{4, 5}, - }, - { - expr: "(alice || john) -(delegate || mint)", - result: []uint64{1, 3}, - }, } // Run test cases diff --git a/sqe/keys.go b/sqe/keys.go new file mode 100644 index 000000000..04e4c5fe3 --- /dev/null +++ b/sqe/keys.go @@ -0,0 +1,82 @@ +package sqe + +import ( + "fmt" + + pbindex "github.com/streamingfast/substreams/pb/sf/substreams/index/v1" +) + +type KeysQuerier struct { + blockKeys map[string]struct{} 
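+	// blockKeys is the set membership view of one block's index keys; a nil map matches nothing.
+	// Hypothetical usage, given a parsed expression `expr`:
+	//   matched := KeysApply(expr, NewFromIndexKeys(&pbindex.Keys{Keys: []string{"transfer"}}))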
+} + +func NewFromIndexKeys(indexKeys *pbindex.Keys) KeysQuerier { + blockKeys := make(map[string]struct{}, len(indexKeys.Keys)) + for _, key := range indexKeys.Keys { + blockKeys[key] = struct{}{} + } + + return KeysQuerier{blockKeys: blockKeys} +} +func KeysApply(expr Expression, blockKeys KeysQuerier) bool { + return blockKeys.apply(expr) +} + +func (k KeysQuerier) apply(expr Expression) bool { + switch v := expr.(type) { + case *KeyTerm: + if k.blockKeys == nil { + return false + } + + _, ok := k.blockKeys[v.Value.Value] + return ok + + case *AndExpression, *OrExpression: + children := v.(HasChildrenExpression).GetChildren() + if len(children) == 0 { + panic(fmt.Errorf("%T expression with no children. this makes no sense, something is wrong in the parser", v)) + } + + firstChild := children[0] + if len(children) == 1 { + return k.apply(firstChild) + } + + result := k.apply(firstChild) + + var op func(bool) + switch v.(type) { + case *AndExpression: + op = func(x bool) { + result = result && x + } + + case *OrExpression: + op = func(x bool) { + result = result || x + } + default: + panic(fmt.Errorf("has children expression of type %T is not handled correctly", v)) + } + + for _, child := range children[1:] { + op(k.apply(child)) + } + + return result + + case *ParenthesisExpression: + return k.apply(v.Child) + + case *NotExpression: + if k.blockKeys == nil { + return false + } + + return !k.apply(v.Child) + + default: + panic(fmt.Errorf("element of type %T is not handled correctly", v)) + } +} diff --git a/sqe/keys_test.go b/sqe/keys_test.go new file mode 100644 index 000000000..f0af2ea5e --- /dev/null +++ b/sqe/keys_test.go @@ -0,0 +1,70 @@ +package sqe + +import ( + "context" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestApplyKeys(t *testing.T) { + kv := map[string]struct{}{ + "bob": {}, + "alice": {}, + "etienne": {}, + "charlie": {}, + "delegate": {}, + "mint": {}, + } + + blockKeys := KeysQuerier{blockKeys: kv} + + // Matrix-based test cases + testCases := []struct { + name string + expr string + result bool + }{ + { + name: "Or", + expr: "bob || alice", + result: true, + }, + { + name: "And", + expr: "bob transfer", + result: false, + }, + { + name: "And(Or key)", + expr: "(alice || bob) transfer", + result: false, + }, + { + name: "And(Or Or)", + expr: "(alice || bob) (delegate || mint)", + result: true, + }, + + { + name: "2 And", + expr: "alice john mint", + result: false, + }, + } + + // Run test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + parser, err := NewParser(strings.NewReader(tc.expr)) + require.NoError(t, err) + + expr, err := parser.Parse(context.Background()) + require.NoError(t, err) + + assert.Equal(t, tc.result, blockKeys.apply(expr)) + }) + } +} diff --git a/sqe/lexer_test.go b/sqe/lexer_test.go index 00406eb8c..0ab00972d 100644 --- a/sqe/lexer_test.go +++ b/sqe/lexer_test.go @@ -2,7 +2,6 @@ package sqe import ( "bytes" - "fmt" "testing" lex "github.com/alecthomas/participle/lexer" @@ -51,7 +50,7 @@ func tokensList(t *testing.T, input string) (out []string) { require.NoError(t, err) for _, token := range tokens { - out = append(out, fmt.Sprintf("%s", lexer.getTokenType(token))) + out = append(out, lexer.getTokenType(token)) } return diff --git a/sqe/parser.go b/sqe/parser.go index d90856420..d627170bf 100644 --- a/sqe/parser.go +++ b/sqe/parser.go @@ -175,7 +175,7 @@ func (p *Parser) parseUnaryExpression(depth int) (Expression, error) { case
p.l.isLeftParenthesis(token): return p.parseParenthesisExpression(depth) case p.l.isNotOperator(token): - return p.parseNotExpression(depth) + return nil, fmt.Errorf("NOT operator (-) is not supported in the block filter") default: return nil, parserError(fmt.Sprintf("expected a key term, minus sign or left parenthesis, got %s", p.l.getTokenType(token)), token.Pos) } @@ -209,18 +209,6 @@ func (p *Parser) parseParenthesisExpression(depth int) (Expression, error) { return &ParenthesisExpression{child}, nil } -func (p *Parser) parseNotExpression(depth int) (Expression, error) { - // Consume minus sign - p.l.mustLexNext() - - child, err := p.parseUnaryExpression(depth) - if err != nil { - return nil, fmt.Errorf("invalid expression after minus sign: %w", err) - } - - return &NotExpression{child}, nil -} - func (p *Parser) parseKeyTerm() (Expression, error) { token := p.l.mustLexNext() diff --git a/sqe/parser_test.go b/sqe/parser_test.go index 7e218ae5a..06e6c7b59 100644 --- a/sqe/parser_test.go +++ b/sqe/parser_test.go @@ -69,12 +69,6 @@ func TestParser(t *testing.T) { `" test || value AND other ( 10 )!"`, nil, }, - { - "double_quoted_string_with_minus_sign", - `"eosio.token-open"`, - `"eosio.token-open"`, - nil, - }, { "single_quoted_string", @@ -163,92 +157,6 @@ func TestParser(t *testing.T) { `([one || two])`, nil, }, - { - "top_level_parenthesis_with_both_not", - ` ( -one || -two ) `, - `([!one || !two])`, - nil, - }, - - { - "top_level_not_term", - `- one`, - `!one`, - nil, - }, - { - "top_level_not_parenthesis", - `- ( one)`, - `!(one)`, - nil, - }, - { - "top_level_not_parenthesis_or", - `- ( one || two)`, - `!([one || two])`, - nil, - }, - - { - "top_level_implicit_and_with_left_not", - ` - two one`, - ``, - nil, - }, - { - "top_level_implicit_and_with_right_not", - `two -one`, - ``, - nil, - }, - { - "top_level_implicit_and_both_not", - `-two -one`, - ``, - nil, - }, - { - "top_level_and_with_left_not", - ` - two && one`, - ``, - nil, - }, - { - "top_level_and_with_right_not", - `two && -one`, - ``, - nil, - }, - { - "top_level_and_both_not", - `-two && -one`, - ``, - nil, - }, - { - "top_level_or_with_left_not", - ` - two || one`, - `[!two || one]`, - nil, - }, - { - "top_level_or_with_right_not", - `two || -one`, - `[two || !one]`, - nil, - }, - { - "top_level_or_with_both_not", - `-two || -one`, - `[!two || !one]`, - nil, - }, - { - "top_level_legacy_or_with_both_not", - `-two || -one`, - `[!two || !one]`, - nil, - }, { "top_level_multi_and", @@ -287,18 +195,7 @@ func TestParser(t *testing.T) { `[ || d]`, nil, }, - { - "precedence_not_and_or", - `-a b || c`, - `[ || c]`, - nil, - }, - { - "precedence_parenthesis_not_and_or", - `-a (b || c)`, - ``, - nil, - }, + { "precedence_parenthesis_and_or_and", `a (b || c) d`, @@ -344,16 +241,6 @@ func TestParser(t *testing.T) { nil, }, - { - "error_missing_expresssion_after_not", - `a - `, - "", - fmt.Errorf("missing expression after implicit 'and' clause: %w", - fmt.Errorf("invalid expression after minus sign: %w", - &ParseError{"expected a key term, minus sign or left parenthesis, got end of input", pos(1, 4, 5)}, - ), - ), - }, { "error_missing_expression_after_and", `a && `, diff --git a/sqe/traversal.go b/sqe/traversal.go index 61113df99..a6f7c4a23 100644 --- a/sqe/traversal.go +++ b/sqe/traversal.go @@ -95,7 +95,7 @@ func (v *DepthFirstVisitor) executeCallback(ctx context.Context, e Expression, c return false, nil } - if v.stopped == true { + if v.stopped { return true, nil } diff --git a/sqe/types.go b/sqe/types.go index 
396e59bc1..41841dbc9 100644 --- a/sqe/types.go +++ b/sqe/types.go @@ -3,7 +3,6 @@ package sqe import ( "context" "fmt" - "strings" ) type Visitor interface { @@ -95,21 +94,6 @@ type StringLiteral struct { QuotingChar string } -const restrictedLiteralChars = `'":,-()[] ` + "\n" + "\t" - -func stringLiteral(in string) *StringLiteral { - stringLiteral := &StringLiteral{Value: in} - if strings.ContainsAny(in, restrictedLiteralChars) { - stringLiteral.QuotingChar = "\"" - } - - return stringLiteral -} - -func (e *StringLiteral) isValue() bool { - return true -} - func (e *StringLiteral) Literal() string { return e.Value } diff --git a/storage/execout/buffer.go b/storage/execout/buffer.go index 986c94fe7..a8e3db86b 100644 --- a/storage/execout/buffer.go +++ b/storage/execout/buffer.go @@ -13,13 +13,22 @@ import ( // Buffer holds the values produced by modules and exchanged between them // as a sort of buffer. +// Here are the types of exec outputs per module type: +// +// values valuesForFileOutput +// --------------------------------------------------- +// store: deltas kvops +// mapper: data same data +// index: keys -- type Buffer struct { - values map[string][]byte - clock *pbsubstreams.Clock + values map[string][]byte + valuesForFileOutput map[string][]byte + + clock *pbsubstreams.Clock } -func (b *Buffer) Len() (out int) { - for _, v := range b.values { +func (i *Buffer) Len() (out int) { + for _, v := range i.values { out += len(v) } @@ -40,8 +49,9 @@ func NewBuffer(blockType string, block *pbbstream.Block, clock *pbsubstreams.Clo } return &Buffer{ - clock: clock, - values: values, + clock: clock, + values: values, + valuesForFileOutput: make(map[string][]byte), }, nil } @@ -52,7 +62,7 @@ func (i *Buffer) Clock() *pbsubstreams.Clock { func (i *Buffer) Get(moduleName string) (value []byte, cached bool, err error) { val, found := i.values[moduleName] if !found { - return nil, false, NotFound + return nil, false, ErrNotFound } return val, true, nil } @@ -61,3 +71,9 @@ func (i *Buffer) Set(moduleName string, value []byte) (err error) { i.values[moduleName] = value return nil } + +func (i *Buffer) SetFileOutput(moduleName string, value []byte) (err error) { + i.valuesForFileOutput[moduleName] = value + + return nil +} diff --git a/storage/execout/config.go b/storage/execout/config.go index 9d8b3cea9..253ba28ed 100644 --- a/storage/execout/config.go +++ b/storage/execout/config.go @@ -26,7 +26,12 @@ type Config struct { } func NewConfig(name string, moduleInitialBlock uint64, modKind pbsubstreams.ModuleKind, moduleHash string, baseStore dstore.Store, logger *zap.Logger) (*Config, error) { - subStore, err := baseStore.SubStore(fmt.Sprintf("%s/outputs", moduleHash)) + subName := fmt.Sprintf("%s/outputs", moduleHash) + if modKind == pbsubstreams.ModuleKindBlockIndex { + subName = fmt.Sprintf("%s/index", moduleHash) + } + + subStore, err := baseStore.SubStore(subName) if err != nil { return nil, fmt.Errorf("creating sub store: %w", err) } @@ -43,7 +48,7 @@ func NewConfig(name string, moduleInitialBlock uint64, modKind pbsubstreams.Modu func (c *Config) NewFile(targetRange *block.Range) *File { return &File{ - kv: make(map[string]*pboutput.Item), + Kv: make(map[string]*pboutput.Item), ModuleName: c.name, store: c.objStore, Range: targetRange, @@ -61,7 +66,16 @@ func (c *Config) ListSnapshotFiles(ctx context.Context, inRange *bstream.Range) files = nil return c.objStore.WalkFrom(ctx, "", computeDBinFilename(inRange.StartBlock(), 0), func(filename string) (err error) { - fileInfo, err := 
parseFileName(filename) + var fileInfo *FileInfo + + switch c.modKind { + case pbsubstreams.ModuleKindBlockIndex: + fileInfo, err = parseIndexFileName(filename) + case pbsubstreams.ModuleKindMap: + fileInfo, err = parseExecoutFileName(filename) + default: + return fmt.Errorf("wrong module kind: %v", c.modKind) + } if err != nil { c.logger.Warn("seen exec output file that we don't know how to parse", zap.String("filename", filename), zap.Error(err)) return nil @@ -82,7 +96,6 @@ func (c *Config) ListSnapshotFiles(ctx context.Context, inRange *bstream.Range) } func (c *Config) ReadFile(ctx context.Context, inrange *block.Range) (*File, error) { - file := c.NewFile(inrange) if err := file.Load(ctx); err != nil { return nil, err diff --git a/storage/execout/file.go b/storage/execout/file.go index f9abc185f..1c5f559ea 100644 --- a/storage/execout/file.go +++ b/storage/execout/file.go @@ -5,10 +5,8 @@ import ( "context" "fmt" "io" - "math" "sort" "strconv" - "strings" "sync" pboutput "github.com/streamingfast/substreams/storage/execout/pb" @@ -30,9 +28,11 @@ type File struct { *block.Range ModuleName string - kv map[string]*pboutput.Item + Kv map[string]*pboutput.Item store dstore.Store logger *zap.Logger + loaded bool + loadedSize uint64 } func (c *File) Filename() string { @@ -42,7 +42,7 @@ func (c *File) Filename() string { func (c *File) SortedItems() (out []*pboutput.Item) { // TODO(abourget): eventually, what is saved should be sorted before saving, // or we import a list and Load() automatically sorts what needs to be sorted. - for _, item := range c.kv { + for _, item := range c.Kv { out = append(out, item) } sort.Slice(out, func(i, j int) bool { @@ -52,7 +52,7 @@ func (c *File) SortedItems() (out []*pboutput.Item) { } func (c *File) ExtractClocks(clocksMap map[uint64]*pbsubstreams.Clock) { - for _, item := range c.kv { + for _, item := range c.Kv { if _, found := clocksMap[item.BlockNum]; !found { clocksMap[item.BlockNum] = &pbsubstreams.Clock{ Number: item.BlockNum, @@ -61,7 +61,6 @@ func (c *File) ExtractClocks(clocksMap map[uint64]*pbsubstreams.Clock) { } } } - return } func (c *File) SetItem(clock *pbsubstreams.Clock, data []byte) { @@ -80,14 +79,14 @@ func (c *File) SetItem(clock *pbsubstreams.Clock, data []byte) { Payload: cp, } - c.kv[clock.Id] = ci + c.Kv[clock.Id] = ci } func (c *File) Get(clock *pbsubstreams.Clock) ([]byte, bool) { c.Lock() defer c.Unlock() - cacheItem, found := c.kv[clock.Id] + cacheItem, found := c.Kv[clock.Id] if !found { return nil, false @@ -100,7 +99,7 @@ func (c *File) GetAtBlock(blockNumber uint64) ([]byte, bool) { c.Lock() defer c.Unlock() - for _, value := range c.kv { + for _, value := range c.Kv { if value.BlockNum == blockNumber { return value.Payload, true } @@ -110,10 +109,16 @@ func (c *File) GetAtBlock(blockNumber uint64) ([]byte, bool) { } func (c *File) Load(ctx context.Context) error { + c.Lock() + defer c.Unlock() + if c.loaded { + return nil + } + filename := computeDBinFilename(c.Range.StartBlock, c.Range.ExclusiveEndBlock) c.logger.Debug("loading execout file", zap.String("file_name", filename), zap.Object("block_range", c.Range)) - return derr.RetryContext(ctx, 5, func(ctx context.Context) error { + err := derr.RetryContext(ctx, 5, func(ctx context.Context) error { objectReader, err := c.store.OpenObject(ctx, filename) if err == dstore.ErrNotFound { return derr.NewFatalError(err) @@ -128,23 +133,27 @@ func (c *File) Load(ctx context.Context) error { if err != nil { return fmt.Errorf("reading store file %s: %w", filename, err) } + 
c.loadedSize = uint64(len(bytes)) outputData := &pboutput.Map{} if err = outputData.UnmarshalFast(bytes); err != nil { return fmt.Errorf("unmarshalling file %s: %w", filename, err) } - c.kv = outputData.Kv + c.Kv = outputData.Kv - c.logger.Debug("outputs data loaded", zap.Int("output_count", len(c.kv)), zap.Stringer("block_range", c.Range)) + c.logger.Debug("outputs data loaded", zap.Int("output_count", len(c.Kv)), zap.Stringer("block_range", c.Range)) return nil }) + if err == nil { + c.loaded = true + } + return err } func (c *File) Save(ctx context.Context) error { - filename := c.Filename() - outputData := &pboutput.Map{Kv: c.kv} + outputData := &pboutput.Map{Kv: c.Kv} cnt, err := outputData.MarshalFast() if err != nil { return fmt.Errorf("unmarshalling file %s: %w", filename, err) @@ -169,88 +178,14 @@ func (c *File) MarshalLogObject(enc zapcore.ObjectEncoder) error { enc.AddString("module", c.ModuleName) enc.AddUint64("start_block", c.Range.StartBlock) enc.AddUint64("end_block", c.Range.ExclusiveEndBlock) - enc.AddInt("kv_count", len(c.kv)) + enc.AddInt("kv_count", len(c.Kv)) return nil } -// -//func listContinuousCacheRanges(cachedRanges block.Ranges, from uint64) block.Ranges { -// cachedRangeCount := len(cachedRanges) -// var out block.Ranges -// for i, r := range cachedRanges { -// if r.StartBlock < from { -// continue -// } -// out = append(out, r) -// if cachedRangeCount > i+1 { -// next := cachedRanges[i+1] -// if next.StartBlock != r.ExclusiveEndBlock { //continuous seq broken -// break -// } -// } -// } -// -// return out -//} - -func findBlockRange(ctx context.Context, store dstore.Store, prefixStartBlock uint64) (*block.Range, bool, error) { - var exclusiveEndBlock uint64 - - paddedBlock := pad(prefixStartBlock) - - var files []string - err := derr.RetryContext(ctx, 3, func(ctx context.Context) (err error) { - files, err = store.ListFiles(ctx, paddedBlock, math.MaxInt64) - return - }) - if err != nil { - return nil, false, fmt.Errorf("walking prefix for padded block %s: %w", paddedBlock, err) - } - - if len(files) == 0 { - return nil, false, nil - } - - biggestEndBlock := uint64(0) - - for _, file := range files { - endBlock, err := getExclusiveEndBlock(file) - if err != nil { - return nil, false, fmt.Errorf("getting exclusive end block from file %s: %w", file, err) - } - if endBlock > biggestEndBlock { - biggestEndBlock = endBlock - } - } - - exclusiveEndBlock = biggestEndBlock - - return block.NewRange(prefixStartBlock, exclusiveEndBlock), true, nil -} - func computeDBinFilename(startBlock, stopBlock uint64) string { return fmt.Sprintf("%010d-%010d.output", startBlock, stopBlock) } -func pad(blockNumber uint64) string { - return fmt.Sprintf("%010d", blockNumber) -} - -func ComputeStartBlock(startBlock uint64, saveBlockInterval uint64) uint64 { - return startBlock - startBlock%saveBlockInterval -} - -func getExclusiveEndBlock(filename string) (uint64, error) { - endBlock := strings.Split(strings.Split(filename, "-")[1], ".")[0] - parsedInt, err := strconv.ParseInt(strings.TrimLeft(endBlock, "0"), 10, 64) - - if err != nil { - return 0, fmt.Errorf("parsing int %d: %w", parsedInt, err) - } - - return uint64(parsedInt), nil -} - func mustAtoi(s string) int { i, err := strconv.Atoi(s) if err != nil { diff --git a/storage/execout/file_test.go b/storage/execout/file_test.go index cd5ef2224..953ce352c 100644 --- a/storage/execout/file_test.go +++ b/storage/execout/file_test.go @@ -12,15 +12,15 @@ import ( func TestExtractClocks(t *testing.T) { cases := []struct { name string - 
file File + file *File clocksDistributor map[uint64]*pbsubstreams.Clock expectedResult map[uint64]*pbsubstreams.Clock }{ { name: "sunny path", - file: File{ + file: &File{ ModuleName: "sunny_path", - kv: map[string]*pboutput.Item{"id1": {BlockNum: 1, BlockId: "1"}, "id2": {BlockNum: 2, BlockId: "3"}}, + Kv: map[string]*pboutput.Item{"id1": {BlockNum: 1, BlockId: "1"}, "id2": {BlockNum: 2, BlockId: "3"}}, }, clocksDistributor: map[uint64]*pbsubstreams.Clock{}, expectedResult: map[uint64]*pbsubstreams.Clock{1: {Number: 1, Id: "1"}, 2: {Number: 2, Id: "3"}}, diff --git a/storage/execout/filename.go b/storage/execout/filename.go index 6551a7c14..b0b880795 100644 --- a/storage/execout/filename.go +++ b/storage/execout/filename.go @@ -8,9 +8,11 @@ import ( ) var cacheFilenameRegex *regexp.Regexp +var indexFilenameRegex *regexp.Regexp func init() { cacheFilenameRegex = regexp.MustCompile(`([\d]+)-([\d]+)\.output`) + indexFilenameRegex = regexp.MustCompile(`([\d]+)-([\d]+)\.index`) } type FileInfos = []*FileInfo @@ -20,8 +22,8 @@ type FileInfo struct { BlockRange *block.Range } -func parseFileName(filename string) (*FileInfo, error) { - blockRange, err := fileNameToRange(filename) +func parseExecoutFileName(filename string) (*FileInfo, error) { + blockRange, err := fileNameToRange(filename, cacheFilenameRegex) if err != nil { return nil, fmt.Errorf("parsing filename %q: %w", filename, err) } @@ -30,8 +32,20 @@ func parseFileName(filename string) (*FileInfo, error) { BlockRange: blockRange, }, nil } -func fileNameToRange(filename string) (*block.Range, error) { - res := cacheFilenameRegex.FindAllStringSubmatch(filename, 1) + +func parseIndexFileName(filename string) (*FileInfo, error) { + blockRange, err := fileNameToRange(filename, indexFilenameRegex) + if err != nil { + return nil, fmt.Errorf("parsing filename %q: %w", filename, err) + } + return &FileInfo{ + Filename: filename, + BlockRange: blockRange, + }, nil +} + +func fileNameToRange(filename string, regex *regexp.Regexp) (*block.Range, error) { + res := regex.FindAllStringSubmatch(filename, 1) if len(res) != 1 { return nil, fmt.Errorf("invalid output cache filename, %q", filename) } diff --git a/storage/execout/filewalk.go b/storage/execout/filewalk.go index c414213e9..22780eca0 100644 --- a/storage/execout/filewalk.go +++ b/storage/execout/filewalk.go @@ -1,19 +1,32 @@ package execout -import "github.com/streamingfast/substreams/block" +import ( + "context" + "sync" + "sync/atomic" + + "github.com/streamingfast/substreams/block" +) // FileWalker allows you to jump from file to file, from segment to segment type FileWalker struct { config *Config segmenter *block.Segmenter segment int + + IsLocal bool + buffer map[int]*File + bufferLock sync.Mutex + previousFileSize atomic.Uint64 } func (c *Config) NewFileWalker(segmenter *block.Segmenter) *FileWalker { return &FileWalker{ config: c, + IsLocal: c.objStore.BaseURL().Scheme == "file", segmenter: segmenter, segment: segmenter.FirstIndex(), + buffer: make(map[int]*File), } } @@ -24,9 +37,50 @@ func (fw *FileWalker) File() *File { if rng == nil { return nil } + + fw.bufferLock.Lock() + defer fw.bufferLock.Unlock() + if file, found := fw.buffer[fw.segment]; found { + delete(fw.buffer, fw.segment) + return file + } + return fw.config.NewFile(rng) } +// PreloadNext loads the next file in the background so the consumer doesn't wait between each file. 
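+// Preloaded files are parked in fw.buffer, keyed by segment; File() hands them out and evicts them.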
+// This improves maximum throughput. +func (fw *FileWalker) PreloadNext(ctx context.Context) { + fw.bufferLock.Lock() + defer fw.bufferLock.Unlock() + fw.preload(ctx, fw.segment+1) + // we can preload the next two files if they are small enough. + // More than 2 shows no performance improvement and gobbles up memory. + if fw.segment != fw.segmenter.FirstIndex() && fw.previousFileSize.Load() < 104_857_600 { + fw.preload(ctx, fw.segment+2) + } + +} + +func (fw *FileWalker) preload(ctx context.Context, seg int) { + if _, found := fw.buffer[seg]; found { + return + } + rng := fw.segmenter.Range(seg) + if rng == nil { + return + } + + f := fw.config.NewFile(rng) + go func() { + // purposefully ignoring preload errors + if err := f.Load(ctx); err == nil { + fw.previousFileSize.Store(f.loadedSize) + } + }() + fw.buffer[seg] = f +} + // Move to the next func (fw *FileWalker) Next() { fw.segment++ diff --git a/storage/execout/init_test.go b/storage/execout/init_test.go deleted file mode 100644 index 1e29149d9..000000000 --- a/storage/execout/init_test.go +++ /dev/null @@ -1,5 +0,0 @@ -package execout - -import "github.com/streamingfast/logging" - -var zlog, _ = logging.PackageLogger("test", "github.com/streamingfast/substreams/pipeline/execout/cachev1") diff --git a/storage/execout/interface.go b/storage/execout/interface.go index 329c94c19..2954a7bdd 100644 --- a/storage/execout/interface.go +++ b/storage/execout/interface.go @@ -14,6 +14,7 @@ type ExecutionOutputGetter interface { type ExecutionOutputSetter interface { Set(name string, value []byte) (err error) + SetFileOutput(name string, value []byte) (err error) } // ExecutionOutput gets/sets execution output for a given graph at a given block @@ -22,4 +23,4 @@ type ExecutionOutput interface { ExecutionOutputSetter } -var NotFound = errors.New("inputs module value not found") +var ErrNotFound = errors.New("inputs module value not found") diff --git a/storage/execout/pb/noalloc_version.go b/storage/execout/pb/noalloc_version.go index c7e1485f1..2e3e0eadb 100644 --- a/storage/execout/pb/noalloc_version.go +++ b/storage/execout/pb/noalloc_version.go @@ -2,11 +2,11 @@ package pboutputcache import ( fmt "fmt" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" io "io" - reflect "reflect" "unsafe" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // Get the string from a '[]byte' without any allocation @@ -129,15 +129,6 @@ func (m *Array) UnmarshalVTNoAlloc(dAtA []byte) error { return nil } -// Get the bytes of a `string` variable without doing any allocation, useful for writing to storage -// with high efficiency. This method exists because `[]byte(stringVar)` does an allocation, by using -// this method, you avoid this allocation. 
-// -// See https://stackoverflow.com/q/59209493/697930 for full discussion -func unsafeGetBytes(s string) []byte { - return unsafe.Slice((*byte)(unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&s)).Data)), len(s)) -} - func (m *Item) UnmarshalVTNoAlloc(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -335,6 +326,7 @@ func (m *Item) UnmarshalVTNoAlloc(dAtA []byte) error { //m.Cursor = string(dAtA[iNdEx:postIndex]) m.Cursor = unsafeGetString(dAtA[iNdEx:postIndex]) iNdEx = postIndex + default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/storage/execout/pb/output.pb.go b/storage/execout/pb/output.pb.go index 7f3deb0e7..5f0b944c3 100644 --- a/storage/execout/pb/output.pb.go +++ b/storage/execout/pb/output.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v4.25.2 // source: output.proto package pboutputcache @@ -229,12 +229,11 @@ var file_output_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x42, - 0x4f, 0x5a, 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x74, + 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x73, 0x75, 0x62, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x2f, - 0x65, 0x78, 0x65, 0x63, 0x6f, 0x75, 0x74, 0x2f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x76, 0x31, 0x2f, - 0x70, 0x62, 0x3b, 0x70, 0x62, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x63, 0x61, 0x63, 0x68, 0x65, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x65, + 0x78, 0x65, 0x63, 0x6f, 0x75, 0x74, 0x2f, 0x70, 0x62, 0x3b, 0x70, 0x62, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x63, 0x61, 0x63, 0x68, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/storage/execout/writer.go b/storage/execout/writer.go index 4c5a7fae3..3e39b1f75 100644 --- a/storage/execout/writer.go +++ b/storage/execout/writer.go @@ -14,31 +14,38 @@ import ( type Writer struct { wg *sync.WaitGroup - currentFile *File - outputModule string + CurrentFile *File + outputModule string + isWriterForIndex bool } -func NewWriter(initialBlockBoundary, exclusiveEndBlock uint64, outputModule string, configs *Configs) *Writer { +func NewWriter(initialBlockBoundary, exclusiveEndBlock uint64, outputModule string, configs *Configs, isWriterForIndex bool) *Writer { w := &Writer{ - wg: &sync.WaitGroup{}, - outputModule: outputModule, + wg: &sync.WaitGroup{}, + outputModule: outputModule, + isWriterForIndex: isWriterForIndex, } segmenter := block.NewSegmenter(configs.execOutputSaveInterval, initialBlockBoundary, exclusiveEndBlock) walker := configs.NewFileWalker(outputModule, segmenter) - w.currentFile = walker.File() + w.CurrentFile = walker.File() return w } func (w *Writer) Write(clock *pbsubstreams.Clock, buffer *Buffer) { - if val, found := buffer.values[w.outputModule]; found { - w.currentFile.SetItem(clock, val) + if val, found := buffer.valuesForFileOutput[w.outputModule]; found { + w.CurrentFile.SetItem(clock, val) } } func (w *Writer) Close(ctx context.Context) 
error { - if err := w.currentFile.Save(ctx); err != nil { + // Skip outputs file saving for blockIndex module + if w.isWriterForIndex { + return nil + } + + if err := w.CurrentFile.Save(ctx); err != nil { return fmt.Errorf("flushing exec output writer: %w", err) } return nil diff --git a/storage/execout/writer_test.go b/storage/execout/writer_test.go index fd50968c0..87de94cf7 100644 --- a/storage/execout/writer_test.go +++ b/storage/execout/writer_test.go @@ -3,6 +3,7 @@ package execout import ( "testing" + "github.com/streamingfast/dstore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -12,18 +13,21 @@ var testConfigs = &Configs{ ConfigMap: map[string]*Config{ "A": { moduleInitialBlock: 5, + objStore: dstore.NewMockStore(nil), }, "B": { moduleInitialBlock: 10, + objStore: dstore.NewMockStore(nil), }, "C": { moduleInitialBlock: 15, + objStore: dstore.NewMockStore(nil), }, }, } func TestNewExecOutputWriterIsSubRequest(t *testing.T) { - res := NewWriter(11, 15, "A", testConfigs) + res := NewWriter(11, 15, "A", testConfigs, false) require.NotNil(t, res) - assert.Equal(t, 15, int(res.currentFile.ExclusiveEndBlock)) + assert.Equal(t, 15, int(res.CurrentFile.ExclusiveEndBlock)) } diff --git a/storage/index/config.go b/storage/index/config.go new file mode 100644 index 000000000..d21f66fdf --- /dev/null +++ b/storage/index/config.go @@ -0,0 +1,43 @@ +package index + +import ( + "fmt" + + "github.com/streamingfast/substreams/block" + + "github.com/streamingfast/dstore" + "go.uber.org/zap" +) + +type Config struct { + name string + moduleHash string + objStore dstore.Store + moduleInitialBlock uint64 + logger *zap.Logger +} + +func NewConfig(name string, moduleInitialBlock uint64, moduleHash string, baseStore dstore.Store, logger *zap.Logger) (*Config, error) { + subStore, err := baseStore.SubStore(fmt.Sprintf("%s/index", moduleHash)) + if err != nil { + return nil, fmt.Errorf("creating sub store: %w", err) + } + + return &Config{ + name: name, + objStore: subStore, + moduleInitialBlock: moduleInitialBlock, + moduleHash: moduleHash, + logger: logger.With(zap.String("module", name)), + }, nil +} + +func (c *Config) NewFile(targetRange *block.Range) *File { + return &File{ + moduleInitialBlock: c.moduleInitialBlock, + store: c.objStore, + moduleName: c.name, + logger: c.logger, + blockRange: targetRange, + } +} diff --git a/storage/index/configs.go b/storage/index/configs.go new file mode 100644 index 000000000..9cc0d7563 --- /dev/null +++ b/storage/index/configs.go @@ -0,0 +1,37 @@ +package index + +import ( + "fmt" + + "github.com/streamingfast/dstore" + "github.com/streamingfast/substreams/manifest" + pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" + "go.uber.org/zap" +) + +type Configs struct { + ConfigMap map[string]*Config + logger *zap.Logger +} + +func NewConfigs(baseObjectStore dstore.Store, allRequestedModules []*pbsubstreams.Module, moduleHashes *manifest.ModuleHashes, logger *zap.Logger) (*Configs, error) { + out := make(map[string]*Config) + for _, mod := range allRequestedModules { + conf, err := NewConfig( + mod.Name, + mod.InitialBlock, + moduleHashes.Get(mod.Name), + baseObjectStore, + logger, + ) + if err != nil { + return nil, fmt.Errorf("new index config for %q: %w", mod.Name, err) + } + out[mod.Name] = conf + } + + return &Configs{ + ConfigMap: out, + logger: logger, + }, nil +} diff --git a/storage/index/file.go b/storage/index/file.go new file mode 100644 index 000000000..323de475b --- /dev/null +++ 
b/storage/index/file.go @@ -0,0 +1,121 @@ +package index + +import ( + "bytes" + "context" + "fmt" + "io" + + "google.golang.org/protobuf/proto" + + pbindexes "github.com/streamingfast/substreams/storage/index/pb" + + "github.com/RoaringBitmap/roaring/roaring64" + "github.com/streamingfast/derr" + "github.com/streamingfast/dstore" + "github.com/streamingfast/substreams/block" + "go.uber.org/zap" +) + +type File struct { + blockRange *block.Range + store dstore.Store + moduleName string + moduleInitialBlock uint64 + Indices map[string]*roaring64.Bitmap + logger *zap.Logger +} + +func NewFile(baseStore dstore.Store, moduleHash string, moduleName string, logger *zap.Logger, blockRange *block.Range) (*File, error) { + subStore, err := baseStore.SubStore(fmt.Sprintf("%s/index", moduleHash)) + if err != nil { + return nil, fmt.Errorf("creating sub store: %w", err) + } + return &File{ + blockRange: blockRange, + store: subStore, + moduleName: moduleName, + logger: logger, + }, nil +} + +func (f *File) Set(indices map[string]*roaring64.Bitmap) { + f.Indices = indices +} + +func ConvertIndexesMapToBytes(indices map[string]*roaring64.Bitmap) (map[string][]byte, error) { + out := make(map[string][]byte, len(indices)) + for key, value := range indices { + valueToBytes, err := value.ToBytes() + if err != nil { + return nil, fmt.Errorf("converting bitmap to bytes: %w", err) + } + out[key] = valueToBytes + } + return out, nil +} + +func (f *File) Save(ctx context.Context) error { + filename := f.Filename() + convertedIndexes, err := ConvertIndexesMapToBytes(f.Indices) + if err != nil { + return fmt.Errorf("converting Indices to bytes: %w", err) + } + pbIndexesMap := pbindexes.Map{Indexes: convertedIndexes} + cnt, err := proto.Marshal(&pbIndexesMap) + if err != nil { + return fmt.Errorf("marshalling Indices: %w", err) + } + + f.logger.Info("writing Indices file", zap.String("filename", filename)) + return derr.RetryContext(ctx, 5, func(ctx context.Context) error { + reader := bytes.NewReader(cnt) + err := f.store.WriteObject(ctx, filename, reader) + return err + }) +} + +func (f *File) Load(ctx context.Context) error { + pbIndexesMap := pbindexes.Map{} + + filename := f.Filename() + file, err := f.store.OpenObject(ctx, filename) + if err != nil { + return err + } + content, err := io.ReadAll(file) + if err != nil { + return err + } + + err = proto.Unmarshal(content, &pbIndexesMap) + if err != nil { + return err + } + + f.Indices = make(map[string]*roaring64.Bitmap) + + for k, v := range pbIndexesMap.Indexes { + f.Indices[k] = roaring64.New() + _, err := f.Indices[k].FromUnsafeBytes(v) + if err != nil { + return err + } + } + + return nil +} + +func (f *File) Print() { + for k, v := range f.Indices { + fmt.Printf("%s: %v\n", k, v.ToArray()) + } +} + +func (f *File) Filename() string { + return computeDBinFilename(f.blockRange.StartBlock, f.blockRange.ExclusiveEndBlock) +} + +func computeDBinFilename(startBlock, stopBlock uint64) string { + return fmt.Sprintf("%010d-%010d.index", startBlock, stopBlock) +} diff --git a/storage/index/index.go b/storage/index/index.go new file mode 100644 index 000000000..f3cb02973 --- /dev/null +++ b/storage/index/index.go @@ -0,0 +1,44 @@ +package index + +import ( + "fmt" + + "github.com/RoaringBitmap/roaring/roaring64" + pbindex "github.com/streamingfast/substreams/pb/sf/substreams/index/v1" + "github.com/streamingfast/substreams/sqe" + "google.golang.org/protobuf/proto" +) + +func NewBlockIndex(expression sqe.Expression, indexModule string, bitmap *roaring64.Bitmap) 
*BlockIndex { + return &BlockIndex{ + expression: expression, + IndexModule: indexModule, + bitmap: bitmap, + } +} + +type BlockIndex struct { + expression sqe.Expression // applied on-the-fly, from the block index module outputs + IndexModule string + bitmap *roaring64.Bitmap // pre-applied +} + +func (bi *BlockIndex) ExcludesAllBlocks() bool { + return bi != nil && bi.bitmap != nil && bi.bitmap.IsEmpty() +} + +func (bi *BlockIndex) Precomputed() bool { + return bi.bitmap != nil +} + +func (bi *BlockIndex) Skip(blk uint64) bool { + return bi.bitmap != nil && !bi.bitmap.Contains(blk) +} + +func (bi *BlockIndex) SkipFromKeys(indexedKeys []byte) bool { + keys := &pbindex.Keys{} + if err := proto.Unmarshal(indexedKeys, keys); err != nil { + panic(fmt.Errorf("unmarshalling keys: %w", err)) + } + return !sqe.KeysApply(bi.expression, sqe.NewFromIndexKeys(keys)) +} diff --git a/storage/index/pb/indexes.pb.go b/storage/index/pb/indexes.pb.go new file mode 100644 index 000000000..49bd00124 --- /dev/null +++ b/storage/index/pb/indexes.pb.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v4.25.2 +// source: indexes.proto + +package pbindexes + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Map struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Indexes map[string][]byte `protobuf:"bytes,1,rep,name=indexes,proto3" json:"indexes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Map) Reset() { + *x = Map{} + if protoimpl.UnsafeEnabled { + mi := &file_indexes_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Map) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Map) ProtoMessage() {} + +func (x *Map) ProtoReflect() protoreflect.Message { + mi := &file_indexes_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Map.ProtoReflect.Descriptor instead. 
+func (*Map) Descriptor() ([]byte, []int) { + return file_indexes_proto_rawDescGZIP(), []int{0} +} + +func (x *Map) GetIndexes() map[string][]byte { + if x != nil { + return x.Indexes + } + return nil +} + +var File_indexes_proto protoreflect.FileDescriptor + +var file_indexes_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x16, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x76, 0x31, 0x22, 0x85, 0x01, 0x0a, 0x03, 0x4d, 0x61, 0x70, 0x12, + 0x42, 0x0a, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x2e, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x65, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x73, 0x75, 0x62, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x2f, 0x70, 0x62, 0x3b, 0x70, 0x62, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_indexes_proto_rawDescOnce sync.Once + file_indexes_proto_rawDescData = file_indexes_proto_rawDesc +) + +func file_indexes_proto_rawDescGZIP() []byte { + file_indexes_proto_rawDescOnce.Do(func() { + file_indexes_proto_rawDescData = protoimpl.X.CompressGZIP(file_indexes_proto_rawDescData) + }) + return file_indexes_proto_rawDescData +} + +var file_indexes_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_indexes_proto_goTypes = []interface{}{ + (*Map)(nil), // 0: sf.substreams.index.v1.Map + nil, // 1: sf.substreams.index.v1.Map.IndexesEntry +} +var file_indexes_proto_depIdxs = []int32{ + 1, // 0: sf.substreams.index.v1.Map.indexes:type_name -> sf.substreams.index.v1.Map.IndexesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_indexes_proto_init() } +func file_indexes_proto_init() { + if File_indexes_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_indexes_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Map); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_indexes_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_indexes_proto_goTypes, + DependencyIndexes: file_indexes_proto_depIdxs, + 
MessageInfos: file_indexes_proto_msgTypes, + }.Build() + File_indexes_proto = out.File + file_indexes_proto_rawDesc = nil + file_indexes_proto_goTypes = nil + file_indexes_proto_depIdxs = nil +} diff --git a/storage/index/pb/indexes.proto b/storage/index/pb/indexes.proto new file mode 100644 index 000000000..c54bb4d84 --- /dev/null +++ b/storage/index/pb/indexes.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package sf.substreams.index.v1; +option go_package = "github.com/streamingfast/substreams/storage/index/pb;pbindexes"; + +message Map { + map<string, bytes> indexes = 1; +} + diff --git a/storage/index/writer.go b/storage/index/writer.go new file mode 100644 index 000000000..a53bc21df --- /dev/null +++ b/storage/index/writer.go @@ -0,0 +1,76 @@ +package index + +import ( + "context" + "fmt" + + "github.com/streamingfast/substreams/block" + + "go.uber.org/zap" + + "github.com/RoaringBitmap/roaring/roaring64" + + "github.com/streamingfast/dstore" + "github.com/streamingfast/substreams/manifest" + pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" +) + +type Writer struct { + indexFile *File +} + +func NewWriter(indexFile *File) *Writer { + return &Writer{ + indexFile: indexFile, + } +} + +func (w *Writer) Write(indexes map[string]*roaring64.Bitmap) { + w.indexFile.Set(indexes) +} + +func (w *Writer) Close(ctx context.Context) error { + err := w.indexFile.Save(ctx) + if err != nil { + return fmt.Errorf("saving index file %s: %w", w.indexFile.moduleName, err) + } + + return nil +} + +// e.g. a request starting at block 500 with a bundle size of 1000 +// must look up the 0->1000 index file + +// GenerateBlockIndexWriters will only generate writers for modules that have no preexisting index file and whose block range is aligned with the bundle size +func GenerateBlockIndexWriters(ctx context.Context, baseStore dstore.Store, indexModules []*pbsubstreams.Module, ModuleHashes *manifest.ModuleHashes, logger *zap.Logger, blockRange *block.Range, bundleSize uint64) (writers map[string]*Writer, existingIndices map[string]map[string]*roaring64.Bitmap, err error) { + writers = make(map[string]*Writer) + existingIndices = make(map[string]map[string]*roaring64.Bitmap) + + isAligned := blockRange.StartBlock%bundleSize == 0 && blockRange.ExclusiveEndBlock%bundleSize == 0 + if !isAligned { // we align it, but won't write it because it would be missing blocks... + alignedStartBlock := blockRange.StartBlock - (blockRange.StartBlock % bundleSize) + blockRange = &block.Range{ + StartBlock: alignedStartBlock, + ExclusiveEndBlock: alignedStartBlock + bundleSize, + } + } + + for _, module := range indexModules { + currentFile, err := NewFile(baseStore, ModuleHashes.Get(module.Name), module.Name, logger, blockRange) + if err != nil { + return nil, nil, fmt.Errorf("creating new index file for %q: %w", module.Name, err) + } + if err := currentFile.Load(ctx); err == nil { + existingIndices[module.Name] = currentFile.Indices + continue + } + + if !isAligned { + continue + } + writers[module.Name] = NewWriter(currentFile) + + } + + return +}
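Taken together, File and Writer give each block-index module a persisted map of roaring bitmaps per bundle: Write sets the bitmaps on the underlying File, and Close saves them under moduleHash/index/0000000000-0000001000.index (for an aligned 0-1000 range). A minimal sketch of how a caller might drive this, with the obvious imports; the bitmaps argument (one bitmap per index key, per module) is illustrative, everything else is the API shown above:

    // Persist freshly computed block indexes for one aligned bundle.
    func writeBlockIndexes(ctx context.Context, baseStore dstore.Store, indexModules []*pbsubstreams.Module, hashes *manifest.ModuleHashes, logger *zap.Logger, bitmaps map[string]map[string]*roaring64.Bitmap) error {
        // Writers come back only for modules with no preexisting index file,
        // and only when the requested range is bundle-aligned.
        writers, existing, err := index.GenerateBlockIndexWriters(ctx, baseStore, indexModules, hashes, logger, block.NewRange(0, 1000), 1000)
        if err != nil {
            return err
        }
        _ = existing // indexes already on disk, returned pre-loaded as bitmaps
        for name, w := range writers {
            w.Write(bitmaps[name])
            if err := w.Close(ctx); err != nil { // Close saves the index file
                return fmt.Errorf("closing index writer for %q: %w", name, err)
            }
        }
        return nil
    }

The base_store.go changes that follow rename pendingOps to kvOps, which better reflects its role: the store buffers typed key/value operations per block, and Flush sorts them by ordinal before applying them to kv. ReadOps serializes that buffer, and ApplyOps unmarshals and flushes it on another store, so one block's writes can be replayed elsewhere; a round-trip sketch, with childStore and parentStore as illustrative stand-ins:

    ops := childStore.ReadOps()                       // proto-encoded pbssinternal.Operations
    if err := parentStore.ApplyOps(ops); err != nil { // unmarshal, sort by ordinal, apply
        return err
    }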
diff --git a/storage/store/base_store.go b/storage/store/base_store.go index 3f334ed5a..0d3f6dedf 100644 --- a/storage/store/base_store.go +++ b/storage/store/base_store.go @@ -14,8 +14,8 @@ import ( type baseStore struct { *Config - kv map[string][]byte // kv is the state, and assumes all deltas were already applied to it. - pendingOps *pbssinternal.Operations // operations to the curent block called from the WASM module // deltas are always deltas for the given block. they are produced when store is flushed // and used to read back in the store at different ordinals deltas []*pbsubstreams.StoreDelta + kv map[string][]byte // kv is the state, and assumes all deltas were already applied to it. + kvOps *pbssinternal.Operations // operations to the current block called from the WASM module // deltas are always deltas for the given block. they are produced when store is flushed // and used to read back in the store at different ordinals deltas []*pbsubstreams.StoreDelta @@ -48,13 +48,13 @@ func (b *baseStore) Reset() { if tracer.Enabled() { b.logger.Debug("flushing store", zap.Int("delta_count", len(b.deltas)), zap.Int("entry_count", len(b.kv)), zap.Uint64("total_size_bytes", b.totalSizeBytes)) } - b.pendingOps = &pbssinternal.Operations{} + b.kvOps = &pbssinternal.Operations{} b.deltas = nil b.lastOrdinal = 0 } func (b *baseStore) ReadOps() []byte { - data, err := proto.Marshal(b.pendingOps) + data, err := proto.Marshal(b.kvOps) if err != nil { panic(err) } @@ -74,13 +74,13 @@ if err := proto.Unmarshal(in, ops); err != nil { return err } - b.pendingOps = ops + b.kvOps = ops return b.Flush() } func (b *baseStore) Flush() error { - b.pendingOps.Sort() - for _, op := range b.pendingOps.Operations { + b.kvOps.Sort() + for _, op := range b.kvOps.Operations { switch op.Type { case pbssinternal.Operation_SET: b.set(op.Ord, op.Key, op.Value) diff --git a/storage/store/config.go b/storage/store/config.go index 059979a17..76db279b4 100644 --- a/storage/store/config.go +++ b/storage/store/config.go @@ -64,7 +64,7 @@ func NewConfig( func (c *Config) newBaseStore(logger *zap.Logger) *baseStore { return &baseStore{ Config: c, - pendingOps: &pbssinternal.Operations{}, + kvOps: &pbssinternal.Operations{}, kv: make(map[string][]byte), logger: logger.Named("store").With(zap.String("store_name", c.name), zap.String("module_hash", c.moduleHash)), marshaller: marshaller.Default(), diff --git a/storage/store/full_kv.go b/storage/store/full_kv.go index 25c0bfc35..b21a5d13a 100644 --- a/storage/store/full_kv.go +++ b/storage/store/full_kv.go @@ -24,7 +24,7 @@ func (s *FullKV) Marshaller() marshaller.Marshaller { func (s *FullKV) DerivePartialStore(initialBlock uint64) *PartialKV { b := &baseStore{ Config: s.Config, - pendingOps: &pbssinternal.Operations{}, + kvOps: &pbssinternal.Operations{}, kv: make(map[string][]byte), logger: s.logger, marshaller: marshaller.Default(), diff --git a/storage/store/full_kv_test.go b/storage/store/full_kv_test.go index 22419db1d..9f6982973 100644 --- a/storage/store/full_kv_test.go +++ b/storage/store/full_kv_test.go @@ -27,7 +27,7 @@ func TestFullKV_Save_Load_Empty_MapNotNil(t *testing.T) { baseStore: &baseStore{ kv: map[string][]byte{}, - pendingOps: &pbssinternal.Operations{}, + kvOps: &pbssinternal.Operations{}, logger: zap.NewNop(), marshaller: marshaller.Default(), @@ -48,7 +48,7 @@ func TestFullKV_Save_Load_Empty_MapNotNil(t *testing.T) { baseStore: &baseStore{ kv: map[string][]byte{}, - pendingOps: &pbssinternal.Operations{}, + kvOps: &pbssinternal.Operations{}, logger: zap.NewNop(), marshaller: marshaller.Default(), diff --git a/storage/store/init_test.go b/storage/store/init_test.go index 013a6f1bb..91c637062 100644 --- a/storage/store/init_test.go +++ b/storage/store/init_test.go @@ -32,7 +32,7 @@ func newTestBaseStore( require.NoError(t, err) return &baseStore{ Config: config, - pendingOps: &pbssinternal.Operations{}, + kvOps: &pbssinternal.Operations{}, kv: make(map[string][]byte), logger: zap.NewNop(), marshaller: &marshaller.Binary{}, diff --git a/storage/store/interface.go b/storage/store/interface.go index 0e5128b9a..5d732777c 100644 --- a/storage/store/interface.go +++ b/storage/store/interface.go @@ -77,6
+77,7 @@ type Iterable interface { type DeltaAccessor interface { SetDeltas([]*pbsubstreams.StoreDelta) GetDeltas() []*pbsubstreams.StoreDelta + ReadOps() []byte Flush() error ApplyDeltasReverse(deltas []*pbsubstreams.StoreDelta) ApplyDelta(delta *pbsubstreams.StoreDelta) diff --git a/storage/store/map.go b/storage/store/map.go index d1df95f46..6643a66da 100644 --- a/storage/store/map.go +++ b/storage/store/map.go @@ -2,10 +2,11 @@ package store import ( "errors" + "go.uber.org/zap/zapcore" ) -var NotFound = errors.New("store not found") +var ErrNotFound = errors.New("store not found") type Getter interface { Get(name string) (Store, bool) diff --git a/storage/store/merge.go b/storage/store/merge.go index ee954f4c1..265d52fe5 100644 --- a/storage/store/merge.go +++ b/storage/store/merge.go @@ -347,15 +347,6 @@ func strToBigFloat(in string) *big.Float { return newFloat.SetPrec(100) } -func strToFloat(in string) float64 { - newFloat, _, err := big.ParseFloat(in, 10, 100, big.ToNearestEven) - if err != nil { - panic(fmt.Sprintf("cannot load float %q: %s", in, err)) - } - f, _ := newFloat.SetPrec(100).Float64() - return f -} - func strToBigInt(in string) *big.Int { bi := &big.Int{} _, success := bi.SetString(in, 10) @@ -380,11 +371,3 @@ func floatToStr(f float64) string { func floatToBytes(f float64) []byte { return []byte(floatToStr(f)) } - -func bigFloatToStr(f *big.Float) string { - return f.Text('g', -1) -} - -func bigFloatToBytes(f *big.Float) []byte { - return []byte(bigFloatToStr(f)) -} diff --git a/storage/store/merge_test.go b/storage/store/merge_test.go index be900af77..edd096072 100644 --- a/storage/store/merge_test.go +++ b/storage/store/merge_test.go @@ -395,8 +395,8 @@ func newPartialStore(kv map[string][]byte, updatePolicy pbsubstreams.Module_Kind func newStore(kv map[string][]byte, updatePolicy pbsubstreams.Module_KindStore_UpdatePolicy, valueType string) *FullKV { b := &baseStore{ - kv: kv, - pendingOps: &pbssinternal.Operations{}, + kv: kv, + kvOps: &pbssinternal.Operations{}, Config: &Config{ updatePolicy: updatePolicy, valueType: valueType, diff --git a/storage/store/store_max.go b/storage/store/store_max.go index 0003a6b5e..d455294bd 100644 --- a/storage/store/store_max.go +++ b/storage/store/store_max.go @@ -10,7 +10,7 @@ import ( ) func (b *baseStore) SetMaxBigInt(ord uint64, key string, value *big.Int) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_MAX_BIG_INT, Ord: ord, Key: key, @@ -35,7 +35,7 @@ func (b *baseStore) setMaxBigInt(ord uint64, key string, value *big.Int) { } func (b *baseStore) SetMaxInt64(ord uint64, key string, value int64) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_MAX_INT64, Ord: ord, Key: key, @@ -60,7 +60,7 @@ func (b *baseStore) setMaxInt64(ord uint64, key string, value int64) { } func (b *baseStore) SetMaxFloat64(ord uint64, key string, value float64) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_MAX_FLOAT64, Ord: ord, Key: key, @@ -86,7 +86,7 @@ func (b *baseStore) setMaxFloat64(ord uint64, key string, value float64) { } func (b *baseStore) SetMaxBigDecimal(ord uint64, key string, value decimal.Decimal) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_MAX_BIG_DECIMAL, Ord: ord, Key: key, diff --git a/storage/store/store_max_test.go 
b/storage/store/store_max_test.go index 12999b086..98e728e54 100644 --- a/storage/store/store_max_test.go +++ b/storage/store/store_max_test.go @@ -75,8 +75,7 @@ func TestStoreSetMaxBigInt(t *testing.T) { func TestStoreSetMaxInt64(t *testing.T) { int64ptr := func(i int64) *int64 { - var p *int64 - p = new(int64) + p := new(int64) *p = i return p } @@ -143,8 +142,7 @@ func TestStoreSetMaxInt64(t *testing.T) { func TestStoreSetMaxFloat64(t *testing.T) { float64ptr := func(i float64) *float64 { - var p *float64 - p = new(float64) + p := new(float64) *p = i return p } diff --git a/storage/store/store_min.go b/storage/store/store_min.go index f52131c07..d722f62f7 100644 --- a/storage/store/store_min.go +++ b/storage/store/store_min.go @@ -10,7 +10,7 @@ import ( ) func (b *baseStore) SetMinBigInt(ord uint64, key string, value *big.Int) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_MIN_BIG_INT, Ord: ord, Key: key, @@ -35,7 +35,7 @@ func (b *baseStore) setMinBigInt(ord uint64, key string, value *big.Int) { } func (b *baseStore) SetMinInt64(ord uint64, key string, value int64) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_MIN_INT64, Ord: ord, Key: key, @@ -60,7 +60,7 @@ func (b *baseStore) setMinInt64(ord uint64, key string, value int64) { } func (b *baseStore) SetMinFloat64(ord uint64, key string, value float64) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_MIN_FLOAT64, Ord: ord, Key: key, @@ -86,7 +86,7 @@ func (b *baseStore) setMinFloat64(ord uint64, key string, value float64) { } func (b *baseStore) SetMinBigDecimal(ord uint64, key string, value decimal.Decimal) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_MIN_BIG_DECIMAL, Ord: ord, Key: key, diff --git a/storage/store/store_min_test.go b/storage/store/store_min_test.go index ce7db1ed4..af64f2697 100644 --- a/storage/store/store_min_test.go +++ b/storage/store/store_min_test.go @@ -74,8 +74,7 @@ func TestStoreSetMinBigInt(t *testing.T) { func TestStoreSetMinInt64(t *testing.T) { int64ptr := func(i int64) *int64 { - var p *int64 - p = new(int64) + p := new(int64) *p = i return p } @@ -142,8 +141,7 @@ func TestStoreSetMinInt64(t *testing.T) { func TestStoreSetMinFloat64(t *testing.T) { float64ptr := func(i float64) *float64 { - var p *float64 - p = new(float64) + p := new(float64) *p = i return p } diff --git a/storage/store/store_sum.go b/storage/store/store_sum.go index ca48d02ea..4de0e7b6f 100644 --- a/storage/store/store_sum.go +++ b/storage/store/store_sum.go @@ -9,7 +9,7 @@ import ( ) func (b *baseStore) SumBigInt(ord uint64, key string, value *big.Int) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SUM_BIG_INT, Ord: ord, Key: key, @@ -34,7 +34,7 @@ func (b *baseStore) sumBigInt(ord uint64, key string, value *big.Int) { } func (b *baseStore) SumInt64(ord uint64, key string, value int64) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SUM_INT64, Ord: ord, Key: key, @@ -59,7 +59,7 @@ func (b *baseStore) sumInt64(ord uint64, key string, value int64) { } func (b *baseStore) SumFloat64(ord uint64, key string, value float64) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: 
pbssinternal.Operation_SUM_FLOAT64, Ord: ord, Key: key, @@ -84,7 +84,7 @@ func (b *baseStore) sumFloat64(ord uint64, key string, value float64) { } func (b *baseStore) SumBigDecimal(ord uint64, key string, value decimal.Decimal) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SUM_BIG_DECIMAL, Ord: ord, Key: key, diff --git a/storage/store/value_append.go b/storage/store/value_append.go index beea2c02f..59961ed4e 100644 --- a/storage/store/value_append.go +++ b/storage/store/value_append.go @@ -7,7 +7,7 @@ import ( ) func (b *baseStore) Append(ord uint64, key string, value []byte) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_APPEND, Ord: ord, Key: key, diff --git a/storage/store/value_delete.go b/storage/store/value_delete.go index d94371a7e..be853b958 100644 --- a/storage/store/value_delete.go +++ b/storage/store/value_delete.go @@ -9,7 +9,7 @@ import ( ) func (b *baseStore) DeletePrefix(ord uint64, prefix string) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_DELETE_PREFIX, Ord: ord, Key: prefix, diff --git a/storage/store/value_set.go b/storage/store/value_set.go index 41e3ff323..26eb7c579 100644 --- a/storage/store/value_set.go +++ b/storage/store/value_set.go @@ -9,7 +9,7 @@ import ( ) func (b *baseStore) SetBytesIfNotExists(ord uint64, key string, value []byte) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_BYTES_IF_NOT_EXISTS, Ord: ord, Key: key, @@ -18,7 +18,7 @@ func (b *baseStore) SetBytesIfNotExists(ord uint64, key string, value []byte) { } func (b *baseStore) SetIfNotExists(ord uint64, key string, value string) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_IF_NOT_EXISTS, Ord: ord, Key: key, @@ -27,7 +27,7 @@ func (b *baseStore) SetIfNotExists(ord uint64, key string, value string) { } func (b *baseStore) SetBytes(ord uint64, key string, value []byte) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET_BYTES, Ord: ord, Key: key, @@ -36,7 +36,7 @@ func (b *baseStore) SetBytes(ord uint64, key string, value []byte) { } func (b *baseStore) Set(ord uint64, key string, value string) { - b.pendingOps.Add(&pbssinternal.Operation{ + b.kvOps.Add(&pbssinternal.Operation{ Type: pbssinternal.Operation_SET, Ord: ord, Key: key, @@ -55,7 +55,7 @@ func (b *baseStore) set(ord uint64, key string, value []byte) { } if len(key) == 0 { - panic(fmt.Sprintf("invalid key")) + panic(fmt.Sprintf("invalid key %q", key)) } cpValue := make([]byte, len(value)) copy(cpValue, value) diff --git a/test/complex_integration_test.go b/test/complex_integration_test.go new file mode 100644 index 000000000..3b1686e64 --- /dev/null +++ b/test/complex_integration_test.go @@ -0,0 +1,64 @@ +package integration + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAllAssertionsInComplex(t *testing.T) { + cases := []struct { + name string + startBlock uint64 + linearHandoffBlock uint64 + exclusiveEndBlock uint64 + moduleName string + expectError bool + }{ + { + name: "startblock too low", + startBlock: 10, + linearHandoffBlock: 20, + exclusiveEndBlock: 80, + moduleName: "all_assert_init_20", + expectError: true, + }, + { + name: "linear mode test", + startBlock: 20, + linearHandoffBlock: 20, + 
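+ // startBlock == linearHandoffBlock: presumably no parallel backprocessing runs before the handoff, so the whole range is processed through the linear pipeline (hence "linear mode")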
exclusiveEndBlock: 80, + moduleName: "all_assert_init_20", + }, + { + name: "starting before unaligned stores test", + startBlock: 20, + linearHandoffBlock: 100, + exclusiveEndBlock: 120, + moduleName: "all_assert_init_20", + }, + { + name: "starting after unaligned stores test", + startBlock: 50, + linearHandoffBlock: 100, + exclusiveEndBlock: 120, + moduleName: "all_assert_init_20", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + run := newTestRun(t, int64(c.startBlock), c.linearHandoffBlock, c.exclusiveEndBlock, c.moduleName, "./testdata/complex_substreams/complex-substreams-v0.1.0.spkg") + err := run.Run(t, c.moduleName) + if c.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } + + //assert.Len(t, listFiles(t, run.TempDir), 90) // All these .kv files on disk + // TODO: we don't produce those files when in linear mode.. + // because it produced inconsistent snapshots.. +} diff --git a/test/integration_test.go b/test/integration_test.go index 55c98048d..5b77a3650 100644 --- a/test/integration_test.go +++ b/test/integration_test.go @@ -2,6 +2,7 @@ package integration import ( "context" + "encoding/hex" "fmt" "math/big" "os" @@ -11,16 +12,14 @@ import ( "time" "github.com/streamingfast/bstream" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/streamingfast/substreams/block" + "github.com/streamingfast/substreams/manifest" "github.com/streamingfast/substreams/orchestrator/stage" "github.com/streamingfast/substreams/orchestrator/work" "github.com/streamingfast/substreams/reqctx" - - //_ "github.com/streamingfast/substreams/wasm/wasmtime" _ "github.com/streamingfast/substreams/wasm/wazero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestForkHandling(t *testing.T) { @@ -65,7 +64,7 @@ func TestForkHandling(t *testing.T) { {blockRef: bstream.NewBlockRef("5b", 5), previousID: "4b", libBlockRef: bstream.NewBlockRef("0a", 0)}, {blockRef: bstream.NewBlockRef("5a", 5), previousID: "4a", libBlockRef: bstream.NewBlockRef("0a", 0)}, {blockRef: bstream.NewBlockRef("6a", 6), previousID: "5a", libBlockRef: bstream.NewBlockRef("4a", 4)}, - {blockRef: bstream.NewBlockRef("7a", 6), previousID: "6a", libBlockRef: bstream.NewBlockRef("4a", 4)}, + {blockRef: bstream.NewBlockRef("7a", 7), previousID: "6a", libBlockRef: bstream.NewBlockRef("4a", 4)}, }, expectedResponseNames: []response{ {id: "1a", output: "assert_test_store_add_bigint"}, @@ -129,7 +128,7 @@ func TestForkHandling(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - run := newTestRun(t, 1, 1, 7, test.module) + run := newTestRun(t, 1, 1, 7, test.module, "./testdata/simple_substreams/substreams-test-v0.1.0.spkg") run.NewBlockGenerator = func(startBlock uint64, inclusiveStopBlock uint64) TestBlockGenerator { return &ForkBlockGenerator{ initialLIB: bstream.NewBlockRef("0a", 0), @@ -180,6 +179,9 @@ func TestForkHandling(t *testing.T) { } func TestOneStoreOneMap(t *testing.T) { + testStoreAddI64Hash := hex.EncodeToString([]byte("setup_test_store_add_i64")) + assertTestStoreAddI64Hash := hex.EncodeToString([]byte("assert_test_store_add_i64")) + tests := []struct { name string startBlock int64 @@ -199,11 +201,10 @@ func TestOneStoreOneMap(t *testing.T) { expectedResponseCount: 4, expectFiles: []string{ - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000001-0000000010.output", // store outputs - 
"ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000010-0000000020.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000010-0000000001.kv", // store states - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000020-0000000001.kv", - // "states/0000000025-0000000020.partial", // produced, then deleted + testStoreAddI64Hash + "/outputs/0000000001-0000000010.output", // store outputs + testStoreAddI64Hash + "/outputs/0000000010-0000000020.output", + testStoreAddI64Hash + "/states/0000000010-0000000001.kv", // store states + testStoreAddI64Hash + "/states/0000000020-0000000001.kv", }, }, { @@ -214,109 +215,92 @@ func TestOneStoreOneMap(t *testing.T) { production: false, expectedResponseCount: 7, expectFiles: []string{ - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000001-0000000010.output", // store outputs - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000010-0000000020.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000010-0000000001.kv", // store states - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000020-0000000001.kv", - // "states/0000000025-0000000020.partial", // produced, then deleted - //"states/0000000030-0000000001.kv", // Again, backprocess wouldn't save this one, nor does it need to. + testStoreAddI64Hash + "/outputs/0000000001-0000000010.output", // store outputs + testStoreAddI64Hash + "/outputs/0000000010-0000000020.output", + testStoreAddI64Hash + "/states/0000000010-0000000001.kv", // store states + testStoreAddI64Hash + "/states/0000000020-0000000001.kv", }, }, { name: "prod_mode_back_forward_to_lib", startBlock: 25, - linearBlock: 27, + linearBlock: 20, stopBlock: 29, production: true, expectedResponseCount: 4, expectFiles: []string{ - "3574de26d590713344b911bbc1c3bf3305ccb906/outputs/0000000020-0000000027.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000001-0000000010.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000010-0000000020.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000010-0000000001.kv", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000020-0000000001.kv", + testStoreAddI64Hash + "/outputs/0000000001-0000000010.output", + testStoreAddI64Hash + "/outputs/0000000010-0000000020.output", + testStoreAddI64Hash + "/states/0000000010-0000000001.kv", + testStoreAddI64Hash + "/states/0000000020-0000000001.kv", }, }, { name: "prod_mode_back_forward_to_stop", startBlock: 25, - linearBlock: 29, - stopBlock: 29, + linearBlock: 30, + stopBlock: 30, production: true, - expectedResponseCount: 4, + expectedResponseCount: 5, expectFiles: []string{ - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000001-0000000010.output", //store - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000010-0000000020.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000010-0000000001.kv", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000020-0000000001.kv", - "3574de26d590713344b911bbc1c3bf3305ccb906/outputs/0000000020-0000000029.output", // map + testStoreAddI64Hash + "/outputs/0000000001-0000000010.output", //store + testStoreAddI64Hash + "/outputs/0000000010-0000000020.output", + testStoreAddI64Hash + "/outputs/0000000020-0000000030.output", + testStoreAddI64Hash + "/states/0000000010-0000000001.kv", + testStoreAddI64Hash + "/states/0000000020-0000000001.kv", + testStoreAddI64Hash + "/states/0000000030-0000000001.kv", + assertTestStoreAddI64Hash + "/outputs/0000000020-0000000030.output", // map }, }, { name: 
"prod_mode_back_forward_to_stop_passed_boundary", startBlock: 25, - linearBlock: 38, - stopBlock: 38, + linearBlock: 40, + stopBlock: 41, production: true, - expectedResponseCount: 13, + expectedResponseCount: 16, expectFiles: []string{ - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000001-0000000010.output", // store - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000010-0000000020.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000020-0000000030.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000010-0000000001.kv", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000020-0000000001.kv", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000030-0000000001.kv", - "3574de26d590713344b911bbc1c3bf3305ccb906/outputs/0000000020-0000000030.output", // map - "3574de26d590713344b911bbc1c3bf3305ccb906/outputs/0000000030-0000000038.output", - }, - }, - { - name: "prod_mode_start_before_linear_and_firstboundary", - startBlock: 7, - linearBlock: 8, - stopBlock: 9, - production: true, - expectedResponseCount: 2, - expectFiles: []string{ - "3574de26d590713344b911bbc1c3bf3305ccb906/outputs/0000000001-0000000008.output", - }, - }, - { - name: "prod_mode_start_before_linear_then_pass_firstboundary", - startBlock: 7, - linearBlock: 8, - stopBlock: 15, - production: true, - expectedResponseCount: 8, - expectFiles: []string{ - //"states/0000000010-0000000001.kv", // TODO: not sure why this would have been produced with the prior code.. - "3574de26d590713344b911bbc1c3bf3305ccb906/outputs/0000000001-0000000008.output", + testStoreAddI64Hash + "/outputs/0000000001-0000000010.output", // store + testStoreAddI64Hash + "/outputs/0000000010-0000000020.output", + testStoreAddI64Hash + "/outputs/0000000020-0000000030.output", + testStoreAddI64Hash + "/outputs/0000000030-0000000040.output", + testStoreAddI64Hash + "/states/0000000010-0000000001.kv", + testStoreAddI64Hash + "/states/0000000020-0000000001.kv", + testStoreAddI64Hash + "/states/0000000030-0000000001.kv", + testStoreAddI64Hash + "/states/0000000040-0000000001.kv", + assertTestStoreAddI64Hash + "/outputs/0000000020-0000000030.output", // map + assertTestStoreAddI64Hash + "/outputs/0000000030-0000000040.output", }, }, { name: "prod_mode_partial_existing", startBlock: 1, - linearBlock: 29, - stopBlock: 29, + linearBlock: 30, + stopBlock: 30, production: true, preWork: func(t *testing.T, run *testRun, workerFactory work.WorkerFactory) { partialPreWork(t, 1, 10, 0, run, workerFactory) }, - expectedResponseCount: 28, + expectedResponseCount: 29, expectFiles: []string{ - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000001-0000000010.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/outputs/0000000010-0000000020.output", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000010-0000000001.kv", - "ebd5bb65aaf4471e468efea126f27dbddb37b59e/states/0000000020-0000000001.kv", - "3574de26d590713344b911bbc1c3bf3305ccb906/outputs/0000000001-0000000010.output", - "3574de26d590713344b911bbc1c3bf3305ccb906/outputs/0000000010-0000000020.output", - "3574de26d590713344b911bbc1c3bf3305ccb906/outputs/0000000020-0000000029.output", + testStoreAddI64Hash + "/outputs/0000000001-0000000010.output", + testStoreAddI64Hash + "/outputs/0000000010-0000000020.output", + testStoreAddI64Hash + "/outputs/0000000020-0000000030.output", + testStoreAddI64Hash + "/states/0000000010-0000000001.kv", + testStoreAddI64Hash + "/states/0000000020-0000000001.kv", + testStoreAddI64Hash + "/states/0000000030-0000000001.kv", + 
assertTestStoreAddI64Hash + "/outputs/0000000001-0000000010.output", + assertTestStoreAddI64Hash + "/outputs/0000000010-0000000020.output", + assertTestStoreAddI64Hash + "/outputs/0000000020-0000000030.output", }, }, } + + manifest.UseSimpleHash = true + for _, test := range tests { t.Run(test.name, func(t *testing.T) { - run := newTestRun(t, test.startBlock, test.linearBlock, test.stopBlock, "assert_test_store_add_i64") + run := newTestRun(t, test.startBlock, test.linearBlock, test.stopBlock, "assert_test_store_add_i64", "./testdata/simple_substreams/substreams-test-v0.1.0.spkg") run.ProductionMode = test.production run.ParallelSubrequests = 1 run.PreWork = test.preWork @@ -329,20 +313,20 @@ func TestOneStoreOneMap(t *testing.T) { assert.Equal(t, test.expectedResponseCount, strings.Count(mapOutput, "\n")) withZST := func(s []string) []string { - res := make([]string, len(s), len(s)) + res := make([]string, len(s)) for i, v := range s { res[i] = fmt.Sprintf("%s.zst", v) } return res } - assertFiles(t, run.TempDir, withZST(test.expectFiles)...) + assertFiles(t, run.TempDir, true, withZST(test.expectFiles)...) }) } } func TestStoreDeletePrefix(t *testing.T) { - run := newTestRun(t, 30, 41, 41, "assert_test_store_delete_prefix") + run := newTestRun(t, 30, 40, 42, "assert_test_store_delete_prefix", "./testdata/simple_substreams/substreams-test-v0.1.0.spkg") run.BlockProcessedCallback = func(ctx *execContext) { if ctx.block.Number == 40 { s, storeFound := ctx.stores.Get("test_store_delete_prefix") @@ -356,7 +340,7 @@ func TestStoreDeletePrefix(t *testing.T) { func TestAllAssertions(t *testing.T) { // Relies on `assert_all_test` having modInit == 1, so - run := newTestRun(t, 1, 31, 31, "assert_all_test") + run := newTestRun(t, 1, 31, 31, "assert_all_test", "./testdata/simple_substreams/substreams-test-v0.1.0.spkg") require.NoError(t, run.Run(t, "assert_all_test")) @@ -366,7 +350,7 @@ func TestAllAssertions(t *testing.T) { } func Test_SimpleMapModule(t *testing.T) { - run := newTestRun(t, 10000, 10001, 10001, "test_map") + run := newTestRun(t, 10000, 10001, 10001, "test_map", "./testdata/simple_substreams/substreams-test-v0.1.0.spkg") run.Params = map[string]string{"test_map": "my test params"} run.NewBlockGenerator = func(startBlock uint64, inclusiveStopBlock uint64) TestBlockGenerator { return &LinearBlockGenerator{ @@ -381,7 +365,7 @@ func Test_SimpleMapModule(t *testing.T) { } func Test_Early(t *testing.T) { - run := newTestRun(t, 12, 14, 14, "test_map") + run := newTestRun(t, 12, 14, 14, "test_map", "./testdata/simple_substreams/substreams-test-v0.1.0.spkg") run.Params = map[string]string{"test_map": "my test params"} run.ProductionMode = true run.NewBlockGenerator = func(startBlock uint64, inclusiveStopBlock uint64) TestBlockGenerator { @@ -396,7 +380,7 @@ func Test_Early(t *testing.T) { } func TestEarlyWithEmptyStore(t *testing.T) { - run := newTestRun(t, 2, 4, 4, "assert_test_store_delete_prefix") + run := newTestRun(t, 2, 4, 4, "assert_test_store_delete_prefix", "./testdata/simple_substreams/substreams-test-v0.1.0.spkg") run.ProductionMode = true var foundBlock3 bool @@ -412,7 +396,7 @@ func TestEarlyWithEmptyStore(t *testing.T) { } func Test_SingleMapModule_FileWalker(t *testing.T) { - run := newTestRun(t, 200, 250, 300, "test_map") + run := newTestRun(t, 200, 250, 300, "test_map", "./testdata/simple_substreams/substreams-test-v0.1.0.spkg") run.Params = map[string]string{"test_map": "my test params"} run.ProductionMode = true run.NewBlockGenerator = func(startBlock uint64, 
inclusiveStopBlock uint64) TestBlockGenerator { @@ -454,7 +438,7 @@ func listFiles(t *testing.T, tempDir string) []string { return storedFiles } -func assertFiles(t *testing.T, tempDir string, wantedFiles ...string) { +func assertFiles(t *testing.T, tempDir string, expectPartialSpkg bool, wantedFiles ...string) { producedFiles := listFiles(t, tempDir) actualFiles := make([]string, 0, len(producedFiles)) @@ -468,7 +452,10 @@ func assertFiles(t *testing.T, tempDir string, wantedFiles ...string) { actualFiles = append(actualFiles, filepath.Join(parts[3:]...)) } - assert.True(t, seenPartialSpkg, "substreams.partial.spkg should be produced") + if expectPartialSpkg { + assert.True(t, seenPartialSpkg, "substreams.partial.spkg should be produced") + } + assert.ElementsMatch(t, wantedFiles, actualFiles) } @@ -479,7 +466,7 @@ func partialPreWork(t *testing.T, start, end uint64, stageIdx int, run *testRun, // caller to `partialPreWork` doesn't need to be changed too much? :) segmenter := block.NewSegmenter(10, 0, 0) unit := stage.Unit{Segment: segmenter.IndexForStartBlock(start), Stage: stageIdx} - ctx := reqctx.WithRequest(run.Context, &reqctx.RequestDetails{Modules: run.Package.Modules, OutputModule: run.ModuleName, CacheTag: "tag"}) + ctx := reqctx.WithRequest(run.Context, &reqctx.RequestDetails{Modules: run.Package.Modules, OutputModule: run.ModuleName}) cmd := worker.Work(ctx, unit, block.NewRange(start, end), []string{run.ModuleName}, nil) result := cmd() msg, ok := result.(work.MsgJobSucceeded) diff --git a/test/runnable_test.go b/test/runnable_test.go index de73b5014..a2ff26296 100644 --- a/test/runnable_test.go +++ b/test/runnable_test.go @@ -58,8 +58,8 @@ type testRun struct { TempDir string } -func newTestRun(t *testing.T, startBlock int64, linearHandoffBlock, exclusiveEndBlock uint64, moduleName string) *testRun { - pkg := manifest.TestReadManifest(t, "./testdata/substreams-test-v0.1.0.spkg") +func newTestRun(t *testing.T, startBlock int64, linearHandoffBlock, exclusiveEndBlock uint64, moduleName string, manifestPath string) *testRun { + pkg := manifest.TestReadManifest(t, manifestPath) return &testRun{Package: pkg, StartBlock: startBlock, ExclusiveEndBlock: exclusiveEndBlock, ModuleName: moduleName, LinearHandoffBlockNum: linearHandoffBlock} } @@ -293,7 +293,7 @@ func processRequest( } runtimeConfig := config.RuntimeConfig{ - StateBundleSize: 10, + SegmentSize: 10, DefaultParallelSubrequests: parallelSubrequests, BaseObjectStore: baseStoreStore, DefaultCacheTag: "tag", @@ -330,7 +330,7 @@ func (r *TestRunner) Run(context.Context) error { err := r.pipe.ProcessBlock(blk, generatedBlock.obj) if err != nil && !errors.Is(err, io.EOF) { - return fmt.Errorf("process block: %w", err) + return fmt.Errorf("process block %d: %w", blk.Number, err) } if errors.Is(err, io.EOF) { return err diff --git a/test/testdata/complex_substreams/Cargo.lock b/test/testdata/complex_substreams/Cargo.lock new file mode 100644 index 000000000..89d1a3740 --- /dev/null +++ b/test/testdata/complex_substreams/Cargo.lock @@ -0,0 +1,569 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anyhow" +version = "1.0.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" + +[[package]] +name = "autocfg" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" + +[[package]] +name = "bigdecimal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "bitflags" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" + +[[package]] +name = "bytes" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "complex_substreams" +version = "0.0.1" +dependencies = [ + "anyhow", + "getrandom", + "hex-literal", + "num-bigint", + "num-traits", + "prost", + "prost-types", + "regex", + "substreams", +] + +[[package]] +name = "either" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "getrandom" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "indexmap" +version = "2.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.153" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" + +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" + +[[package]] +name = "log" +version = "0.4.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" + +[[package]] +name = "memchr" +version = "2.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" + +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + +[[package]] +name = "num-bigint" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "pad" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "petgraph" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" 
+dependencies = [ + "fixedbitset", + "indexmap", +] + +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +dependencies = [ + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "proc-macro2" +version = "1.0.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" +dependencies = [ + "bytes", + "heck", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 1.0.109", + "tempfile", + "which", +] + +[[package]] +name = "prost-derive" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-types" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" +dependencies = [ + "prost", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" + +[[package]] +name = "rustix" +version = "0.38.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "substreams" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3520661f782c338f0e3c6cfc001ac790ed5e68d8f28515139e2aa674f8bb54da" +dependencies = [ + "anyhow", + "bigdecimal", + "hex", + "hex-literal", + "num-bigint", + "num-integer", + "num-traits", + "pad", + "prost", + "prost-build", + "prost-types", + "substreams-macro", + "thiserror", +] + +[[package]] +name = "substreams-macro" +version = "0.5.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c15595ceab80fece579e462d4823048fe85d67922584c681f5e94305727ad9ee" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "thiserror", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +dependencies = [ + "cfg-if", + "fastrand", + "rustix", + "windows-sys", +] + +[[package]] +name = "thiserror" +version = "1.0.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.60", +] + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-width" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" + +[[package]] +name = 
"windows_i686_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" diff --git a/test/testdata/complex_substreams/Cargo.toml b/test/testdata/complex_substreams/Cargo.toml new file mode 100644 index 000000000..65c0473a4 --- /dev/null +++ b/test/testdata/complex_substreams/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "complex_substreams" +version = "0.0.1" +edition = "2021" + +[lib] +crate-type = ["cdylib"] + +[dependencies] +hex-literal = "0.3.4" +num-bigint = "0.4" +num-traits = "0.2.15" +prost = "0.11" +prost-types = "0.11" +substreams = { version = "0.5.6" } + +# Required so that ethabi > ethereum-types build correctly under wasm32-unknown-unknown +[target.wasm32-unknown-unknown.dependencies] +getrandom = { version = "0.2", features = ["custom"] } + +[build-dependencies] +anyhow = "1" +regex = "1.8" + +[profile.release] +lto = true +opt-level = 's' +strip = "debuginfo" diff --git a/test/testdata/complex_substreams/Makefile b/test/testdata/complex_substreams/Makefile new file mode 100644 index 000000000..168d70120 --- /dev/null +++ b/test/testdata/complex_substreams/Makefile @@ -0,0 +1,26 @@ +CARGO_VERSION := $(shell cargo version 2>/dev/null) + +.PHONY: build +build: +ifdef CARGO_VERSION + cargo build --target wasm32-unknown-unknown --release +else + @echo "Building substreams target using Docker. To speed up this step, install a Rust development environment." 
+ docker run --rm -ti --init -v ${PWD}:/usr/src --workdir /usr/src/ rust:bullseye cargo build --target wasm32-unknown-unknown --release +endif + +.PHONY: run +run: build + substreams run substreams.yaml $(if $(MODULE),$(MODULE),map_events) $(if $(START_BLOCK),-s $(START_BLOCK)) $(if $(STOP_BLOCK),-t $(STOP_BLOCK)) + +.PHONY: gui +gui: build + substreams gui substreams.yaml $(if $(MODULE),$(MODULE),map_events) $(if $(START_BLOCK),-s $(START_BLOCK)) $(if $(STOP_BLOCK),-t $(STOP_BLOCK)) + +.PHONY: protogen +protogen: + substreams protogen ./substreams.yaml --exclude-paths="sf/substreams,google" + +.PHONY: pack +pack: build + substreams pack substreams.yaml diff --git a/test/testdata/complex_substreams/buf.gen.yaml b/test/testdata/complex_substreams/buf.gen.yaml new file mode 100644 index 000000000..d2e6544e9 --- /dev/null +++ b/test/testdata/complex_substreams/buf.gen.yaml @@ -0,0 +1,12 @@ + +version: v1 +plugins: +- plugin: buf.build/community/neoeinstein-prost:v0.2.2 + out: src/pb + opt: + - file_descriptor_set=false + +- plugin: buf.build/community/neoeinstein-prost-crate:v0.3.1 + out: src/pb + opt: + - no_features diff --git a/test/testdata/complex_substreams/complex-substreams-v0.1.0.spkg b/test/testdata/complex_substreams/complex-substreams-v0.1.0.spkg new file mode 100644 index 000000000..b81471d08 Binary files /dev/null and b/test/testdata/complex_substreams/complex-substreams-v0.1.0.spkg differ diff --git a/test/testdata/complex_substreams/rust-toolchain.toml b/test/testdata/complex_substreams/rust-toolchain.toml new file mode 100644 index 000000000..ec334c0b1 --- /dev/null +++ b/test/testdata/complex_substreams/rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "1.65" +components = [ "rustfmt" ] +targets = [ "wasm32-unknown-unknown" ] \ No newline at end of file diff --git a/test/testdata/complex_substreams/src/lib.rs b/test/testdata/complex_substreams/src/lib.rs new file mode 100644 index 000000000..826276b7d --- /dev/null +++ b/test/testdata/complex_substreams/src/lib.rs @@ -0,0 +1,199 @@ +mod pb; + +use crate::pb::test; +use crate::pb::keys; +use substreams::errors::Error; +use substreams::prelude::*; +use substreams::store::StoreAdd; +use substreams::store::StoreNew; +use crate::pb::test::Block; +use substreams::{store, Hex}; + + + +#[substreams::handlers::map] +fn index_init_60(blk: test::Block) -> Result<keys::Keys, Error> { + let mut keys = keys::Keys::default(); + if blk.number % 2 == 0 { + keys.keys.push("even".to_string()); + } else { + keys.keys.push("odd".to_string()); + } + + Ok(keys) +} + +#[substreams::handlers::map] +fn map_using_index_init_70(blk: test::Block) -> Result<test::Boolean, Error> { + Ok(test::Boolean { result: true }) +}
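index_init_60 above is the block-index module exercised by these tests: it tags every block with an "even" or "odd" key, and map_using_index_init_70 is the downstream module whose execution can be filtered on those keys. On the Go side this feeds the BlockIndex type added earlier in this change; a sketch of the key-matching path, where sqe.Parse is an assumed expression constructor (only KeysApply, NewFromIndexKeys and SkipFromKeys appear in this diff) and rawKeys is a marshalled pbindex.Keys payload such as proto.Marshal(&pbindex.Keys{Keys: []string{"even"}}):

    // Report whether a block whose index module emitted rawKeys matches "even".
    func blockMatches(ctx context.Context, rawKeys []byte) (bool, error) {
        expr, err := sqe.Parse(ctx, "even") // assumed helper, not shown in this diff
        if err != nil {
            return false, err
        }
        // nil bitmap: nothing precomputed, so the expression is evaluated on the fly
        bi := index.NewBlockIndex(expr, "index_init_60", nil)
        return !bi.SkipFromKeys(rawKeys), nil
    }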
StoreSetInt64) { + let block_counter_times_two = second_store.get_last("block_counter_from_first_store").unwrap()*2; + fourth_store.set(0, format!("block_counter_from_second_store_times_two"), &block_counter_times_two) +} + +#[substreams::handlers::map] +fn assert_first_store_init_20(block: test::Block, first_store: StoreGetInt64) -> Result { + let block_counter = first_store.get_last("block_counter"); + + if block.number < 20 { + assert!(block_counter.is_none()); + return Ok(test::Boolean { result: true }) + } + + assert_eq!(block_counter.unwrap(), (block.number - 19) as i64); + Ok(test::Boolean { result: true }) +} + +#[substreams::handlers::map] +fn assert_first_store_deltas_init_20(block: test::Block, first_store: store::Deltas) -> Result { + let mut block_counter = None; + + first_store + .deltas + .iter() + .for_each(|delta| match delta.key.as_str() { + "block_counter" => block_counter = Some(delta.new_value), + x => panic!("unhandled key {}", x), + }); + + if block.number < 20 { + assert!(block_counter.is_none()); + return Ok(test::Boolean { result: true }) + } + + assert_eq!(block_counter.unwrap(), (block.number - 19) as i64); + Ok(test::Boolean { result: true }) +} + +#[substreams::handlers::map] +fn assert_second_store_init_20(block: test::Block, second_store: StoreGetInt64) -> Result { + let block_counter = second_store.get_last("block_counter_from_first_store"); + + if block.number < 30 { + assert!(block_counter.is_none()); + return Ok(test::Boolean { result: true }) + } + + assert_eq!(block_counter.unwrap(), (block.number - 19) as i64); + Ok(test::Boolean { result: true }) +} + +#[substreams::handlers::map] +fn assert_second_store_deltas_init_20(block: test::Block, second_store: store::Deltas) -> Result { + let mut block_counter = None; + + second_store + .deltas + .iter() + .for_each(|delta| match delta.key.as_str() { + "block_counter_from_first_store" => block_counter = Some(delta.new_value), + x => panic!("unhandled key {}", x), + }); + + if block.number < 30 { + assert!(block_counter.is_none()); + return Ok(test::Boolean { result: true }) + } + + assert_eq!(block_counter.unwrap(), (block.number - 19) as i64); + Ok(test::Boolean { result: true }) +} + +#[substreams::handlers::map] +fn assert_third_store_init_20(block: test::Block, third_store: StoreGetInt64) -> Result { + let block_counter = third_store.get_last("block_counter_from_second_store"); + + if block.number < 40 { + assert!(block_counter.is_none()); + return Ok(test::Boolean { result: true }) + } + + assert_eq!(block_counter.unwrap(), (block.number - 19) as i64); + Ok(test::Boolean { result: true }) +} + +#[substreams::handlers::map] +fn assert_third_store_deltas_init_20(block: test::Block, third_store: store::Deltas) -> Result { + let mut block_counter = None; + + third_store + .deltas + .iter() + .for_each(|delta| match delta.key.as_str() { + "block_counter_from_second_store" => block_counter = Some(delta.new_value), + x => panic!("unhandled key {}", x), + }); + + if block.number < 40 { + assert!(block_counter.is_none()); + return Ok(test::Boolean { result: true }) + } + + assert_eq!(block_counter.unwrap(), (block.number - 19) as i64); + Ok(test::Boolean { result: true }) +} + +#[substreams::handlers::map] +fn all_assert_init_20(result_one: test::Boolean, result_two: test::Boolean, result_three: test::Boolean, result_fourth: test::Boolean, result_fifth: test::Boolean, result_sixth: test::Boolean) -> Result { + // + Ok(test::Boolean { result: true }) +} + +#[substreams::handlers::map] +fn 
map_output_init_50(block: test::Block, third_store: StoreGetInt64) -> Result { + let fake_counter = third_store.get_last("block_counter_from_second_store").unwrap() as u64; + + let out = test::MapResult { + block_number: fake_counter, + block_hash: block.id, + }; + + Ok(out) +} + +#[substreams::handlers::map] +fn second_map_output_init_50(block: test::Block, third_store: StoreGetInt64, fourth_store: StoreGetInt64) -> Result { + let mut fake_counter = 0; + + if block.number > 40 { + fake_counter = third_store.get_last("block_counter_from_second_store").unwrap() as u64; + } + + let mut fake_counter_times_two = 0; + if block.number > 52 { + fake_counter_times_two = fourth_store.get_last("block_counter_from_second_store_times_two").unwrap() as u64; + } + + + let out = test::MapResult { + block_number: fake_counter + fake_counter_times_two, + block_hash: block.id, + }; + + Ok(out) +} + + + + diff --git a/test/testdata/complex_substreams/src/pb/mod.rs b/test/testdata/complex_substreams/src/pb/mod.rs new file mode 100644 index 000000000..4ae716078 --- /dev/null +++ b/test/testdata/complex_substreams/src/pb/mod.rs @@ -0,0 +1,7 @@ +#[allow(dead_code)] +#[path = "./sf.substreams.v1.test.rs"] +pub mod test; + +#[allow(dead_code)] +#[path = "./sf.substreams.index.v1.rs"] +pub mod keys; diff --git a/test/testdata/complex_substreams/src/pb/sf.substreams.index.v1.rs b/test/testdata/complex_substreams/src/pb/sf.substreams.index.v1.rs new file mode 100644 index 000000000..a9543652c --- /dev/null +++ b/test/testdata/complex_substreams/src/pb/sf.substreams.index.v1.rs @@ -0,0 +1,8 @@ +// @generated +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Keys { + #[prost(string, repeated, tag="1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +// @@protoc_insertion_point(module) diff --git a/test/testdata/complex_substreams/src/pb/sf.substreams.v1.test.rs b/test/testdata/complex_substreams/src/pb/sf.substreams.v1.test.rs new file mode 100644 index 000000000..77a281b10 --- /dev/null +++ b/test/testdata/complex_substreams/src/pb/sf.substreams.v1.test.rs @@ -0,0 +1,69 @@ +// @generated +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Block { + #[prost(string, tag="1")] + pub id: ::prost::alloc::string::String, + #[prost(uint64, tag="2")] + pub number: u64, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MapResult { + #[prost(uint64, tag="1")] + pub block_number: u64, + #[prost(string, tag="2")] + pub block_hash: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Boolean { + #[prost(bool, tag="1")] + pub result: bool, +} +/// Encoded file descriptor set for the `sf.substreams.v1.test` package +pub const FILE_DESCRIPTOR_SET: &[u8] = &[ + 0x0a, 0xc5, 0x05, 0x0a, 0x20, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x22, 0x2f, 0x0a, 0x05, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x4d, 0x0a, + 0x09, 0x4d, 0x61, 0x70, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 
0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22, 0x21, 0x0a, 0x07, + 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, + 0x50, 0x5a, 0x4e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x73, 0x75, 0x62, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2f, + 0x3b, 0x70, 0x62, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x74, 0x65, 0x73, + 0x74, 0x4a, 0x8c, 0x03, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x11, 0x01, 0x0a, 0x08, 0x0a, 0x01, + 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x1e, + 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x03, 0x00, 0x65, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, + 0x12, 0x03, 0x03, 0x00, 0x65, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x05, 0x00, 0x08, + 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x05, 0x08, 0x0d, 0x0a, 0x0b, 0x0a, + 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x06, 0x02, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x00, 0x05, 0x12, 0x03, 0x06, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x06, 0x09, 0x0b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, + 0x03, 0x06, 0x0e, 0x0f, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x07, 0x02, + 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x07, 0x02, 0x08, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x07, 0x09, 0x0f, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x07, 0x12, 0x13, 0x0a, 0x0a, 0x0a, 0x02, 0x04, + 0x01, 0x12, 0x04, 0x0a, 0x00, 0x0d, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, + 0x0a, 0x08, 0x11, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x0b, 0x02, 0x1a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x05, 0x12, 0x03, 0x0b, 0x02, 0x08, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0b, 0x09, 0x15, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0b, 0x18, 0x19, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, + 0x02, 0x01, 0x12, 0x03, 0x0c, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x05, + 0x12, 0x03, 0x0c, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, + 0x0c, 0x09, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0c, 0x16, + 0x17, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x0f, 0x00, 0x11, 0x01, 0x0a, 0x0a, 0x0a, + 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x0f, 0x08, 0x0f, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, + 0x00, 0x12, 0x03, 0x10, 0x02, 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x05, 0x12, + 0x03, 0x10, 0x02, 0x06, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x10, + 0x07, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x10, 
0x10, 0x11, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +]; +// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/test/testdata/complex_substreams/substreams.yaml b/test/testdata/complex_substreams/substreams.yaml new file mode 100644 index 000000000..78b3254f6 --- /dev/null +++ b/test/testdata/complex_substreams/substreams.yaml @@ -0,0 +1,164 @@ +specVersion: v0.1.0 +package: + name: complex_substreams + version: v0.1.0 + +binaries: + default: + type: wasm/rust-v1 + file: ./target/wasm32-unknown-unknown/release/complex_substreams.wasm + +protobuf: + files: + - "sf/substreams/v1/test/test.proto" + importPaths: + - ../../../proto + +modules: + - name: index_init_60 + initialBlock: 60 + kind: blockIndex + inputs: + - source: sf.substreams.v1.test.Block + output: + type: proto:sf.substreams.index.v1.Keys + + - name: map_using_index_init_70 + kind: map + initialBlock: 70 + inputs: + - source: sf.substreams.v1.test.Block + blockFilter: + module: index_init_60 + query: even + output: + type: proto:sf.substreams.v1.test.Boolean + + - name: first_store_init_20 + kind: store + initialBlock: 20 + updatePolicy: add + valueType: int64 + inputs: + - source: sf.substreams.v1.test.Block + + - name: assert_first_store_init_20 + kind: map + initialBlock: 20 + inputs: + - source: sf.substreams.v1.test.Block + - store: first_store_init_20 + output: + type: proto:sf.substreams.v1.test.Boolean + + - name: assert_first_store_deltas_init_20 + kind: map + initialBlock: 20 + inputs: + - source: sf.substreams.v1.test.Block + - store: first_store_init_20 + mode: deltas + + output: + type: proto:sf.substreams.v1.test.Boolean + + - name: second_store_init_30 + kind: store + initialBlock: 30 + updatePolicy: set + valueType: int64 + inputs: + - source: sf.substreams.v1.test.Block + - store: first_store_init_20 + + + - name: assert_second_store_init_20 + kind: map + initialBlock: 20 + inputs: + - source: sf.substreams.v1.test.Block + - store: second_store_init_30 + output: + type: proto:sf.substreams.v1.test.Boolean + + - name: assert_second_store_deltas_init_20 + kind: map + initialBlock: 20 + inputs: + - source: sf.substreams.v1.test.Block + - store: second_store_init_30 + mode: deltas + + output: + type: proto:sf.substreams.v1.test.Boolean + + - name: third_store_init_40 + kind: store + initialBlock: 40 + updatePolicy: set + valueType: int64 + inputs: + - source: sf.substreams.v1.test.Block + - store: second_store_init_30 + + - name: fourth_store_init_52 + kind: store + initialBlock: 52 + updatePolicy: set + valueType: int64 + inputs: + - source: sf.substreams.v1.test.Block + - store: second_store_init_30 + + - name: assert_third_store_init_20 + kind: map + initialBlock: 20 + inputs: + - source: sf.substreams.v1.test.Block + - store: third_store_init_40 + output: + type: proto:sf.substreams.v1.test.Boolean + + - name: assert_third_store_deltas_init_20 + kind: map + initialBlock: 20 + inputs: + - source: sf.substreams.v1.test.Block + - store: third_store_init_40 + mode: deltas + output: + type: proto:sf.substreams.v1.test.Boolean + + - name: all_assert_init_20 + kind: map + initialBlock: 20 + inputs: + - map: assert_first_store_init_20 + - map: assert_second_store_init_20 + - map: assert_third_store_init_20 + - map: assert_first_store_deltas_init_20 + - map: assert_second_store_deltas_init_20 + - map: assert_third_store_deltas_init_20 + + output: + type: proto:sf.substreams.v1.test.Boolean + + - name: map_output_init_50 + kind: map + initialBlock: 50 + inputs: + - source: 
sf.substreams.v1.test.Block
+      - store: third_store_init_40
+    output:
+      type: proto:sf.substreams.v1.test.MapResult
+
+  - name: second_map_output_init_50
+    kind: map
+    initialBlock: 50
+    inputs:
+      - source: sf.substreams.v1.test.Block
+      - store: third_store_init_40
+      - store: fourth_store_init_52
+    output:
+      type: proto:sf.substreams.v1.test.MapResult
+
diff --git a/test/testdata/simple_substreams/build b/test/testdata/simple_substreams/build
new file mode 100755
index 000000000..b345fbbef
--- /dev/null
+++ b/test/testdata/simple_substreams/build
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+cargo build --target wasm32-unknown-unknown --release
+substreams pack ./substreams.yaml
+mv substreams-test-v0.1.0.spkg ../
diff --git a/test/testdata/simple_substreams/build.sh b/test/testdata/simple_substreams/build.sh
index b345fbbef..978cd44c1 100755
--- a/test/testdata/simple_substreams/build.sh
+++ b/test/testdata/simple_substreams/build.sh
@@ -2,4 +2,4 @@
 
 cargo build --target wasm32-unknown-unknown --release
 substreams pack ./substreams.yaml
-mv substreams-test-v0.1.0.spkg ../
+
diff --git a/test/testdata/simple_substreams/rust-toolchain.toml b/test/testdata/simple_substreams/rust-toolchain.toml
index fd3414ca8..98d17b4dc 100644
--- a/test/testdata/simple_substreams/rust-toolchain.toml
+++ b/test/testdata/simple_substreams/rust-toolchain.toml
@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.60.0"
+channel = "1.75.0"
 components = [ "rustfmt" ]
 targets = [ "wasm32-unknown-unknown" ]
diff --git a/test/testdata/simple_substreams/src/generated/externs.rs b/test/testdata/simple_substreams/src/generated/externs.rs
index 894355236..81cbdad65 100644
--- a/test/testdata/simple_substreams/src/generated/externs.rs
+++ b/test/testdata/simple_substreams/src/generated/externs.rs
@@ -3,6 +3,27 @@ use substreams::errors::Error;
 use crate::pb;
 use crate::generated::substreams::{Substreams, SubstreamsTrait};
 
+#[no_mangle]
+pub extern "C" fn test_index(
+    block_ptr: *mut u8,
+    block_len: usize,
+) {
+    substreams::register_panic_hook();
+    let func = || -> Result<pb::keys::Keys, Error> {
+        let block: pb::test::Block = substreams::proto::decode_ptr(block_ptr, block_len).unwrap();
+
+        Substreams::test_index(
+            block,
+        )
+    };
+
+    let result = func();
+    if result.is_err() {
+        panic!("{:?}", &result.err().unwrap());
+    }
+    substreams::output(result.unwrap());
+}
 
 #[no_mangle]
 pub extern "C" fn test_map(
@@ -91,6 +112,26 @@ pub extern "C" fn assert_test_store_delete_prefix(
     substreams::output(result.unwrap());
 }
 
+#[no_mangle]
+pub extern "C" fn assert_test_index(
+    block_ptr: *mut u8,
+    block_len: usize,
+) {
+    substreams::register_panic_hook();
+    let func = || -> Result<pb::test::Boolean, Error> {
+        let block: pb::test::Block = substreams::proto::decode_ptr(block_ptr, block_len).unwrap();
+
+        Substreams::assert_test_index(block)
+    };
+    let result = func();
+    if result.is_err() {
+        panic!("{:?}", &result.err().unwrap());
+    }
+    substreams::output(result.unwrap());
+}
+
 #[no_mangle]
 pub extern "C" fn setup_test_store_add_i64(
     block_ptr: *mut u8,
@@ -2091,6 +2132,25 @@ pub extern "C" fn assert_all_test_delete_prefix(
     func()
 }
 
+#[no_mangle]
+pub extern "C" fn assert_all_test_index(
+    assert_test_index_ptr: *mut u8,
+    assert_test_index_len: usize,
+) {
+    substreams::register_panic_hook();
+    let func = || {
+        let store: substreams::store::StoreSetInt64 = substreams::store::StoreSetInt64::new();
+
+        let assert_test_index_prefix: pb::test::Boolean = substreams::proto::decode_ptr(assert_test_index_ptr, assert_test_index_len).unwrap();
+
+        Substreams::assert_all_test_index(assert_test_index_prefix,
+            store,
+        )
+    };
+    func()
+}
+
 #[no_mangle]
 pub extern "C" fn assert_all_test(
     assert_all_test_delete_prefix_ptr: u32,
@@ -2099,6 +2159,7 @@ pub extern "C" fn assert_all_test(
     assert_all_test_float64_ptr: u32,
     assert_all_test_bigint_ptr: u32,
     assert_all_test_bigdecimal_ptr: u32,
+    assert_all_test_index_ptr: u32,
 ) {
     substreams::register_panic_hook();
     let func = || -> Result<pb::test::Boolean, Error> {
@@ -2109,6 +2170,7 @@ pub extern "C" fn assert_all_test(
         let assert_all_test_float64: substreams::store::StoreGetInt64 = substreams::store::StoreGetInt64::new(assert_all_test_float64_ptr);
         let assert_all_test_bigint: substreams::store::StoreGetInt64 = substreams::store::StoreGetInt64::new(assert_all_test_bigint_ptr);
         let assert_all_test_bigdecimal: substreams::store::StoreGetInt64 = substreams::store::StoreGetInt64::new(assert_all_test_bigdecimal_ptr);
+        let assert_all_test_index: substreams::store::StoreGetInt64 = substreams::store::StoreGetInt64::new(assert_all_test_index_ptr);
 
         Substreams::assert_all_test(assert_all_test_delete_prefix,
             assert_all_test_string,
@@ -2116,7 +2178,7 @@ pub extern "C" fn assert_all_test(
             assert_all_test_float64,
             assert_all_test_bigint,
             assert_all_test_bigdecimal,
-
+            assert_all_test_index,
         )
     };
     let result = func();
diff --git a/test/testdata/simple_substreams/src/generated/substreams.rs b/test/testdata/simple_substreams/src/generated/substreams.rs
index fa98041b0..c951dc95b 100644
--- a/test/testdata/simple_substreams/src/generated/substreams.rs
+++ b/test/testdata/simple_substreams/src/generated/substreams.rs
@@ -5,6 +5,13 @@ use substreams::errors::Error;
 pub struct Substreams{}
 
 pub trait SubstreamsTrait {
+    fn test_index(
+        block: pb::test::Block,
+    ) -> Result<pb::keys::Keys, Error>;
+
+    fn assert_test_index(
+        block: pb::test::Block,
+    ) -> Result<pb::test::Boolean, Error>;
 
     fn test_map(
         params: String,
@@ -483,6 +490,11 @@ pub trait SubstreamsTrait {
         store: substreams::store::StoreSetInt64,
     );
 
+    fn assert_all_test_index(
+        assert_test_index: pb::test::Boolean,
+        store: substreams::store::StoreSetInt64,
+    );
+
     fn assert_all_test(
         assert_all_test_delete_prefix: substreams::store::StoreGetInt64,
         assert_all_test_string: substreams::store::StoreGetInt64,
@@ -490,6 +502,7 @@ pub trait SubstreamsTrait {
         assert_all_test_float64: substreams::store::StoreGetInt64,
         assert_all_test_bigint: substreams::store::StoreGetInt64,
         assert_all_test_bigdecimal: substreams::store::StoreGetInt64,
+        assert_all_test_index: substreams::store::StoreGetInt64,
     ) -> Result<pb::test::Boolean, Error>;
 }
diff --git a/test/testdata/simple_substreams/src/lib.rs b/test/testdata/simple_substreams/src/lib.rs
index 8dda8ece2..3466d703e 100644
--- a/test/testdata/simple_substreams/src/lib.rs
+++ b/test/testdata/simple_substreams/src/lib.rs
@@ -1,5 +1,4 @@
 mod generated;
-
 use prost::encoding::float;
 use prost::Message;
 use std::borrow::Borrow;
@@ -30,14 +29,31 @@ use substreams::{
 
 use crate::pb::test;
 use crate::pb::test::Block;
+use crate::pb::keys;
 
 mod pb;
+
 const TO_SET: i64 = 100;
 const TO_ADD: i64 = 1;
 const TO_SUBTRACT: i64 = -1;
 
 impl generated::substreams::SubstreamsTrait for generated::substreams::Substreams {
+    fn test_index(block: test::Block) -> Result<keys::Keys, Error> {
+        let mut keys = keys::Keys::default();
+        if block.number % 2 == 0 {
+            keys.keys.push("even".to_string());
+        } else {
+            keys.keys.push("odd".to_string());
+        }
+        Ok(keys)
+    }
+
+    fn assert_test_index(blk: test::Block) -> Result<test::Boolean, Error> {
+        assert!(blk.number % 2 == 0, "expected even block number");
+        Ok(test::Boolean { result: true })
+    }
+
     fn test_map(params: String, blk: test::Block) -> Result<test::MapResult, Error> {
         let out = test::MapResult {
             block_number: blk.number,
@@ -1446,6 +1462,13 @@ impl generated::substreams::SubstreamsTrait for generated::substreams::Substream
         //
     }
 
+    fn assert_all_test_index(
+        assert_test_index: pb::test::Boolean,
+        store: substreams::store::StoreSetInt64,
+    ) {
+        // No-op: this store only exists to wire assert_test_index into assert_all_test.
+    }
+
     fn assert_all_test(
         assert_all_test_delete_prefix: StoreGetInt64,
         assert_all_test_string: StoreGetInt64,
@@ -1453,6 +1476,7 @@ impl generated::substreams::SubstreamsTrait for generated::substreams::Substream
         assert_all_test_float64: StoreGetInt64,
         assert_all_test_bigint: StoreGetInt64,
         assert_all_test_bigdecimal: StoreGetInt64,
+        assert_all_test_index: StoreGetInt64,
     ) -> Result<test::Boolean, Error> {
         return Ok(test::Boolean { result: true });
     }
diff --git a/test/testdata/simple_substreams/src/pb/mod.rs b/test/testdata/simple_substreams/src/pb/mod.rs
index 0a0830f78..d3de45dd9 100644
--- a/test/testdata/simple_substreams/src/pb/mod.rs
+++ b/test/testdata/simple_substreams/src/pb/mod.rs
@@ -5,25 +5,8 @@ use std::str::FromStr;
 #[path = "./sf.substreams.v1.test.rs"]
 pub mod test;
 
-impl Into<String> for Block {
-    fn into(self) -> String {
-        format!("{}:{}", self.id, self.number)
-    }
-}
+#[allow(dead_code)]
+#[path = "./sf.substreams.index.v1.rs"]
+pub mod keys;
 
-impl From<String> for Block {
-    fn from(block_as_string: String) -> Self {
-        let values: Vec<&str> = block_as_string.split(":").collect();
-        println!("{:?}", values);
-        if values.len() != 3 {
-            return Self {
-                id: "default".to_string(),
-                number: 1,
-            };
-        }
-        Self {
-            id: values[0].to_string(),
-            number: u64::from_str(values[1]).unwrap(),
-        }
-    }
-}
diff --git a/test/testdata/simple_substreams/src/pb/sf.substreams.index.v1.rs b/test/testdata/simple_substreams/src/pb/sf.substreams.index.v1.rs
new file mode 100644
index 000000000..a9543652c
--- /dev/null
+++ b/test/testdata/simple_substreams/src/pb/sf.substreams.index.v1.rs
@@ -0,0 +1,8 @@
+// @generated
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Keys {
+    #[prost(string, repeated, tag="1")]
+    pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+}
+// @@protoc_insertion_point(module)
diff --git a/test/testdata/simple_substreams/substreams-test-v0.1.0.spkg b/test/testdata/simple_substreams/substreams-test-v0.1.0.spkg
new file mode 100644
index 000000000..e87fbafe2
Binary files /dev/null and b/test/testdata/simple_substreams/substreams-test-v0.1.0.spkg differ
diff --git a/test/testdata/simple_substreams/substreams.yaml b/test/testdata/simple_substreams/substreams.yaml
index b6d85668a..548624f05 100644
--- a/test/testdata/simple_substreams/substreams.yaml
+++ b/test/testdata/simple_substreams/substreams.yaml
@@ -20,6 +20,25 @@ binaries:
     my.types.v1: "pb::my_types_v1"
 
 modules:
+  - name: test_index
+    kind: blockIndex
+    initialBlock: 1
+    inputs:
+      - source: sf.substreams.v1.test.Block
+    output:
+      type: proto:sf.substreams.index.v1.Keys
+
+  - name: assert_test_index
+    kind: map
+    initialBlock: 1
+    inputs:
+      - source: sf.substreams.v1.test.Block
+        blockFilter:
+          module: test_index
+          query: even
+    output:
+      type: proto:sf.substreams.v1.test.MapResult
+
   - name: test_map
     kind: map
     initialBlock: 10
@@ -823,6 +842,14 @@ modules:
     inputs:
      - map: assert_test_store_delete_prefix
 
+  - name: assert_all_test_index
+    kind: store
+    initialBlock: 10
+    updatePolicy: set
+    valueType: int64
+    inputs:
+      - map: assert_test_index
+
   - name: assert_all_test
     kind: map
     initialBlock: 1
@@ -833,8 +860,9 @@ modules:
       - store: assert_all_test_float64
       - store: assert_all_test_bigint
       - store: assert_all_test_bigdecimal
+      - store: assert_all_test_index
     output:
       type: proto:sf.substreams.v1.test.Boolean
     params:
-      test_map: my default params value
\ No newline at end of file
+      test_map: my default params value
diff --git a/test/testdata/substreams-test-v0.1.0.spkg b/test/testdata/substreams-test-v0.1.0.spkg
deleted file mode 100644
index f7c934e1f..000000000
Binary files a/test/testdata/substreams-test-v0.1.0.spkg and /dev/null differ
diff --git a/test/tier2_integration_test.go b/test/tier2_integration_test.go
new file mode 100644
index 000000000..6541558da
--- /dev/null
+++ b/test/tier2_integration_test.go
@@ -0,0 +1,335 @@
+package integration
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+
+	pbindexes "github.com/streamingfast/substreams/storage/index/pb"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/streamingfast/substreams/storage/index"
+
+	"github.com/RoaringBitmap/roaring/roaring64"
+
+	"github.com/streamingfast/dstore"
+
+	pboutput "github.com/streamingfast/substreams/storage/execout/pb"
+
+	"github.com/streamingfast/substreams/manifest"
+
+	"github.com/streamingfast/substreams/block"
+	"github.com/stretchr/testify/require"
+
+	"github.com/streamingfast/substreams/orchestrator/work"
+	"github.com/streamingfast/substreams/reqctx"
+)
+
+type preCreatedIndices struct {
+	fileName string
+	indices  map[string]*roaring64.Bitmap
+}
+
+func TestTier2Call(t *testing.T) {
+	manifest.UseSimpleHash = true
+	mapInit50 := hex.EncodeToString([]byte("map_output_init_50"))
+	secondMapInit50 := hex.EncodeToString([]byte("second_map_output_init_50"))
+
+	firstStoreInit20 := hex.EncodeToString([]byte("first_store_init_20"))
+	secondStoreInit30 := hex.EncodeToString([]byte("second_store_init_30"))
+	thirdStoreInit40 := hex.EncodeToString([]byte("third_store_init_40"))
+	fourthStoreInit52 := hex.EncodeToString([]byte("fourth_store_init_52"))
+	blockIndexInit60 := hex.EncodeToString([]byte("index_init_60"))
+	mapUsingIndexInit70 := hex.EncodeToString([]byte("map_using_index_init_70"))
+
+	randomIndicesRange := roaring64.New()
+	randomIndicesRange.AddInt(70)
+	randomIndicesRange.AddInt(71)
+	randomIndicesRange.AddInt(72)
+	randomIndicesRange.AddInt(73)
+	randomIndicesRange.AddInt(74)
+	randomIndicesRange.AddInt(76)
+
+	ctx := context.Background()
+	cases := []struct {
+		name                  string
+		startBlock            uint64
+		endBlock              uint64
+		stage                 int
+		moduleName            string
+		stateBundleSize       uint64
+		manifestPath          string
+		preCreatedFiles       []string
+		preCreatedIndices     *preCreatedIndices
+		expectRemainingFiles  []string
+		mapOutputFileToCheck  string
+		expectedSkippedBlocks map[uint64]struct{}
+	}{
+		// Complex substreams package: "./testdata/complex_substreams/complex-substreams-v0.1.0.spkg"
+		// Output module: map_output_init_50
+		// Stage 0: [["first_store_init_20"]]
+		// Stage 1: [["second_store_init_30"]]
+		// Stage 2: [["third_store_init_40"]]
+		// Stage 3: [["map_output_init_50"]]
+		{
+			name:            "check full kv production in previous stages",
+			startBlock:      50,
+			endBlock:        60,
+			stage:           3,
+			moduleName:      "map_output_init_50",
+			stateBundleSize: 10,
+			manifestPath:    "./testdata/complex_substreams/complex-substreams-v0.1.0.spkg",
+			preCreatedFiles: []string{
+				firstStoreInit20 + "/states/0000000050-0000000020.kv.zst",
+				secondStoreInit30 + "/states/0000000050-0000000030.kv.zst",
+				thirdStoreInit40 + "/states/0000000050-0000000040.kv.zst",
+			},
+
+			expectRemainingFiles: []string{
+				firstStoreInit20 + "/states/0000000060-0000000020.kv",
+				secondStoreInit30 + "/states/0000000060-0000000030.kv",
+				thirdStoreInit40 + "/states/0000000060-0000000040.kv",
+
+				firstStoreInit20 + "/states/0000000050-0000000020.kv",
+				firstStoreInit20 + "/outputs/0000000050-0000000060.output",
+				secondStoreInit30 + "/states/0000000050-0000000030.kv",
+				secondStoreInit30 + "/outputs/0000000050-0000000060.output",
+				thirdStoreInit40 + "/states/0000000050-0000000040.kv",
+				thirdStoreInit40 + "/outputs/0000000050-0000000060.output",
+				mapInit50 + "/outputs/0000000050-0000000060.output",
+			},
+		},
+
+		// Complex substreams package: "./testdata/complex_substreams/complex-substreams-v0.1.0.spkg"
+		// Output module: second_map_output_init_50
+		// Stage 0: [["first_store_init_20"]]
+		// Stage 1: [["second_store_init_30"]]
+		// Stage 2: [["third_store_init_40","fourth_store_init_52"]]
+		// Stage 3: [["second_map_output_init_50"]]
+		{
+			name:            "stores with different initial blocks on the same stage",
+			startBlock:      50,
+			endBlock:        60,
+			stage:           3,
+			moduleName:      "second_map_output_init_50",
+			stateBundleSize: 10,
+			manifestPath:    "./testdata/complex_substreams/complex-substreams-v0.1.0.spkg",
+			preCreatedFiles: []string{
+				firstStoreInit20 + "/states/0000000050-0000000020.kv.zst",
+				secondStoreInit30 + "/states/0000000050-0000000030.kv.zst",
+				thirdStoreInit40 + "/states/0000000050-0000000040.kv.zst",
+			},
+
+			expectRemainingFiles: []string{
+				firstStoreInit20 + "/states/0000000060-0000000020.kv",
+				secondStoreInit30 + "/states/0000000060-0000000030.kv",
+				thirdStoreInit40 + "/states/0000000060-0000000040.kv",
+
+				firstStoreInit20 + "/states/0000000050-0000000020.kv",
+				firstStoreInit20 + "/outputs/0000000050-0000000060.output",
+				secondStoreInit30 + "/states/0000000050-0000000030.kv",
+				secondStoreInit30 + "/outputs/0000000050-0000000060.output",
+				thirdStoreInit40 + "/states/0000000050-0000000040.kv",
+				thirdStoreInit40 + "/outputs/0000000050-0000000060.output",
+				secondMapInit50 + "/outputs/0000000050-0000000060.output",
+
+				fourthStoreInit52 + "/states/0000000060-0000000052.kv",
+				fourthStoreInit52 + "/outputs/0000000052-0000000060.output",
+			},
+		},
+		// This test checks that an already-existing index file is loaded and used as-is.
+		// Complex substreams package: "./testdata/complex_substreams/complex-substreams-v0.1.0.spkg"
+		// Output module: map_using_index_init_70 with block filter on even keys
+		// Stage 0: [["index_init_60"],["map_using_index_init_70"]]
+		{
+			name:            "test index_init_60 with map_using_index_init_70 filtering through key 'even' with pre-existing random indices",
+			startBlock:      70,
+			endBlock:        80,
+			stage:           0,
+			moduleName:      "map_using_index_init_70",
+			stateBundleSize: 10,
+			manifestPath:    "./testdata/complex_substreams/complex-substreams-v0.1.0.spkg",
+
+			preCreatedIndices: &preCreatedIndices{
+				fileName: blockIndexInit60 + "/index/0000000070-0000000080.index",
+				indices:  map[string]*roaring64.Bitmap{"even": randomIndicesRange},
+			},
+
+			expectRemainingFiles: []string{
+				mapUsingIndexInit70 + "/outputs/0000000070-0000000080.output",
+				blockIndexInit60 + "/index/0000000070-0000000080.index",
+			},
+
+			mapOutputFileToCheck:  mapUsingIndexInit70 + "/outputs/0000000070-0000000080.output",
+			expectedSkippedBlocks: map[uint64]struct{}{75: {}, 77: {}, 78: {}, 79: {}, 80: {}},
+		},
+
+		// Complex substreams package: "./testdata/complex_substreams/complex-substreams-v0.1.0.spkg"
+		// Output module: map_using_index_init_70 with block filter on even keys
+		// Stage 0: [["index_init_60"],["map_using_index_init_70"]]
+		{
+			name:       "test index_init_60 with map_using_index_init_70 filtering through key 'even'",
+			startBlock: 70,
endBlock: 80, + stage: 0, + moduleName: "map_using_index_init_70", + stateBundleSize: 10, + manifestPath: "./testdata/complex_substreams/complex-substreams-v0.1.0.spkg", + + expectRemainingFiles: []string{ + blockIndexInit60 + "/index/0000000070-0000000080.index", + mapUsingIndexInit70 + "/outputs/0000000070-0000000080.output", + }, + + mapOutputFileToCheck: mapUsingIndexInit70 + "/outputs/0000000070-0000000080.output", + expectedSkippedBlocks: map[uint64]struct{}{71: {}, 73: {}, 75: {}, 77: {}, 79: {}}, + }, + } + + for _, test := range cases { + t.Run(test.name, func(t *testing.T) { + testTempDir := t.TempDir() + + extendedTempDir := filepath.Join(testTempDir, "test.store", "tag") + err := createFiles(extendedTempDir, test.preCreatedFiles) + require.NoError(t, err) + + if test.preCreatedIndices != nil { + err = createIndexFile(ctx, extendedTempDir, test.preCreatedIndices.fileName, test.preCreatedIndices.indices) + require.NoError(t, err) + } + + pkg := manifest.TestReadManifest(t, test.manifestPath) + + ctx = reqctx.WithRequest(ctx, &reqctx.RequestDetails{Modules: pkg.Modules, OutputModule: test.moduleName}) + + ctx = reqctx.WithTier2RequestParameters(ctx, reqctx.Tier2RequestParameters{ + BlockType: "sf.substreams.v1.test.Block", + StateBundleSize: test.stateBundleSize, + StateStoreURL: filepath.Join(testTempDir, "test.store"), + StateStoreDefaultTag: "tag", + }) + + responseCollector := newResponseCollector() + + newBlockGenerator := func(startBlock uint64, inclusiveStopBlock uint64) TestBlockGenerator { + return &LinearBlockGenerator{ + startBlock: startBlock, + inclusiveStopBlock: inclusiveStopBlock, + } + } + + workRange := block.NewRange(test.startBlock, test.endBlock) + + request := work.NewRequest(ctx, reqctx.Details(ctx), test.stage, workRange) + + err = processInternalRequest(t, ctx, request, nil, newBlockGenerator, responseCollector, nil, testTempDir) + require.NoError(t, err) + + withZST := func(s []string) []string { + res := make([]string, len(s)) + for i, v := range s { + res[i] = fmt.Sprintf("%s.zst", v) + } + return res + } + + assertFiles(t, testTempDir, false, withZST(test.expectRemainingFiles)...) 
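+			// When a test case names an output file to inspect, verify that none of the
+			// blocks the index was expected to filter out ever made it into that file.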
+
+			outputFileToCheck := test.mapOutputFileToCheck
+			if outputFileToCheck != "" {
+				err = checkBlockSkippedInOutputFile(ctx, extendedTempDir, outputFileToCheck, test.expectedSkippedBlocks)
+			}
+			require.NoError(t, err)
+		})
+	}
+}
+
+func createFiles(extendedTempDir string, files []string) error {
+	for _, file := range files {
+		_, err := createFile(extendedTempDir, file)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func createFile(extendedTempDir string, file string) (*os.File, error) {
+	desiredPath := filepath.Join(extendedTempDir, file)
+
+	err := os.MkdirAll(filepath.Dir(desiredPath), os.ModePerm)
+	if err != nil {
+		return nil, err
+	}
+
+	createdFile, err := os.Create(desiredPath)
+	if err != nil {
+		return nil, err
+	}
+
+	return createdFile, nil
+}
+
+func checkBlockSkippedInOutputFile(ctx context.Context, extendedTempDir, checkedFile string, expectedSkippedBlock map[uint64]struct{}) error {
+	s, err := dstore.NewStore(extendedTempDir, "zst", "zstd", false)
+	if err != nil {
+		return fmt.Errorf("initializing dstore for %q: %w", extendedTempDir, err)
+	}
+
+	fileReader, err := s.OpenObject(ctx, checkedFile)
+	if err != nil {
+		return fmt.Errorf("opening file: %w", err)
+	}
+
+	ctn, err := io.ReadAll(fileReader)
+	if err != nil {
+		return fmt.Errorf("reading store file: %w", err)
+	}
+
+	outputData := &pboutput.Map{}
+
+	if err = outputData.UnmarshalFast(ctn); err != nil {
+		return fmt.Errorf("unmarshalling file %s: %w", checkedFile, err)
+	}
+
+	for _, item := range outputData.Kv {
+		_, found := expectedSkippedBlock[item.BlockNum]
+		if found {
+			return fmt.Errorf("block %d should have been skipped", item.BlockNum)
+		}
+	}
+
+	return nil
+}
+
+func createIndexFile(ctx context.Context, extendedTempDir string, filename string, indices map[string]*roaring64.Bitmap) error {
+	data, err := index.ConvertIndexesMapToBytes(indices)
+	if err != nil {
+		return fmt.Errorf("converting indices into bytes: %w", err)
+	}
+
+	pbIndexesMap := pbindexes.Map{Indexes: data}
+	cnt, err := proto.Marshal(&pbIndexesMap)
+	if err != nil {
+		return fmt.Errorf("marshalling indices: %w", err)
+	}
+
+	store, err := dstore.NewStore(extendedTempDir, "zst", "zstd", false)
+	if err != nil {
+		return fmt.Errorf("initializing dstore for %q: %w", extendedTempDir, err)
+	}
+
+	reader := bytes.NewReader(cnt)
+	err = store.WriteObject(ctx, filename, reader)
+	if err != nil {
+		return fmt.Errorf("writing file %s: %w", filename, err)
+	}
+
+	return nil
+}
diff --git a/test/worker_test.go b/test/worker_test.go
index 9acbbe1ae..2342ff7f4 100644
--- a/test/worker_test.go
+++ b/test/worker_test.go
@@ -50,8 +50,8 @@ func (w *TestWorker) Work(ctx context.Context, unit stage.Unit, workRange *block
 	logger.Info("worker running test job",
 		zap.Strings("stage_modules", moduleNames),
 		zap.Int("stage", unit.Stage),
-		zap.Uint64("start_block_num", request.StartBlockNum),
-		zap.Uint64("stop_block_num", request.StopBlockNum),
+		zap.Uint64("segment_size", request.SegmentSize),
+		zap.Uint64("segment_number", request.SegmentNumber),
 	)
 
 	return func() loop.Msg {
@@ -60,8 +60,8 @@ func (w *TestWorker) Work(ctx context.Context, unit stage.Unit, workRange *block
 	}
 	logger.Info("worker done running job",
 		zap.String("output_module", request.OutputModule),
-		zap.Uint64("start_block_num", request.StartBlockNum),
-		zap.Uint64("stop_block_num", request.StopBlockNum),
+		zap.Uint64("segment_size", request.SegmentSize),
+		zap.Uint64("segment_number", request.SegmentNumber),
 		zap.Int("stage", unit.Stage),
 	)
diff --git a/tools/analytics_store_stats.go
b/tools/analytics_store_stats.go index 0d7993a19..b93072bb3 100644 --- a/tools/analytics_store_stats.go +++ b/tools/analytics_store_stats.go @@ -31,7 +31,7 @@ func init() { analyticsCmd.AddCommand(analyticsStoreStatsCmd) } -var EmptyStoreError = errors.New("store is empty") +var ErrEmptyStore = errors.New("store is empty") func StoreStatsE(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -63,7 +63,7 @@ func StoreStatsE(cmd *cobra.Command, args []string) error { go func() { start := time.Now() wg.Wait() - zlog.Debug("finished getting store stats", zap.Duration("duration", time.Now().Sub(start))) + zlog.Debug("finished getting store stats", zap.Duration("duration", time.Since(start))) close(statsStream) }() @@ -92,7 +92,7 @@ func StoreStatsE(cmd *cobra.Command, args []string) error { start := time.Now() defer func() { - zlog.Debug("finished getting store stats for module", zap.String("module", module.Name), zap.Duration("duration", time.Now().Sub(start))) + zlog.Debug("finished getting store stats for module", zap.String("module", module.Name), zap.Duration("duration", time.Since(start))) }() defer wg.Done() @@ -119,7 +119,7 @@ func StoreStatsE(cmd *cobra.Command, args []string) error { stateStore, fileInfos, err := getStore(ctx, conf, math.MaxUint64) if err != nil { - if errors.Is(err, EmptyStoreError) { + if errors.Is(err, ErrEmptyStore) { zlog.Debug("skipping empty store", zap.String("module", module.Name)) statsStream <- storeStats return @@ -158,7 +158,6 @@ func StoreStatsE(cmd *cobra.Command, args []string) error { } statsStream <- storeStats - return }(module) } @@ -242,11 +241,11 @@ func getStore(ctx context.Context, conf *store.Config, below uint64) (store.Stor if err != nil { return nil, nil, fmt.Errorf("listing snapshot files: %w", err) } - zlog.Debug("listing snapshot files", zap.Duration("duration", time.Now().Sub(start))) + zlog.Debug("listing snapshot files", zap.Duration("duration", time.Since(start))) if len(files) == 0 { zlog.Debug("store is empty", zap.String("module", conf.Name()), zap.String("hash", conf.ModuleHash())) - return nil, nil, EmptyStoreError + return nil, nil, ErrEmptyStore } kvFiles := make([]*store.FileInfo, 0, len(files)) @@ -259,14 +258,14 @@ func getStore(ctx context.Context, conf *store.Config, below uint64) (store.Stor if len(kvFiles) == 0 { zlog.Debug("store only contains partial files", zap.String("module", conf.Name()), zap.String("hash", conf.ModuleHash())) - return nil, nil, EmptyStoreError + return nil, nil, ErrEmptyStore } start = time.Now() sort.Slice(kvFiles, func(i, j int) bool { //reverse sort return kvFiles[i].Range.ExclusiveEndBlock >= kvFiles[j].Range.ExclusiveEndBlock }) - zlog.Debug("sorting snapshot files", zap.Duration("duration", time.Now().Sub(start))) + zlog.Debug("sorting snapshot files", zap.Duration("duration", time.Since(start))) var latestFiles []*store.FileInfo if len(kvFiles) > 5 { @@ -283,7 +282,7 @@ func getStore(ctx context.Context, conf *store.Config, below uint64) (store.Stor if err != nil { return nil, nil, fmt.Errorf("loading store: %w", err) } - zlog.Debug("loading store", zap.Duration("duration", time.Now().Sub(start))) + zlog.Debug("loading store", zap.Duration("duration", time.Since(start))) return s, latestFiles, nil } @@ -291,7 +290,7 @@ func getStore(ctx context.Context, conf *store.Config, below uint64) (store.Stor func calculateStoreStats(stateStore store.Store, stats *StoreStats) error { start := time.Now() defer func() { - zlog.Debug("calculating store stats", zap.Duration("duration", 
time.Now().Sub(start))) + zlog.Debug("calculating store stats", zap.Duration("duration", time.Since(start))) }() keyStats := &KeyStats{} diff --git a/tools/cmd.go b/tools/cmd.go index 79df1071a..a03886824 100644 --- a/tools/cmd.go +++ b/tools/cmd.go @@ -15,10 +15,10 @@ package tools import ( - "fmt" - "github.com/streamingfast/substreams/client" "os" - "time" + + "github.com/streamingfast/cli/sflags" + "github.com/streamingfast/substreams/client" "github.com/spf13/cobra" "github.com/streamingfast/cli" @@ -34,63 +34,8 @@ var ExamplePrefixed = func(prefix, in string) string { return string(cli.ExamplePrefixed(prefix, in)) } -func mustGetString(cmd *cobra.Command, flagName string) string { - val, err := cmd.Flags().GetString(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} - -func mustGetInt64(cmd *cobra.Command, flagName string) int64 { - val, err := cmd.Flags().GetInt64(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} -func mustGetUint64(cmd *cobra.Command, flagName string) uint64 { - val, err := cmd.Flags().GetUint64(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} -func mustGetBool(cmd *cobra.Command, flagName string) bool { - val, err := cmd.Flags().GetBool(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} -func mustGetDuration(cmd *cobra.Command, flagName string) time.Duration { - val, err := cmd.Flags().GetDuration(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} -func mustGetStringSlice(cmd *cobra.Command, flagName string) []string { - val, err := cmd.Flags().GetStringSlice(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - if len(val) == 0 { - return nil - } - return val -} - -func mustGetStringArray(cmd *cobra.Command, flagName string) []string { - val, err := cmd.Flags().GetStringArray(flagName) - if err != nil { - panic(fmt.Sprintf("flags: couldn't find flag %q", flagName)) - } - return val -} - func ReadAPIToken(cmd *cobra.Command, envFlagName string) string { - envVar := mustGetString(cmd, envFlagName) + envVar := sflags.MustGetString(cmd, envFlagName) value := os.Getenv(envVar) if value != "" { return value @@ -100,7 +45,7 @@ func ReadAPIToken(cmd *cobra.Command, envFlagName string) string { } func ReadAPIKey(cmd *cobra.Command, envFlagName string) string { - envVar := mustGetString(cmd, envFlagName) + envVar := sflags.MustGetString(cmd, envFlagName) value := os.Getenv(envVar) if value != "" { return value diff --git a/tools/decode.go b/tools/decode.go index 5c25379c7..1e5f7724a 100644 --- a/tools/decode.go +++ b/tools/decode.go @@ -11,6 +11,7 @@ import ( "github.com/jhump/protoreflect/dynamic" "github.com/spf13/cobra" "github.com/streamingfast/cli" + "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" "go.uber.org/zap" "google.golang.org/protobuf/types/descriptorpb" @@ -19,6 +20,7 @@ import ( "github.com/streamingfast/substreams/manifest" pbsubstreams "github.com/streamingfast/substreams/pb/sf/substreams/v1" "github.com/streamingfast/substreams/storage/execout" + "github.com/streamingfast/substreams/storage/index" "github.com/streamingfast/substreams/storage/store" ) @@ -36,9 +38,9 @@ var decodeOutputsModuleCmd = &cobra.Command{ file in place of ', or a link to a remote .spkg file, using urls gs://, 
http(s)://, ipfs://, etc.'.
 `),
 	Example: string(cli.ExamplePrefixed("substreams tools decode outputs", `
-		map_pools_created gs://[bucket-url-path] 12487090 pool:c772a65917d5da983b7fc3c9cfbfb53ef01aef7e
-		uniswap-v3.spkg store_pools gs://[bucket-url-path] 12487090 pool:c772a65917d5da983b7fc3c9cfbfb53ef01aef7e
-		dir-with-manifest store_pools gs://[bucket-url-path] 12487090 token:051cf5178f60e9def5d5a39b2a988a9f914107cb:dprice:eth
+		map_pools_created gs://[bucket-url-path] 12487090
+		uniswap-v3.spkg store_pools gs://[bucket-url-path] 12487090
+		dir-with-manifest store_pools gs://[bucket-url-path] 12487090
 	`)),
 	RunE:         runDecodeOutputsModuleRunE,
 	Args:         cobra.RangeArgs(3, 4),
@@ -64,19 +66,30 @@
 	SilenceUsage: true,
 }
 
+var decodeIndexModuleCmd = &cobra.Command{
+	Use:   "index [<manifest>] <module_name> <store_url> <block_number>",
+	Short: "Decode index file and print the key/values",
+	Example: string(cli.ExamplePrefixed("substreams tools decode index", `
+		map_pools_created gs://[bucket-url-path] 12487090 pool:c772a65917d5da983b7fc3c9cfbfb53ef01aef7e
+	`)),
+	RunE:         runDecodeIndexModuleRunE,
+	Args:         cobra.RangeArgs(3, 4),
+	SilenceUsage: true,
+}
+
 func init() {
-	decodeOutputsModuleCmd.Flags().Uint64("save-interval", 1000, "Output save interval")
-	decodeStatesModuleCmd.Flags().Uint64("save-interval", 1000, "states save interval")
+	decodeCmd.PersistentFlags().Uint64("save-interval", 1000, "Save interval (segment size)")
 
 	decodeCmd.AddCommand(decodeOutputsModuleCmd)
 	decodeCmd.AddCommand(decodeStatesModuleCmd)
+	decodeCmd.AddCommand(decodeIndexModuleCmd)
 	Cmd.AddCommand(decodeCmd)
 }
 
 func runDecodeStatesModuleRunE(cmd *cobra.Command, args []string) error {
 	ctx := cmd.Context()
-	saveInterval := mustGetUint64(cmd, "save-interval")
+	saveInterval := sflags.MustGetUint64(cmd, "save-interval")
 
 	manifestPath := ""
 	if len(args) == 5 {
@@ -138,7 +151,7 @@ func runDecodeStatesModuleRunE(cmd *cobra.Command, args []string) error {
 	moduleHash := hex.EncodeToString(hash)
 	zlog.Info("found module hash", zap.String("hash", moduleHash), zap.String("module", matchingModule.Name))
 
-	startBlock := execout.ComputeStartBlock(blockNumber, saveInterval)
+	startBlock := blockNumber - blockNumber%saveInterval
 
 	switch matchingModule.Kind.(type) {
 	case *pbsubstreams.Module_KindMap_:
@@ -149,9 +162,90 @@
 	return fmt.Errorf("module has an unknown")
 }
 
+func runDecodeIndexModuleRunE(cmd *cobra.Command, args []string) error {
+	ctx := cmd.Context()
+	saveInterval := sflags.MustGetUint64(cmd, "save-interval")
+
+	manifestPath := ""
+	if len(args) == 4 {
+		manifestPath = args[0]
+		args = args[1:]
+	}
+
+	moduleName := args[0]
+	storeURL := args[1]
+	blockNumber, err := strconv.ParseUint(args[2], 10, 64)
+	if err != nil {
+		return fmt.Errorf("converting blockNumber to uint: %w", err)
+	}
+
+	zlog.Info("decoding module",
+		zap.String("manifest_path", manifestPath),
+		zap.String("module_name", moduleName),
+		zap.String("store_url", storeURL),
+		zap.Uint64("block_number", blockNumber),
+		zap.Uint64("save_interval", saveInterval),
+	)
+
+	objStore, err := dstore.NewStore(storeURL, "zst", "zstd", false)
+	if err != nil {
+		return fmt.Errorf("initializing dstore for %q: %w", storeURL, err)
+	}
+
+	manifestReader, err := manifest.NewReader(manifestPath, manifest.SkipPackageValidationReader())
+	if err != nil {
+		return fmt.Errorf("manifest reader: %w", err)
+	}
+
+	pkg, graph, err := manifestReader.Read()
+	if err != nil {
+		return fmt.Errorf("read manifest %q: %w", manifestPath, err)
+	}
+
+	hashes := manifest.NewModuleHashes()
+
+	var matchingModule *pbsubstreams.Module
+	for _, module := range pkg.Modules.Modules {
+		if module.Name == moduleName {
+			matchingModule = module
+		}
+	}
+	if matchingModule == nil {
+		return fmt.Errorf("module %q not found", moduleName)
+	}
+
+	hash, err := hashes.HashModule(pkg.Modules, matchingModule, graph)
+	if err != nil {
+		panic(err)
+	}
+	moduleHash := hex.EncodeToString(hash)
+	zlog.Info("found module hash", zap.String("hash", moduleHash), zap.String("module", matchingModule.Name))
+
+	switch matchingModule.Kind.(type) {
+	case *pbsubstreams.Module_KindBlockIndex_:
+	default:
+		return fmt.Errorf("not a block index module")
+	}
+
+	endBlock := blockNumber - (blockNumber % saveInterval) + saveInterval
+
+	indexFile, err := index.NewFile(objStore, moduleHash, matchingModule.Name, zlog, block.NewRange(blockNumber, endBlock))
+	if err != nil {
+		return fmt.Errorf("instantiating index file: %w", err)
+	}
+	if err := indexFile.Load(ctx); err != nil {
+		return fmt.Errorf("loading index file: %w", err)
+	}
+
+	indexFile.Print()
+	fmt.Println("done")
+
+	return nil
+}
+
 func runDecodeOutputsModuleRunE(cmd *cobra.Command, args []string) error {
 	ctx := cmd.Context()
-	saveInterval := mustGetUint64(cmd, "save-interval")
+	saveInterval := sflags.MustGetUint64(cmd, "save-interval")
 
 	manifestPath := ""
 	if len(args) == 4 {
@@ -209,7 +303,7 @@ func runDecodeOutputsModuleRunE(cmd *cobra.Command, args []string) error {
 	moduleHash := hex.EncodeToString(hash)
 	zlog.Info("found module hash", zap.String("hash", moduleHash), zap.String("module", matchingModule.Name))
 
-	startBlock := execout.ComputeStartBlock(requestedBlocks.StartBlock, saveInterval)
+	startBlock := requestedBlocks.StartBlock - requestedBlocks.StartBlock%saveInterval
 	if startBlock < matchingModule.InitialBlock {
 		startBlock = matchingModule.InitialBlock
 	}
@@ -252,9 +346,7 @@ func searchOutputsModule(
 			return fmt.Errorf("can't find cache at block %d storeURL %q", startBlock, moduleStore.BaseURL().String())
 		}
 
-		if err != nil {
-			return fmt.Errorf("loading cache %s file %s : %w", moduleStore.BaseURL(), outputCache.String(), err)
-		}
+		return fmt.Errorf("loading cache %s file %s: %w", moduleStore.BaseURL(), outputCache.String(), err)
 	}
 
 	for i := requestedBlocks.StartBlock; i < requestedBlocks.ExclusiveEndBlock; i++ {
diff --git a/tools/logging.go b/tools/logging.go
index 13ddea9cd..274b0e7c3 100644
--- a/tools/logging.go
+++ b/tools/logging.go
@@ -5,4 +5,4 @@ import (
 	"go.uber.org/zap"
 )
 
-var zlog, tracer = logging.PackageLogger("tools", "github.com/streamingfast/substreams/tools", logging.LoggerDefaultLevel(zap.InfoLevel))
+var zlog, _ = logging.PackageLogger("tools", "github.com/streamingfast/substreams/tools", logging.LoggerDefaultLevel(zap.InfoLevel))
diff --git a/tools/prometheus-exporter.go b/tools/prometheus-exporter.go
index 31ab12ebd..6d72da29b 100644
--- a/tools/prometheus-exporter.go
+++ b/tools/prometheus-exporter.go
@@ -3,7 +3,6 @@ package tools
 import (
 	"context"
 	"fmt"
-	"google.golang.org/grpc/metadata"
 	"io"
 	"net/http"
 	"strconv"
@@ -11,7 +10,10 @@ import (
 	"sync"
 	"time"
 
+	"google.golang.org/grpc/metadata"
+
 	"github.com/streamingfast/cli"
+	"github.com/streamingfast/cli/sflags"
 
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -65,7 +67,10 @@ func runPrometheus(cmd *cobra.Command, args []string) error {
 	blockHeight := args[2]
 
 	blockNum, err := strconv.ParseInt(blockHeight, 10, 64)
-	addr := mustGetString(cmd, "listen-addr")
+	if err != nil {
+		return err
+	}
+	addr := sflags.MustGetString(cmd, "listen-addr")
 
 	manifestReader, err := manifest.NewReader(manifestPath)
 	if err != nil {
@@ -80,10 +85,10 @@ func runPrometheus(cmd *cobra.Command, args []string) error {
 	outputStreamName := moduleName
 
 	authToken, authType := GetAuth(cmd, "substreams-api-key-envvar", "substreams-api-token-envvar")
-	insecure := mustGetBool(cmd, "insecure")
-	plaintext := mustGetBool(cmd, "plaintext")
-	interval := mustGetDuration(cmd, "lookup_interval")
-	timeout := mustGetDuration(cmd, "lookup_timeout")
+	insecure := sflags.MustGetBool(cmd, "insecure")
+	plaintext := sflags.MustGetBool(cmd, "plaintext")
+	interval := sflags.MustGetDuration(cmd, "lookup_interval")
+	timeout := sflags.MustGetDuration(cmd, "lookup_timeout")
 	for _, endpoint := range endpoints {
 		substreamsClientConfig := client.NewSubstreamsClientConfig(
 			endpoint,
diff --git a/tools/tier2call.go b/tools/tier2call.go
index 977000bb7..e4583e5d9 100644
--- a/tools/tier2call.go
+++ b/tools/tier2call.go
@@ -10,15 +10,16 @@ import (
 	"github.com/spf13/cobra"
 	"google.golang.org/grpc/metadata"
 
+	"github.com/streamingfast/cli/sflags"
 	"github.com/streamingfast/substreams/client"
 	"github.com/streamingfast/substreams/manifest"
 	pbssinternal "github.com/streamingfast/substreams/pb/sf/substreams/intern/v2"
 )
 
 var tier2CallCmd = &cobra.Command{
-	Use:   "tier2call <manifest> <output_module> <stage> <start_block> <stop_block>",
+	Use:   "tier2call <manifest> <output_module> <stage> <segment_number>",
 	Short: "Calls a tier2 service, for internal inspection",
-	Args:  cobra.ExactArgs(5),
+	Args:  cobra.ExactArgs(4),
 	RunE:  tier2CallE,
 }
 
@@ -46,9 +47,7 @@ func tier2CallE(cmd *cobra.Command, args []string) error {
 	manifestPath := args[0]
 	outputModule := args[1]
 	stage, _ := strconv.ParseUint(args[2], 10, 32)
-	startBlock, _ := strconv.ParseInt(args[3], 10, 64)
-	stopBlock, _ := strconv.ParseInt(args[4], 10, 64)
-
+	segmentNumber, _ := strconv.ParseUint(args[3], 10, 32)
 	manifestReader, err := manifest.NewReader(manifestPath)
 	if err != nil {
 		return fmt.Errorf("manifest reader: %w", err)
 	}
@@ -59,7 +58,7 @@ func tier2CallE(cmd *cobra.Command, args []string) error {
-	params, err := manifest.ParseParams(mustGetStringArray(cmd, "params"))
+	params, err := manifest.ParseParams(sflags.MustGetStringArray(cmd, "params"))
 	if err != nil {
 		return fmt.Errorf("parsing params: %w", err)
 	}
@@ -70,11 +69,11 @@ func tier2CallE(cmd *cobra.Command, args []string) error {
 	authToken, authType := GetAuth(cmd, "substreams-api-key-envvar", "substreams-api-token-envvar")
 
 	clientConfig := client.NewSubstreamsClientConfig(
-		mustGetString(cmd, "substreams-endpoint"),
+		sflags.MustGetString(cmd, "substreams-endpoint"),
 		authToken,
 		authType,
-		mustGetBool(cmd, "insecure"),
-		mustGetBool(cmd, "plaintext"),
+		sflags.MustGetBool(cmd, "insecure"),
+		sflags.MustGetBool(cmd, "plaintext"),
 	)
 	ssClient, _, callOpts, headers, err := client.NewSubstreamsInternalClient(clientConfig)
 	if err != nil {
@@ -86,8 +85,8 @@ func tier2CallE(cmd *cobra.Command, args []string) error {
 	ctx = metadata.AppendToOutgoingContext(ctx, headers.ToArray()...)
} //parse additional-headers flag - additionalHeaders := mustGetStringSlice(cmd, "header") - if additionalHeaders != nil { + additionalHeaders := sflags.MustGetStringSlice(cmd, "header") + if len(additionalHeaders) != 0 { res := parseHeaders(additionalHeaders) headerArray := make([]string, 0, len(res)*2) for k, v := range res { @@ -97,23 +96,22 @@ func tier2CallE(cmd *cobra.Command, args []string) error { ctx = metadata.AppendToOutgoingContext(ctx, headerArray...) } - meteringConfig := mustGetString(cmd, "metering-plugin") - blockType := mustGetString(cmd, "block-type") - stateStore := mustGetString(cmd, "state-store-url") - stateStoreDefaultTag := mustGetString(cmd, "state-store-default-tag") - mergedBlocksStore := mustGetString(cmd, "merged-blocks-store-url") - stateBundleSize := mustGetUint64(cmd, "state-bundle-size") + meteringConfig := sflags.MustGetString(cmd, "metering-plugin") + blockType := sflags.MustGetString(cmd, "block-type") + stateStore := sflags.MustGetString(cmd, "state-store-url") + stateStoreDefaultTag := sflags.MustGetString(cmd, "state-store-default-tag") + mergedBlocksStore := sflags.MustGetString(cmd, "merged-blocks-store-url") + stateBundleSize := sflags.MustGetUint64(cmd, "state-bundle-size") req, err := ssClient.ProcessRange(ctx, &pbssinternal.ProcessRangeRequest{ - StartBlockNum: uint64(startBlock), - StopBlockNum: uint64(stopBlock), + SegmentSize: stateBundleSize, + SegmentNumber: segmentNumber, OutputModule: outputModule, Modules: pkg.Modules, Stage: uint32(stage), MeteringConfig: meteringConfig, BlockType: blockType, MergedBlocksStore: mergedBlocksStore, - StateBundleSize: stateBundleSize, StateStore: stateStore, StateStoreDefaultTag: stateStoreDefaultTag, }, callOpts...) diff --git a/tui/print.go b/tui/print.go index a0c30cd01..f92332726 100644 --- a/tui/print.go +++ b/tui/print.go @@ -23,7 +23,6 @@ func (ui *TUI) decoratedBlockScopedData( clock *pbsubstreams.Clock, ) error { var s []string - for _, out := range append([]*pbsubstreamsrpc.MapModuleOutput{output}, debugMapOutputs...) 
diff --git a/tui/print.go b/tui/print.go
index a0c30cd01..f92332726 100644
--- a/tui/print.go
+++ b/tui/print.go
@@ -23,7 +23,6 @@ func (ui *TUI) decoratedBlockScopedData(
 	clock *pbsubstreams.Clock,
 ) error {
 	var s []string
-
 	for _, out := range append([]*pbsubstreamsrpc.MapModuleOutput{output}, debugMapOutputs...) {
 		if _, ok := ui.msgTypes[out.Name]; !ok {
 			continue
diff --git a/tui/tui.go b/tui/tui.go
index 020a59396..6d8027ae2 100644
--- a/tui/tui.go
+++ b/tui/tui.go
@@ -19,7 +19,7 @@ import (

 //go:generate go-enum -f=$GOFILE --nocase --marshal --names

-// ENUM(TUI, JSON, JSONL)
+// ENUM(TUI, JSON, JSONL, CLOCK)
 type OutputMode uint

 type TUI struct {
@@ -80,7 +80,10 @@ func (ui *TUI) Init(outputMode string) error {
 			msgType = modKind.KindStore.ValueType
 		case *pbsubstreams.Module_KindMap_:
 			msgType = modKind.KindMap.OutputType
+		case *pbsubstreams.Module_KindBlockIndex_:
+			msgType = modKind.KindBlockIndex.OutputType
 		}
+
 		msgType = strings.TrimPrefix(msgType, "proto:")

 		ui.msgTypes[mod.Name] = msgType
@@ -120,6 +123,8 @@ func (ui *TUI) configureOutputMode(outputMode string) error {
 	case OutputModeTUI:
 		ui.prettyPrintOutput = true
 	case OutputModeJSONL:
+	case OutputModeCLOCK:
+		fmt.Println("Writing clock information only (no data)")
 	case OutputModeJSON:
 		ui.prettyPrintOutput = true
 	default:
@@ -144,26 +149,34 @@ func (ui *TUI) Cancel() {

 func (ui *TUI) IncomingMessage(ctx context.Context, resp *pbsubstreamsrpc.Response, testRunner *test.Runner) error {
 	switch m := resp.Message.(type) {
 	case *pbsubstreamsrpc.Response_BlockUndoSignal:
-		if ui.outputMode == OutputModeTUI {
+		switch ui.outputMode {
+		case OutputModeTUI:
 			printUndo(m.BlockUndoSignal.LastValidBlock, m.BlockUndoSignal.LastValidCursor)
 			ui.ensureTerminalUnlocked()
-		} else {
+		case OutputModeJSON, OutputModeJSONL:
 			printUndoJSON(m.BlockUndoSignal.LastValidBlock, m.BlockUndoSignal.LastValidCursor)
+		case OutputModeCLOCK:
+			fmt.Println("UNDO:", m.BlockUndoSignal.LastValidBlock)
 		}
 	case *pbsubstreamsrpc.Response_BlockScopedData:
 		if testRunner != nil {
 			if err := testRunner.Test(ctx, m.BlockScopedData.Output, m.BlockScopedData.DebugMapOutputs, m.BlockScopedData.DebugStoreOutputs, m.BlockScopedData.Clock); err != nil {
-				fmt.Errorf("test runner failed: %w", err)
+				return fmt.Errorf("test runner failed: %w", err)
 			}
 		}
-		if ui.outputMode == OutputModeTUI {
-			printClock(m.BlockScopedData)
-		}
 		if m.BlockScopedData == nil {
 			return nil
 		}
+		switch ui.outputMode {
+		case OutputModeTUI:
+			printClock(m.BlockScopedData)
+		case OutputModeCLOCK:
+			printClock(m.BlockScopedData)
+			return nil
+		}
+
 		ui.seenFirstData = true
 		if ui.outputMode == OutputModeTUI {
 			ui.ensureTerminalUnlocked()
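The hunks above add a CLOCK output mode that prints only clock (block) information and undo notices, skipping payload data. Since the enum is generated with --nocase, the lowercase spelling parses too. A minimal usage sketch, assuming the tui package is importable at the path shown:

package main

import (
	"fmt"
	"log"

	"github.com/streamingfast/substreams/tui"
)

func main() {
	// --nocase generation means "clock" and "CLOCK" both resolve.
	mode, err := tui.ParseOutputMode("clock")
	if err != nil {
		log.Fatal(err) // ErrInvalidOutputMode for unknown names
	}
	fmt.Println(mode == tui.OutputModeCLOCK) // true
}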
diff --git a/tui/tui_enum.go b/tui/tui_enum.go
index 043377e0e..6674ec6f6 100644
--- a/tui/tui_enum.go
+++ b/tui/tui_enum.go
@@ -18,16 +18,19 @@ const (
 	OutputModeJSON
 	// OutputModeJSONL is a OutputMode of type JSONL.
 	OutputModeJSONL
+	// OutputModeCLOCK is a OutputMode of type CLOCK.
+	OutputModeCLOCK
 )

 var ErrInvalidOutputMode = fmt.Errorf("not a valid OutputMode, try [%s]", strings.Join(_OutputModeNames, ", "))

-const _OutputModeName = "TUIJSONJSONL"
+const _OutputModeName = "TUIJSONJSONLCLOCK"

 var _OutputModeNames = []string{
 	_OutputModeName[0:3],
 	_OutputModeName[3:7],
 	_OutputModeName[7:12],
+	_OutputModeName[12:17],
 }

 // OutputModeNames returns a list of possible string values of OutputMode.
@@ -41,6 +44,7 @@ var _OutputModeMap = map[OutputMode]string{
 	OutputModeTUI:   _OutputModeName[0:3],
 	OutputModeJSON:  _OutputModeName[3:7],
 	OutputModeJSONL: _OutputModeName[7:12],
+	OutputModeCLOCK: _OutputModeName[12:17],
 }

 // String implements the Stringer interface.
@@ -59,12 +63,14 @@ func (x OutputMode) IsValid() bool {
 }

 var _OutputModeValue = map[string]OutputMode{
-	_OutputModeName[0:3]:                   OutputModeTUI,
-	strings.ToLower(_OutputModeName[0:3]):  OutputModeTUI,
-	_OutputModeName[3:7]:                   OutputModeJSON,
-	strings.ToLower(_OutputModeName[3:7]):  OutputModeJSON,
-	_OutputModeName[7:12]:                  OutputModeJSONL,
-	strings.ToLower(_OutputModeName[7:12]): OutputModeJSONL,
+	_OutputModeName[0:3]:                    OutputModeTUI,
+	strings.ToLower(_OutputModeName[0:3]):   OutputModeTUI,
+	_OutputModeName[3:7]:                    OutputModeJSON,
+	strings.ToLower(_OutputModeName[3:7]):   OutputModeJSON,
+	_OutputModeName[7:12]:                   OutputModeJSONL,
+	strings.ToLower(_OutputModeName[7:12]):  OutputModeJSONL,
+	_OutputModeName[12:17]:                  OutputModeCLOCK,
+	strings.ToLower(_OutputModeName[12:17]): OutputModeCLOCK,
 }

 // ParseOutputMode attempts to convert a string to a OutputMode.
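The regenerated tui_enum.go keeps all enum names in a single backing string and slices it by cumulative offsets, so adding CLOCK means appending five characters and the new [12:17] slices. A self-contained check of that invariant:

package main

import "fmt"

// Offsets are cumulative name lengths: TUI=0:3, JSON=3:7, JSONL=7:12,
// CLOCK=12:17 — exactly the slices the generated tables use above.
const _OutputModeName = "TUIJSONJSONLCLOCK"

func main() {
	fmt.Println(_OutputModeName[12:17]) // prints: CLOCK
}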
diff --git a/tui/utils.go b/tui/utils.go
index 20e0ed087..06ffa5ac0 100644
--- a/tui/utils.go
+++ b/tui/utils.go
@@ -34,7 +34,7 @@ func (r ranges) Covered(lo, hi uint64) bool {
 func (r ranges) String() string {
 	var out []string
 	for _, m := range r {
-		out = append(out, fmt.Sprintf("%s", m.String()))
+		out = append(out, m.String())
 	}
 	return strings.Join(out, ", ")
 }
@@ -91,8 +91,6 @@ func (u updatedRanges) LoHi() (lo uint64, hi uint64) {
 func (u updatedRanges) Lo() uint64 { a, _ := u.LoHi(); return a }
 func (u updatedRanges) Hi() uint64 { _, b := u.LoHi(); return b }

-type newRange map[string]blockRange
-
 func mergeRangeLists(prevRanges ranges, newRange *blockRange) ranges {
 	// fmt.Println("merge input, prevRanges:", prevRanges, "new range:", newRange)
 	var stretched bool
diff --git a/tui2/components/blocksearch/blocksearch.go b/tui2/components/blocksearch/blocksearch.go
index fa1a2821b..da76fac78 100644
--- a/tui2/components/blocksearch/blocksearch.go
+++ b/tui2/components/blocksearch/blocksearch.go
@@ -1,11 +1,12 @@
 package blocksearch

 import (
-	"github.com/streamingfast/substreams/tui2/components/blockselect"
-	"github.com/streamingfast/substreams/tui2/components/search"
 	"strconv"
 	"strings"

+	"github.com/streamingfast/substreams/tui2/components/blockselect"
+	"github.com/streamingfast/substreams/tui2/components/search"
+
 	"github.com/charmbracelet/bubbles/textinput"
 	tea "github.com/charmbracelet/bubbletea"

@@ -117,12 +118,6 @@ func (s *BlockSearch) SetMatchCount(count int) {
 	s.timesFound = count
 }

-func (s *BlockSearch) applyBlockSearchQuery(query string) tea.Cmd {
-	return func() tea.Msg {
-		return ApplyBlockSearchQueryMsg(query)
-	}
-}
-
 func (s *BlockSearch) CheckValidQuery() (uint64, error) {
 	strippedQuery := strings.ReplaceAll(s.Current, ",", "")
 	strippedQuery = strings.ReplaceAll(strippedQuery, "#", "")
diff --git a/tui2/components/ranges/bar.go b/tui2/components/ranges/bar.go
index 539362122..d58816cbb 100644
--- a/tui2/components/ranges/bar.go
+++ b/tui2/components/ranges/bar.go
@@ -17,7 +17,6 @@ type Bar struct {
 	name           string
 	modules        []string
 	targetEndBlock uint64
-	totalBlocks    uint64

 	ranges ranges
 }
diff --git a/tui2/components/ranges/utils.go b/tui2/components/ranges/utils.go
index 056fa1b9b..b7ec718c5 100644
--- a/tui2/components/ranges/utils.go
+++ b/tui2/components/ranges/utils.go
@@ -34,7 +34,7 @@ func (r ranges) Covered(lo, hi uint64) bool {
 func (r ranges) String() string {
 	var out []string
 	for _, m := range r {
-		out = append(out, fmt.Sprintf("%s", m.String()))
+		out = append(out, m.String())
 	}
 	return strings.Join(out, ", ")
 }
@@ -67,32 +67,6 @@ func (b BlockRange) String() string {
 	return fmt.Sprintf("%d-%d", b.Start, b.End)
 }

-type updatedRanges map[string]ranges
-
-// LoHi returns the lowest and highest of all modules. The global span,
-// used to determine the width and the divider of each printable cell.
-func (u updatedRanges) LoHi() (lo uint64, hi uint64) {
-	var loset bool
-	for _, v := range u {
-		tlo, thi := v.LoHi()
-		if thi > hi {
-			hi = thi
-		}
-		if !loset {
-			lo = tlo
-			loset = true
-		} else if tlo < lo {
-			lo = tlo
-		}
-	}
-	return
-}
-
-func (u updatedRanges) Lo() uint64 { a, _ := u.LoHi(); return a }
-func (u updatedRanges) Hi() uint64 { _, b := u.LoHi(); return b }
-
-type newRange map[string]BlockRange
-
 func mergeRangeLists(prevRanges ranges, newRange *BlockRange) ranges {
 	// fmt.Println("merge input, prevRanges:", prevRanges, "new range:", newRange)
 	var stretched bool
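Both utils.go hunks above apply the same micro-cleanup: wrapping a value that is already a string in fmt.Sprintf("%s", ...) is a no-op with an extra allocation, so the String() result is used directly. A tiny illustration; blockRange here is a stand-in type, not the one from the patch:

package main

import "fmt"

type blockRange struct{ start, end uint64 }

func (b blockRange) String() string { return fmt.Sprintf("%d-%d", b.start, b.end) }

func main() {
	b := blockRange{100, 200}
	// Identical output; the Sprintf wrapper only adds formatting overhead.
	fmt.Println(fmt.Sprintf("%s", b.String()) == b.String()) // true
}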
diff --git a/tui2/pages/output/output.go b/tui2/pages/output/output.go
index 7e2c354b5..40f1435c6 100644
--- a/tui2/pages/output/output.go
+++ b/tui2/pages/output/output.go
@@ -505,15 +505,6 @@ func (o *Output) jumpToNextMatchingBlock() tea.Cmd {
 	}
 }

-func (o *Output) getActiveModuleIndex() int {
-	for i, mod := range o.moduleSelector.Modules {
-		if mod == o.active.Module {
-			return i
-		}
-	}
-	return 0
-}
-
 func (o *Output) hasDataForBlock(num uint64) bool {
 	for _, b := range o.blockSelector.BlocksWithData {
 		if b == num {
diff --git a/tui2/pages/output/render.go b/tui2/pages/output/render.go
index 18aa67b5b..ea18c0c59 100644
--- a/tui2/pages/output/render.go
+++ b/tui2/pages/output/render.go
@@ -49,10 +49,9 @@ func (o *Output) wrapLogs(log string) string {
 }

 type renderedOutput struct {
-	plainErrorReceived string
-	plainLogs          string
-	plainJSON          string
-	plainOutput        string
+	plainLogs   string
+	plainJSON   string
+	plainOutput string

 	error error

@@ -61,10 +60,6 @@ type renderedOutput struct {
 	styledJSON  string
 }

-func (r *renderedOutput) highlighted() string {
-	return ""
-}
-
 func (o *Output) renderedOutput(in *pbsubstreamsrpc.AnyModuleOutput, withStyle bool) (out *renderedOutput) {
 	out = &renderedOutput{styledError: &strings.Builder{}, styledLogs: &strings.Builder{}}
 	if in == nil {
diff --git a/tui2/pages/progress/progress.go b/tui2/pages/progress/progress.go
index cd40fa055..83919f5ba 100644
--- a/tui2/pages/progress/progress.go
+++ b/tui2/pages/progress/progress.go
@@ -19,8 +19,6 @@ import (
 	"github.com/streamingfast/substreams/tui2/stream"
 )

-type refreshProgress tea.Msg
-
 type Progress struct {
 	common.Common

@@ -30,7 +28,6 @@ type Progress struct {
 	targetBlock uint64

 	progressView    viewport.Model
-	progressUpdates int
 	dataPayloads    int
 	slowestJobs     []string
 	slowestModules  []string
@@ -70,31 +67,27 @@ func (p *Progress) Init() tea.Cmd {
 }

 func (p *Progress) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
-	var cmds []tea.Cmd
+	var outCmd tea.Cmd

-	switch msg.(type) {
+	switch msg := msg.(type) {
 	case tea.KeyMsg:
-		switch msg.(tea.KeyMsg).String() {
+		switch msg.String() {
 		case "m":
 			p.bars.Mode = (p.bars.Mode + 1) % 3
 			p.progressView.SetContent(p.bars.View())
 		}
-		var cmd tea.Cmd
-		p.progressView, cmd = p.progressView.Update(msg)
-		cmds = append(cmds, cmd)
+		p.progressView, outCmd = p.progressView.Update(msg)
 	case *pbsubstreamsrpc.SessionInit:
-		sessionInit := msg.(*pbsubstreamsrpc.SessionInit)
-		linearHandoff := sessionInit.LinearHandoffBlock
-		p.targetBlock = sessionInit.ResolvedStartBlock
+		linearHandoff := msg.LinearHandoffBlock
+		p.targetBlock = msg.ResolvedStartBlock
 		p.dataPayloads = 0
-		p.maxParallelWorkers = sessionInit.MaxParallelWorkers
+		p.maxParallelWorkers = msg.MaxParallelWorkers
 		p.bars = ranges.NewBars(p.Common, linearHandoff)
 		p.bars.Init()
 	case *pbsubstreamsrpc.BlockScopedData:
 		p.dataPayloads += 1
 	case *pbsubstreamsrpc.ModulesProgress:
-		msg := msg.(*pbsubstreamsrpc.ModulesProgress)
 		newBars := make([]*ranges.Bar, len(msg.Stages))

 		var totalProcessedBlocks uint64
@@ -229,7 +222,7 @@ func (p *Progress) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		p.progressView.SetContent(p.bars.View())
 	case stream.StreamErrorMsg:
 		p.state = "Error"
-		p.curErr = msg.(stream.StreamErrorMsg).Error()
+		p.curErr = msg.Error()
 		p.SetSize(p.Common.Width, p.Common.Height)
 		return p, nil

@@ -247,7 +240,7 @@ func (p *Progress) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		p.state = "Replayed from log"
 	}

-	return p, nil
+	return p, outCmd
 }
diff --git a/tui2/stream/stream.go b/tui2/stream/stream.go
index dee58fc20..77609e2f7 100644
--- a/tui2/stream/stream.go
+++ b/tui2/stream/stream.go
@@ -58,8 +58,8 @@ type Status int

 const (
 	StatusRunning Status = 0
-	StatusError          = 1
-	StatusStopped        = 2
+	StatusError   Status = 1
+	StatusStopped Status = 2
 )

 func (s *Stream) StreamStatus() Status {
diff --git a/tui2/ui.go b/tui2/ui.go
index a57e03321..cf054354f 100644
--- a/tui2/ui.go
+++ b/tui2/ui.go
@@ -16,7 +16,6 @@ import (
 	"github.com/streamingfast/substreams/tui2/pages/progress"
 	"github.com/streamingfast/substreams/tui2/pages/request"
 	"github.com/streamingfast/substreams/tui2/replaylog"
-	"github.com/streamingfast/substreams/tui2/stream"
 	streamui "github.com/streamingfast/substreams/tui2/stream"
 	"github.com/streamingfast/substreams/tui2/styles"
 	"github.com/streamingfast/substreams/tui2/tabs"
@@ -31,7 +30,6 @@ const (
 )

 type UI struct {
-	memoized string
 	lastView time.Time

 	msgDescs map[string]*manifest.ModuleDescriptor
@@ -44,8 +42,6 @@ type UI struct {
 	pages      []common.Component
 	activePage page
 	footer     *footer.Footer
-	showFooter bool
-	error      error
 	tabs       *tabs.Tabs
 }
@@ -208,11 +204,11 @@ func (ui *UI) View() string {
 	if ui.stream != nil {
 		var color lipgloss.TerminalColor
 		switch ui.stream.StreamStatus() {
-		case stream.StatusRunning:
+		case streamui.StatusRunning:
 			color = ui.Styles.StreamRunningColor
-		case stream.StatusStopped:
+		case streamui.StatusStopped:
 			color = ui.Styles.StreamStoppedColor
-		case stream.StatusError:
+		case streamui.StatusError:
 			color = ui.Styles.StreamErrorColor
 		}
 		headline = ui.Styles.Header.Copy().Foreground(color).Render("Substreams GUI")
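The progress.go rewrite above leans on Go's binding form of the type switch: `switch msg := msg.(type)` re-declares msg with the concrete type inside each case, which is what lets the patch delete the repeated msg.(T) assertions. A minimal sketch with illustrative message types:

package main

import "fmt"

type keyMsg string
type errMsg struct{ err error }

// handle mirrors the pattern: inside each case, msg already has the
// asserted type, so no msg.(keyMsg) / msg.(errMsg) casts are needed.
func handle(msg interface{}) {
	switch msg := msg.(type) {
	case keyMsg:
		fmt.Println("key:", string(msg))
	case errMsg:
		fmt.Println("error:", msg.err)
	}
}

func main() {
	handle(keyMsg("m"))
	handle(errMsg{fmt.Errorf("stream error")})
}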
diff --git a/wasm/arguments.go b/wasm/arguments.go
index fdda8f6b0..213d32c48 100644
--- a/wasm/arguments.go
+++ b/wasm/arguments.go
@@ -20,6 +20,7 @@ type ValueArgument interface {
 	Argument
 	Value() []byte
 	SetValue([]byte)
+	Active(blk uint64) bool
 }

 type ProtoScopeValueArgument interface {
@@ -29,13 +30,18 @@ type ProtoScopeValueArgument interface {

 // implementations
 type BaseArgument struct {
-	name string
+	name         string
+	initialBlock uint64
 }

 func (b *BaseArgument) Name() string {
 	return b.name
 }

+func (b *BaseArgument) Active(blk uint64) bool {
+	return blk >= b.initialBlock
+}
+
 type BaseValueArgument struct {
 	value []byte
 }
@@ -48,10 +54,11 @@ type SourceInput struct {
 	BaseValueArgument
 }

-func NewSourceInput(name string) *SourceInput {
+func NewSourceInput(name string, initialBlock uint64) *SourceInput {
 	return &SourceInput{
 		BaseArgument: BaseArgument{
-			name: name,
+			name:         name,
+			initialBlock: initialBlock,
 		},
 	}
 }
@@ -65,10 +72,11 @@ type MapInput struct {
 	BaseValueArgument
 }

-func NewMapInput(name string) *MapInput {
+func NewMapInput(name string, initialBlock uint64) *MapInput {
 	return &MapInput{
 		BaseArgument: BaseArgument{
-			name: name,
+			name:         name,
+			initialBlock: initialBlock,
 		},
 	}
 }
@@ -82,10 +90,11 @@ type StoreDeltaInput struct {
 	BaseValueArgument
 }

-func NewStoreDeltaInput(name string) *StoreDeltaInput {
+func NewStoreDeltaInput(name string, initialBlock uint64) *StoreDeltaInput {
 	return &StoreDeltaInput{
 		BaseArgument: BaseArgument{
-			name: name,
+			name:         name,
+			initialBlock: initialBlock,
 		},
 	}
 }
@@ -99,10 +108,11 @@ type StoreReaderInput struct {
 	Store store.Store
 }

-func NewStoreReaderInput(name string, store store.Store) *StoreReaderInput {
+func NewStoreReaderInput(name string, store store.Store, initialBlock uint64) *StoreReaderInput {
 	return &StoreReaderInput{
 		BaseArgument: BaseArgument{
-			name: name,
+			name:         name,
+			initialBlock: initialBlock,
 		},
 		Store: store,
 	}
@@ -118,7 +128,8 @@ type StoreWriterOutput struct {
 func NewStoreWriterOutput(name string, store store.Store, updatePolicy pbsubstreams.Module_KindStore_UpdatePolicy, valueType string) *StoreWriterOutput {
 	return &StoreWriterOutput{
 		BaseArgument: BaseArgument{
-			name: name,
+			name:         name,
+			initialBlock: 0,
 		},
 		Store:        store,
 		UpdatePolicy: updatePolicy,
@@ -134,7 +145,8 @@ type ParamsInput struct {
 func NewParamsInput(value string) *ParamsInput {
 	return &ParamsInput{
 		BaseArgument: BaseArgument{
-			name: "params",
+			name:         "params",
+			initialBlock: 0,
 		},
 		BaseValueArgument: BaseValueArgument{
 			value: []byte(value),
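wasm/arguments.go now threads an initialBlock through every input constructor and exposes Active(blk), which reports whether the input's module has reached its initial block (writers and params use 0, i.e. always active). A sketch of the gating this enables, using only the constructors and interfaces shown above and assuming Argument exposes Name() as BaseArgument implements; activeArgs and the block numbers are illustrative:

package main

import (
	"fmt"

	"github.com/streamingfast/substreams/wasm"
)

// activeArgs filters out inputs whose module has not reached its initial
// block yet. Illustrative helper, not part of the codebase.
func activeArgs(args []wasm.Argument, blk uint64) []wasm.Argument {
	var out []wasm.Argument
	for _, a := range args {
		if va, ok := a.(wasm.ValueArgument); ok && !va.Active(blk) {
			continue // skipped: blk < this input's initialBlock
		}
		out = append(out, a)
	}
	return out
}

func main() {
	args := []wasm.Argument{
		wasm.NewMapInput("map_events", 12_000_000), // active from block 12M on
		wasm.NewMapInput("map_transfers", 0),       // always active
	}
	for _, a := range activeArgs(args, 11_999_999) {
		fmt.Println(a.Name()) // prints only: map_transfers
	}
}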
diff --git a/wasm/bench/bench_test.go b/wasm/bench/bench_test.go
index 676207a00..f7176ba1c 100644
--- a/wasm/bench/bench_test.go
+++ b/wasm/bench/bench_test.go
@@ -122,7 +122,7 @@ func blockInputFile(t require.TestingT, filename string) wasm.Argument {
 	content, err := os.ReadFile(filename)
 	require.NoError(t, err)

-	input := wasm.NewSourceInput("sf.ethereum.type.v2.Block")
+	input := wasm.NewSourceInput("sf.ethereum.type.v2.Block", 0)
 	input.SetValue(content)

 	return input
diff --git a/wasm/bench/cmd/wasigo/main.go b/wasm/bench/cmd/wasigo/main.go
index 4ddb61831..4df3ecee6 100644
--- a/wasm/bench/cmd/wasigo/main.go
+++ b/wasm/bench/cmd/wasigo/main.go
@@ -41,8 +41,8 @@ func main() {
 	args := args(
 		wasm.NewParamsInput("{key.1: 'value.1'}"),
 		blockInputFile("/Users/colindickson/code/dfuse/substreams/wasm/bench/cmd/wasigo/testdata/block.binpb"),
-		wasm.NewStoreReaderInput("store.reader.1", createStore(ctx, "store.reader.1")),
-		wasm.NewStoreReaderInput("store.reader.2", createStore(ctx, "store.reader.2")),
+		wasm.NewStoreReaderInput("store.reader.1", createStore(ctx, "store.reader.1"), 0),
+		wasm.NewStoreReaderInput("store.reader.2", createStore(ctx, "store.reader.2"), 0),
 		wasm.NewStoreWriterOutput("out", createStore(ctx, "out"), 1, "string"),
 	)
@@ -105,7 +105,7 @@ func blockInputFile(filename string) wasm.Argument {
 		panic(fmt.Errorf("reading input file: %w", err))
 	}

-	input := wasm.NewSourceInput("sf.ethereum.type.v2.Block")
+	input := wasm.NewSourceInput("sf.ethereum.type.v2.Block", 0)
 	input.SetValue(content)

 	return input
diff --git a/wasm/wasmtime/logging.go b/wasm/wasmtime/logging.go
deleted file mode 100644
index ce80a69da..000000000
--- a/wasm/wasmtime/logging.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package wasmtime
-
-import (
-	"github.com/streamingfast/logging"
-)
-
-var zlog, tracer = logging.PackageLogger("wasmtime-runtime", "github.com/streamingfast/substreams/wasm/wasmtime")
diff --git a/wasm/wazero/instance.go b/wasm/wazero/instance.go
index f615056aa..5a3356d9b 100644
--- a/wasm/wazero/instance.go
+++ b/wasm/wazero/instance.go
@@ -33,9 +33,13 @@ func (i *Instance) Close(ctx context.Context) error {
 	return i.Module.Close(ctx)
 }

+type instanceKeyType struct{}
+
+var instanceKey = instanceKeyType{}
+
 func instanceFromContext(ctx context.Context) *Instance {
-	return ctx.Value("instance").(*Instance)
+	return ctx.Value(instanceKey).(*Instance)
 }

 func WithInstanceContext(ctx context.Context, inst *Instance) context.Context {
-	return context.WithValue(ctx, "instance", inst)
+	return context.WithValue(ctx, instanceKey, inst)
 }
diff --git a/wasm/wazero/logger_hostmod.go b/wasm/wazero/logger_hostmod.go
index e8c764774..43f10d260 100644
--- a/wasm/wazero/logger_hostmod.go
+++ b/wasm/wazero/logger_hostmod.go
@@ -34,7 +34,6 @@ var LoggerFuncs = []funcs{
 			zlog.Debug(message, zap.String("module_name", call.ModuleName), zap.String("log_source", "wasm"))
 		}
 		call.AppendLog(message)
-		return
 	}),
 },
}
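The instance.go change above swaps a bare string context key for an unexported struct type, the standard Go fix for the key-collision (and go vet) complaints that string keys invite: no other package can construct a value of an unexported type, so lookups cannot clash. The same pattern in a self-contained sketch, with assumed names:

package main

import (
	"context"
	"fmt"
)

// An unexported, zero-size key type: collisions with other packages'
// context values are impossible, unlike with the string key "instance".
type instanceKeyType struct{}

var instanceKey = instanceKeyType{}

type Instance struct{ name string }

func withInstance(ctx context.Context, inst *Instance) context.Context {
	return context.WithValue(ctx, instanceKey, inst)
}

func instanceFrom(ctx context.Context) *Instance {
	return ctx.Value(instanceKey).(*Instance) // panics if absent, as in the patch
}

func main() {
	ctx := withInstance(context.Background(), &Instance{name: "wazero"})
	fmt.Println(instanceFrom(ctx).name) // wazero
}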